forked from KolibriOS/kolibrios
drm: 3.19-rc2
git-svn-id: svn://kolibrios.org@5354 a494cfbc-eb01-0410-851d-a64ba20cac60
parent 334d99f484
commit 8aa816f1ce
@@ -29,15 +29,7 @@
#ifndef _AGP_BACKEND_PRIV_H
#define _AGP_BACKEND_PRIV_H 1

//#include <asm/agp.h>	/* for flush_agp_cache() */


enum chipset_type {
	NOT_SUPPORTED,
	SUPPORTED,
};

struct agp_memory;
#include <asm/agp.h>	/* for flush_agp_cache() */

#define PFX "agpgart: "
@@ -2,18 +2,20 @@
 * Intel AGPGART routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/gfp.h>
#include <linux/pci.h>

//#include <linux/agp_backend.h>
//#include <asm/smp.h>
#include <linux/spinlock.h>

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/agp_backend.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>

#include <linux/spinlock.h>



#include <syscall.h>
@@ -18,15 +18,14 @@
#include <syscall.h>

#include <linux/module.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#
#include <linux/export.h>
#include <linux/scatterlist.h>

//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
#include <linux/spinlock.h>
#include "agp.h"
#include "intel-agp.h"
@@ -1,16 +1,20 @@


CC = gcc.exe
CC = gcc
FASM = e:/fasm/fasm.exe

DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\"


DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..

INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux \
	-I$(DRV_INCLUDES)/linux/asm -I$(DRV_INCLUDES)/linux/uapi -I./
INCLUDES = -I$(DRV_INCLUDES) \
	-I$(DRV_INCLUDES)/asm \
	-I$(DRV_INCLUDES)/uapi \
	-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)

CFLAGS= -c -O2 $(INCLUDES) $(DEFINES) -march=i686 -msse2 -fomit-frame-pointer -fno-ident -fno-builtin-printf
CFLAGS+= -mno-stack-arg-probe -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields
@@ -62,6 +66,7 @@ NAME_SRC= main.c \
	i915_gpu_error.c \
	i915_irq.c \
	i915_params.c \
	intel_audio.c \
	intel_bios.c \
	intel_crt.c \
	intel_ddi.c \
@@ -74,16 +79,22 @@ NAME_SRC= main.c \
	intel_dsi_pll.c \
	intel_dvo.c \
	intel_fbdev.c \
	intel_fifo_underrun.c \
	intel_frontbuffer.c \
	intel_hdmi.c \
	intel_i2c.c \
	intel_lrc.c \
	intel_lvds.c \
	intel_modes.c \
	intel_panel.c \
	intel_pm.c \
	intel_psr.c \
	intel_renderstate_gen6.c \
	intel_renderstate_gen7.c \
	intel_renderstate_gen8.c \
	intel_renderstate_gen9.c \
	intel_ringbuffer.c \
	intel_runtime_pm.c \
	intel_sdvo.c \
	intel_sideband.c \
	intel_sprite.c \
@@ -104,6 +115,7 @@ NAME_SRC= main.c \
	$(DRM_TOPDIR)/drm_crtc_helper.c \
	$(DRM_TOPDIR)/drm_dp_helper.c \
	../drm_dp_mst_topology.c \
	$(DRM_TOPDIR)/drm_atomic.c \
	$(DRM_TOPDIR)/drm_edid.c \
	$(DRM_TOPDIR)/drm_fb_helper.c \
	$(DRM_TOPDIR)/drm_gem.c \
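Worth noting on the DEFINES change above: Kbuild normally injects KBUILD_MODNAME per object, so a standalone Makefile has to supply it by hand or any kernel code touching the macro fails to compile. A minimal sketch of the usual consumer, assuming the port keeps the upstream logging idiom (the pr_fmt override shown is the common kernel pattern, not something visible in this diff):

	/* Common kernel idiom: prefix every pr_*() message with the module
	 * name. KBUILD_MODNAME must come from the command line here, hence
	 * -DKBUILD_MODNAME=\"i915.dll\" in DEFINES. */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>

	static void example(void)
	{
		pr_info("loaded\n");	/* prints "i915.dll: loaded" */
	}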
@@ -2,14 +2,18 @@
CC = gcc
FASM = e:/fasm/fasm.exe

DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI
DEFINES = -D__KERNEL__ -DCONFIG_X86_32 -DCONFIG_TINY_RCU -DCONFIG_X86_L1_CACHE_SHIFT=6
DEFINES += -DCONFIG_ARCH_HAS_CACHE_LINE_SIZE -DCONFIG_DRM_I915_FBDEV -DCONFIG_DMI -DKBUILD_MODNAME=\"i915.dll\"


DDK_TOPDIR = /d/kos/kolibri/drivers/ddk
DRV_INCLUDES = /d/kos/kolibri/drivers/include
DRM_TOPDIR = $(CURDIR)/..

INCLUDES = -I$(DRV_INCLUDES) -I$(DRV_INCLUDES)/linux \
	-I$(DRV_INCLUDES)/linux/asm -I$(DRV_INCLUDES)/linux/uapi -I./
INCLUDES = -I$(DRV_INCLUDES) \
	-I$(DRV_INCLUDES)/asm \
	-I$(DRV_INCLUDES)/uapi \
	-I$(DRV_INCLUDES)/drm -I./ -I$(DRV_INCLUDES)

CFLAGS_OPT = -Os -march=i686 -msse2 -fomit-frame-pointer -fno-builtin-printf -fno-ident -mno-stack-arg-probe
CFLAGS_OPT+= -mpreferred-stack-boundary=2 -mincoming-stack-boundary=2 -mno-ms-bitfields -flto
@@ -60,6 +64,7 @@ NAME_SRC= main.c \
	i915_gpu_error.c \
	i915_irq.c \
	i915_params.c \
	intel_audio.c \
	intel_bios.c \
	intel_crt.c \
	intel_ddi.c \
@@ -72,16 +77,22 @@ NAME_SRC= main.c \
	intel_dsi_pll.c \
	intel_dvo.c \
	intel_fbdev.c \
	intel_fifo_underrun.c \
	intel_frontbuffer.c \
	intel_hdmi.c \
	intel_i2c.c \
	intel_lrc.c \
	intel_lvds.c \
	intel_modes.c \
	intel_panel.c \
	intel_pm.c \
	intel_psr.c \
	intel_renderstate_gen6.c \
	intel_renderstate_gen7.c \
	intel_renderstate_gen8.c \
	intel_renderstate_gen9.c \
	intel_ringbuffer.c \
	intel_runtime_pm.c \
	intel_sdvo.c \
	intel_sideband.c \
	intel_sprite.c \
@@ -102,6 +113,7 @@ NAME_SRC= main.c \
	$(DRM_TOPDIR)/drm_crtc_helper.c \
	$(DRM_TOPDIR)/drm_dp_helper.c \
	../drm_dp_mst_topology.c \
	$(DRM_TOPDIR)/drm_atomic.c \
	$(DRM_TOPDIR)/drm_edid.c \
	$(DRM_TOPDIR)/drm_fb_helper.c \
	$(DRM_TOPDIR)/drm_gem.c \
@@ -60,16 +60,297 @@

#define NS2501_REGC 0x0c

enum {
	MODE_640x480,
	MODE_800x600,
	MODE_1024x768,
};

struct ns2501_reg {
	uint8_t offset;
	uint8_t value;
};

/*
 * Magic values based on what the BIOS on
 * Fujitsu-Siemens Lifebook S6010 programs (1024x768 panel).
 */
static const struct ns2501_reg regs_1024x768[][86] = {
	[MODE_640x480] = {
		[0] = { .offset = 0x0a, .value = 0x81, },
		[1] = { .offset = 0x18, .value = 0x07, },
		[2] = { .offset = 0x19, .value = 0x00, },
		[3] = { .offset = 0x1a, .value = 0x00, },
		[4] = { .offset = 0x1b, .value = 0x11, },
		[5] = { .offset = 0x1c, .value = 0x54, },
		[6] = { .offset = 0x1d, .value = 0x03, },
		[7] = { .offset = 0x1e, .value = 0x02, },
		[8] = { .offset = 0xf3, .value = 0x90, },
		[9] = { .offset = 0xf9, .value = 0x00, },
		[10] = { .offset = 0xc1, .value = 0x90, },
		[11] = { .offset = 0xc2, .value = 0x00, },
		[12] = { .offset = 0xc3, .value = 0x0f, },
		[13] = { .offset = 0xc4, .value = 0x03, },
		[14] = { .offset = 0xc5, .value = 0x16, },
		[15] = { .offset = 0xc6, .value = 0x00, },
		[16] = { .offset = 0xc7, .value = 0x02, },
		[17] = { .offset = 0xc8, .value = 0x02, },
		[18] = { .offset = 0xf4, .value = 0x00, },
		[19] = { .offset = 0x80, .value = 0xff, },
		[20] = { .offset = 0x81, .value = 0x07, },
		[21] = { .offset = 0x82, .value = 0x3d, },
		[22] = { .offset = 0x83, .value = 0x05, },
		[23] = { .offset = 0x94, .value = 0x00, },
		[24] = { .offset = 0x95, .value = 0x00, },
		[25] = { .offset = 0x96, .value = 0x05, },
		[26] = { .offset = 0x97, .value = 0x00, },
		[27] = { .offset = 0x9a, .value = 0x88, },
		[28] = { .offset = 0x9b, .value = 0x00, },
		[29] = { .offset = 0x98, .value = 0x00, },
		[30] = { .offset = 0x99, .value = 0x00, },
		[31] = { .offset = 0xf7, .value = 0x88, },
		[32] = { .offset = 0xf8, .value = 0x0a, },
		[33] = { .offset = 0x9c, .value = 0x24, },
		[34] = { .offset = 0x9d, .value = 0x00, },
		[35] = { .offset = 0x9e, .value = 0x25, },
		[36] = { .offset = 0x9f, .value = 0x03, },
		[37] = { .offset = 0xa0, .value = 0x28, },
		[38] = { .offset = 0xa1, .value = 0x01, },
		[39] = { .offset = 0xa2, .value = 0x28, },
		[40] = { .offset = 0xa3, .value = 0x05, },
		[41] = { .offset = 0xb6, .value = 0x09, },
		[42] = { .offset = 0xb8, .value = 0x00, },
		[43] = { .offset = 0xb9, .value = 0xa0, },
		[44] = { .offset = 0xba, .value = 0x00, },
		[45] = { .offset = 0xbb, .value = 0x20, },
		[46] = { .offset = 0x10, .value = 0x00, },
		[47] = { .offset = 0x11, .value = 0xa0, },
		[48] = { .offset = 0x12, .value = 0x02, },
		[49] = { .offset = 0x20, .value = 0x00, },
		[50] = { .offset = 0x22, .value = 0x00, },
		[51] = { .offset = 0x23, .value = 0x00, },
		[52] = { .offset = 0x24, .value = 0x00, },
		[53] = { .offset = 0x25, .value = 0x00, },
		[54] = { .offset = 0x8c, .value = 0x10, },
		[55] = { .offset = 0x8d, .value = 0x02, },
		[56] = { .offset = 0x8e, .value = 0x10, },
		[57] = { .offset = 0x8f, .value = 0x00, },
		[58] = { .offset = 0x90, .value = 0xff, },
		[59] = { .offset = 0x91, .value = 0x07, },
		[60] = { .offset = 0x92, .value = 0xa0, },
		[61] = { .offset = 0x93, .value = 0x02, },
		[62] = { .offset = 0xa5, .value = 0x00, },
		[63] = { .offset = 0xa6, .value = 0x00, },
		[64] = { .offset = 0xa7, .value = 0x00, },
		[65] = { .offset = 0xa8, .value = 0x00, },
		[66] = { .offset = 0xa9, .value = 0x04, },
		[67] = { .offset = 0xaa, .value = 0x70, },
		[68] = { .offset = 0xab, .value = 0x4f, },
		[69] = { .offset = 0xac, .value = 0x00, },
		[70] = { .offset = 0xa4, .value = 0x84, },
		[71] = { .offset = 0x7e, .value = 0x18, },
		[72] = { .offset = 0x84, .value = 0x00, },
		[73] = { .offset = 0x85, .value = 0x00, },
		[74] = { .offset = 0x86, .value = 0x00, },
		[75] = { .offset = 0x87, .value = 0x00, },
		[76] = { .offset = 0x88, .value = 0x00, },
		[77] = { .offset = 0x89, .value = 0x00, },
		[78] = { .offset = 0x8a, .value = 0x00, },
		[79] = { .offset = 0x8b, .value = 0x00, },
		[80] = { .offset = 0x26, .value = 0x00, },
		[81] = { .offset = 0x27, .value = 0x00, },
		[82] = { .offset = 0xad, .value = 0x00, },
		[83] = { .offset = 0x08, .value = 0x30, },	/* 0x31 */
		[84] = { .offset = 0x41, .value = 0x00, },
		[85] = { .offset = 0xc0, .value = 0x05, },
	},
	[MODE_800x600] = {
		[0] = { .offset = 0x0a, .value = 0x81, },
		[1] = { .offset = 0x18, .value = 0x07, },
		[2] = { .offset = 0x19, .value = 0x00, },
		[3] = { .offset = 0x1a, .value = 0x00, },
		[4] = { .offset = 0x1b, .value = 0x19, },
		[5] = { .offset = 0x1c, .value = 0x64, },
		[6] = { .offset = 0x1d, .value = 0x02, },
		[7] = { .offset = 0x1e, .value = 0x02, },
		[8] = { .offset = 0xf3, .value = 0x90, },
		[9] = { .offset = 0xf9, .value = 0x00, },
		[10] = { .offset = 0xc1, .value = 0xd7, },
		[11] = { .offset = 0xc2, .value = 0x00, },
		[12] = { .offset = 0xc3, .value = 0xf8, },
		[13] = { .offset = 0xc4, .value = 0x03, },
		[14] = { .offset = 0xc5, .value = 0x1a, },
		[15] = { .offset = 0xc6, .value = 0x00, },
		[16] = { .offset = 0xc7, .value = 0x73, },
		[17] = { .offset = 0xc8, .value = 0x02, },
		[18] = { .offset = 0xf4, .value = 0x00, },
		[19] = { .offset = 0x80, .value = 0x27, },
		[20] = { .offset = 0x81, .value = 0x03, },
		[21] = { .offset = 0x82, .value = 0x41, },
		[22] = { .offset = 0x83, .value = 0x05, },
		[23] = { .offset = 0x94, .value = 0x00, },
		[24] = { .offset = 0x95, .value = 0x00, },
		[25] = { .offset = 0x96, .value = 0x05, },
		[26] = { .offset = 0x97, .value = 0x00, },
		[27] = { .offset = 0x9a, .value = 0x88, },
		[28] = { .offset = 0x9b, .value = 0x00, },
		[29] = { .offset = 0x98, .value = 0x00, },
		[30] = { .offset = 0x99, .value = 0x00, },
		[31] = { .offset = 0xf7, .value = 0x88, },
		[32] = { .offset = 0xf8, .value = 0x06, },
		[33] = { .offset = 0x9c, .value = 0x23, },
		[34] = { .offset = 0x9d, .value = 0x00, },
		[35] = { .offset = 0x9e, .value = 0x25, },
		[36] = { .offset = 0x9f, .value = 0x03, },
		[37] = { .offset = 0xa0, .value = 0x28, },
		[38] = { .offset = 0xa1, .value = 0x01, },
		[39] = { .offset = 0xa2, .value = 0x28, },
		[40] = { .offset = 0xa3, .value = 0x05, },
		[41] = { .offset = 0xb6, .value = 0x09, },
		[42] = { .offset = 0xb8, .value = 0x30, },
		[43] = { .offset = 0xb9, .value = 0xc8, },
		[44] = { .offset = 0xba, .value = 0x00, },
		[45] = { .offset = 0xbb, .value = 0x20, },
		[46] = { .offset = 0x10, .value = 0x20, },
		[47] = { .offset = 0x11, .value = 0xc8, },
		[48] = { .offset = 0x12, .value = 0x02, },
		[49] = { .offset = 0x20, .value = 0x00, },
		[50] = { .offset = 0x22, .value = 0x00, },
		[51] = { .offset = 0x23, .value = 0x00, },
		[52] = { .offset = 0x24, .value = 0x00, },
		[53] = { .offset = 0x25, .value = 0x00, },
		[54] = { .offset = 0x8c, .value = 0x10, },
		[55] = { .offset = 0x8d, .value = 0x02, },
		[56] = { .offset = 0x8e, .value = 0x04, },
		[57] = { .offset = 0x8f, .value = 0x00, },
		[58] = { .offset = 0x90, .value = 0xff, },
		[59] = { .offset = 0x91, .value = 0x07, },
		[60] = { .offset = 0x92, .value = 0xa0, },
		[61] = { .offset = 0x93, .value = 0x02, },
		[62] = { .offset = 0xa5, .value = 0x00, },
		[63] = { .offset = 0xa6, .value = 0x00, },
		[64] = { .offset = 0xa7, .value = 0x00, },
		[65] = { .offset = 0xa8, .value = 0x00, },
		[66] = { .offset = 0xa9, .value = 0x83, },
		[67] = { .offset = 0xaa, .value = 0x40, },
		[68] = { .offset = 0xab, .value = 0x32, },
		[69] = { .offset = 0xac, .value = 0x00, },
		[70] = { .offset = 0xa4, .value = 0x80, },
		[71] = { .offset = 0x7e, .value = 0x18, },
		[72] = { .offset = 0x84, .value = 0x00, },
		[73] = { .offset = 0x85, .value = 0x00, },
		[74] = { .offset = 0x86, .value = 0x00, },
		[75] = { .offset = 0x87, .value = 0x00, },
		[76] = { .offset = 0x88, .value = 0x00, },
		[77] = { .offset = 0x89, .value = 0x00, },
		[78] = { .offset = 0x8a, .value = 0x00, },
		[79] = { .offset = 0x8b, .value = 0x00, },
		[80] = { .offset = 0x26, .value = 0x00, },
		[81] = { .offset = 0x27, .value = 0x00, },
		[82] = { .offset = 0xad, .value = 0x00, },
		[83] = { .offset = 0x08, .value = 0x30, },	/* 0x31 */
		[84] = { .offset = 0x41, .value = 0x00, },
		[85] = { .offset = 0xc0, .value = 0x07, },
	},
	[MODE_1024x768] = {
		[0] = { .offset = 0x0a, .value = 0x81, },
		[1] = { .offset = 0x18, .value = 0x07, },
		[2] = { .offset = 0x19, .value = 0x00, },
		[3] = { .offset = 0x1a, .value = 0x00, },
		[4] = { .offset = 0x1b, .value = 0x11, },
		[5] = { .offset = 0x1c, .value = 0x54, },
		[6] = { .offset = 0x1d, .value = 0x03, },
		[7] = { .offset = 0x1e, .value = 0x02, },
		[8] = { .offset = 0xf3, .value = 0x90, },
		[9] = { .offset = 0xf9, .value = 0x00, },
		[10] = { .offset = 0xc1, .value = 0x90, },
		[11] = { .offset = 0xc2, .value = 0x00, },
		[12] = { .offset = 0xc3, .value = 0x0f, },
		[13] = { .offset = 0xc4, .value = 0x03, },
		[14] = { .offset = 0xc5, .value = 0x16, },
		[15] = { .offset = 0xc6, .value = 0x00, },
		[16] = { .offset = 0xc7, .value = 0x02, },
		[17] = { .offset = 0xc8, .value = 0x02, },
		[18] = { .offset = 0xf4, .value = 0x00, },
		[19] = { .offset = 0x80, .value = 0xff, },
		[20] = { .offset = 0x81, .value = 0x07, },
		[21] = { .offset = 0x82, .value = 0x3d, },
		[22] = { .offset = 0x83, .value = 0x05, },
		[23] = { .offset = 0x94, .value = 0x00, },
		[24] = { .offset = 0x95, .value = 0x00, },
		[25] = { .offset = 0x96, .value = 0x05, },
		[26] = { .offset = 0x97, .value = 0x00, },
		[27] = { .offset = 0x9a, .value = 0x88, },
		[28] = { .offset = 0x9b, .value = 0x00, },
		[29] = { .offset = 0x98, .value = 0x00, },
		[30] = { .offset = 0x99, .value = 0x00, },
		[31] = { .offset = 0xf7, .value = 0x88, },
		[32] = { .offset = 0xf8, .value = 0x0a, },
		[33] = { .offset = 0x9c, .value = 0x24, },
		[34] = { .offset = 0x9d, .value = 0x00, },
		[35] = { .offset = 0x9e, .value = 0x25, },
		[36] = { .offset = 0x9f, .value = 0x03, },
		[37] = { .offset = 0xa0, .value = 0x28, },
		[38] = { .offset = 0xa1, .value = 0x01, },
		[39] = { .offset = 0xa2, .value = 0x28, },
		[40] = { .offset = 0xa3, .value = 0x05, },
		[41] = { .offset = 0xb6, .value = 0x09, },
		[42] = { .offset = 0xb8, .value = 0x00, },
		[43] = { .offset = 0xb9, .value = 0xa0, },
		[44] = { .offset = 0xba, .value = 0x00, },
		[45] = { .offset = 0xbb, .value = 0x20, },
		[46] = { .offset = 0x10, .value = 0x00, },
		[47] = { .offset = 0x11, .value = 0xa0, },
		[48] = { .offset = 0x12, .value = 0x02, },
		[49] = { .offset = 0x20, .value = 0x00, },
		[50] = { .offset = 0x22, .value = 0x00, },
		[51] = { .offset = 0x23, .value = 0x00, },
		[52] = { .offset = 0x24, .value = 0x00, },
		[53] = { .offset = 0x25, .value = 0x00, },
		[54] = { .offset = 0x8c, .value = 0x10, },
		[55] = { .offset = 0x8d, .value = 0x02, },
		[56] = { .offset = 0x8e, .value = 0x10, },
		[57] = { .offset = 0x8f, .value = 0x00, },
		[58] = { .offset = 0x90, .value = 0xff, },
		[59] = { .offset = 0x91, .value = 0x07, },
		[60] = { .offset = 0x92, .value = 0xa0, },
		[61] = { .offset = 0x93, .value = 0x02, },
		[62] = { .offset = 0xa5, .value = 0x00, },
		[63] = { .offset = 0xa6, .value = 0x00, },
		[64] = { .offset = 0xa7, .value = 0x00, },
		[65] = { .offset = 0xa8, .value = 0x00, },
		[66] = { .offset = 0xa9, .value = 0x04, },
		[67] = { .offset = 0xaa, .value = 0x70, },
		[68] = { .offset = 0xab, .value = 0x4f, },
		[69] = { .offset = 0xac, .value = 0x00, },
		[70] = { .offset = 0xa4, .value = 0x84, },
		[71] = { .offset = 0x7e, .value = 0x18, },
		[72] = { .offset = 0x84, .value = 0x00, },
		[73] = { .offset = 0x85, .value = 0x00, },
		[74] = { .offset = 0x86, .value = 0x00, },
		[75] = { .offset = 0x87, .value = 0x00, },
		[76] = { .offset = 0x88, .value = 0x00, },
		[77] = { .offset = 0x89, .value = 0x00, },
		[78] = { .offset = 0x8a, .value = 0x00, },
		[79] = { .offset = 0x8b, .value = 0x00, },
		[80] = { .offset = 0x26, .value = 0x00, },
		[81] = { .offset = 0x27, .value = 0x00, },
		[82] = { .offset = 0xad, .value = 0x00, },
		[83] = { .offset = 0x08, .value = 0x34, },	/* 0x35 */
		[84] = { .offset = 0x41, .value = 0x00, },
		[85] = { .offset = 0xc0, .value = 0x01, },
	},
};

static const struct ns2501_reg regs_init[] = {
	[0] = { .offset = 0x35, .value = 0xff, },
	[1] = { .offset = 0x34, .value = 0x00, },
	[2] = { .offset = 0x08, .value = 0x30, },
};

struct ns2501_priv {
	//I2CDevRec d;
	bool quiet;
	int reg_8_shadow;
	int reg_8_set;
	// Shadow registers for i915
	int dvoc;
	int pll_a;
	int srcdim;
	int fw_blc;
	const struct ns2501_reg *regs;
};

#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
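The point of the tables above is that a mode switch collapses to picking a row and streaming it out, instead of the long hand-written write sequences this commit deletes below. A minimal sketch of the pattern, reusing the driver's own ns2501_writeb(); the wrapper function itself is hypothetical:

	/* Hypothetical wrapper illustrating the table-driven approach:
	 * select the per-mode register table, then write each
	 * (offset, value) pair to the chip in order. */
	static void ns2501_program_table(struct intel_dvo_device *dvo,
					 struct ns2501_priv *ns, int mode_idx)
	{
		int i;

		ns->regs = regs_1024x768[mode_idx];
		for (i = 0; i < 86; i++)
			ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
	}

Note that the real ns2501_mode_set() below deliberately stops at entry 83: the last three entries (registers 0x08, 0x41 and 0xc0) are power-sequencing values that ns2501_dpms() applies in its own order.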
@@ -205,11 +486,9 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
		goto out;
	}
	ns->quiet = false;
	ns->reg_8_set = 0;
	ns->reg_8_shadow =
		NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;

	DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");

	return true;

out:
@@ -242,9 +521,9 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
	 * of the panel in here so we could always accept it
	 * by disabling the scaler.
	 */
	if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
	    (mode->hdisplay == 640 && mode->vdisplay == 480) ||
	    (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
	if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) ||
	    (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
	    (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) {
		return MODE_OK;
	} else {
		return MODE_ONE_SIZE;	/* Is this a reasonable error? */
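The tightened check pins each resolution to a single dotclock: 25.175, 40 and 65 MHz are the standard 60 Hz VESA clocks for these three modes, which is consistent with the fixed register tables above. Restated as data, purely as an editor's illustration (no such table exists in the driver):

	/* Illustrative only: the (resolution, dotclock) whitelist that the
	 * new ns2501_mode_valid() effectively implements; clocks in kHz. */
	static const struct { int w, h, clock; } ns2501_allowed_modes[] = {
		{  640,  480, 25175 },
		{  800,  600, 40000 },
		{ 1024,  768, 65000 },
	};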
@@ -255,180 +534,30 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
			    struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode)
{
	bool ok;
	int retries = 10;
	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
	int mode_idx, i;

	DRM_DEBUG_KMS
	    ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
	     mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);

	/*
	 * Where do I find the native resolution for which scaling is not required???
	 *
	 * First trigger the DVO on as otherwise the chip does not appear on the i2c
	 * bus.
	 */
	do {
		ok = true;
	if (mode->hdisplay == 640 && mode->vdisplay == 480)
		mode_idx = MODE_640x480;
	else if (mode->hdisplay == 800 && mode->vdisplay == 600)
		mode_idx = MODE_800x600;
	else if (mode->hdisplay == 1024 && mode->vdisplay == 768)
		mode_idx = MODE_1024x768;
	else
		return;

		if (mode->hdisplay == 800 && mode->vdisplay == 600) {
			/* mode 277 */
			ns->reg_8_shadow &= ~NS2501_8_BPAS;
			DRM_DEBUG_KMS("switching to 800x600\n");
	/* Hopefully doing it every time won't hurt... */
	for (i = 0; i < ARRAY_SIZE(regs_init); i++)
		ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);

			/*
			 * No, I do not know where this data comes from.
			 * It is just what the video bios left in the DVO, so
			 * I'm just copying it here over.
			 * This also means that I cannot support any other modes
			 * except the ones supported by the bios.
			 */
			ok &= ns2501_writeb(dvo, 0x11, 0xc8);	// 0xc7 also works.
			ok &= ns2501_writeb(dvo, 0x1b, 0x19);
			ok &= ns2501_writeb(dvo, 0x1c, 0x62);	// VBIOS left 0x64 here, but 0x62 works nicer
			ok &= ns2501_writeb(dvo, 0x1d, 0x02);
	ns->regs = regs_1024x768[mode_idx];

			ok &= ns2501_writeb(dvo, 0x34, 0x03);
			ok &= ns2501_writeb(dvo, 0x35, 0xff);

			ok &= ns2501_writeb(dvo, 0x80, 0x27);
			ok &= ns2501_writeb(dvo, 0x81, 0x03);
			ok &= ns2501_writeb(dvo, 0x82, 0x41);
			ok &= ns2501_writeb(dvo, 0x83, 0x05);

			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
			ok &= ns2501_writeb(dvo, 0x8e, 0x04);
			ok &= ns2501_writeb(dvo, 0x8f, 0x00);

			ok &= ns2501_writeb(dvo, 0x90, 0xfe);	/* vertical. VBIOS left 0xff here, but 0xfe works better */
			ok &= ns2501_writeb(dvo, 0x91, 0x07);
			ok &= ns2501_writeb(dvo, 0x94, 0x00);
			ok &= ns2501_writeb(dvo, 0x95, 0x00);

			ok &= ns2501_writeb(dvo, 0x96, 0x00);

			ok &= ns2501_writeb(dvo, 0x99, 0x00);
			ok &= ns2501_writeb(dvo, 0x9a, 0x88);

			ok &= ns2501_writeb(dvo, 0x9c, 0x23);	/* Looks like first and last line of the image. */
			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
			ok &= ns2501_writeb(dvo, 0x9f, 0x03);

			ok &= ns2501_writeb(dvo, 0xa4, 0x80);

			ok &= ns2501_writeb(dvo, 0xb6, 0x00);

			ok &= ns2501_writeb(dvo, 0xb9, 0xc8);	/* horizontal? */
			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */

			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
			ok &= ns2501_writeb(dvo, 0xc1, 0xd7);

			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
			ok &= ns2501_writeb(dvo, 0xc3, 0xf8);

			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
			ok &= ns2501_writeb(dvo, 0xc5, 0x1a);

			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
			ok &= ns2501_writeb(dvo, 0xc7, 0x73);
			ok &= ns2501_writeb(dvo, 0xc8, 0x02);

		} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
			/* mode 274 */
			DRM_DEBUG_KMS("switching to 640x480\n");
			/*
			 * No, I do not know where this data comes from.
			 * It is just what the video bios left in the DVO, so
			 * I'm just copying it here over.
			 * This also means that I cannot support any other modes
			 * except the ones supported by the bios.
			 */
			ns->reg_8_shadow &= ~NS2501_8_BPAS;

			ok &= ns2501_writeb(dvo, 0x11, 0xa0);
			ok &= ns2501_writeb(dvo, 0x1b, 0x11);
			ok &= ns2501_writeb(dvo, 0x1c, 0x54);
			ok &= ns2501_writeb(dvo, 0x1d, 0x03);

			ok &= ns2501_writeb(dvo, 0x34, 0x03);
			ok &= ns2501_writeb(dvo, 0x35, 0xff);

			ok &= ns2501_writeb(dvo, 0x80, 0xff);
			ok &= ns2501_writeb(dvo, 0x81, 0x07);
			ok &= ns2501_writeb(dvo, 0x82, 0x3d);
			ok &= ns2501_writeb(dvo, 0x83, 0x05);

			ok &= ns2501_writeb(dvo, 0x8d, 0x02);
			ok &= ns2501_writeb(dvo, 0x8e, 0x10);
			ok &= ns2501_writeb(dvo, 0x8f, 0x00);

			ok &= ns2501_writeb(dvo, 0x90, 0xff);	/* vertical */
			ok &= ns2501_writeb(dvo, 0x91, 0x07);
			ok &= ns2501_writeb(dvo, 0x94, 0x00);
			ok &= ns2501_writeb(dvo, 0x95, 0x00);

			ok &= ns2501_writeb(dvo, 0x96, 0x05);

			ok &= ns2501_writeb(dvo, 0x99, 0x00);
			ok &= ns2501_writeb(dvo, 0x9a, 0x88);

			ok &= ns2501_writeb(dvo, 0x9c, 0x24);
			ok &= ns2501_writeb(dvo, 0x9d, 0x00);
			ok &= ns2501_writeb(dvo, 0x9e, 0x25);
			ok &= ns2501_writeb(dvo, 0x9f, 0x03);

			ok &= ns2501_writeb(dvo, 0xa4, 0x84);

			ok &= ns2501_writeb(dvo, 0xb6, 0x09);

			ok &= ns2501_writeb(dvo, 0xb9, 0xa0);	/* horizontal? */
			ok &= ns2501_writeb(dvo, 0xba, 0x00);	/* horizontal? */

			ok &= ns2501_writeb(dvo, 0xc0, 0x05);	/* horizontal? */
			ok &= ns2501_writeb(dvo, 0xc1, 0x90);

			ok &= ns2501_writeb(dvo, 0xc2, 0x00);
			ok &= ns2501_writeb(dvo, 0xc3, 0x0f);

			ok &= ns2501_writeb(dvo, 0xc4, 0x03);
			ok &= ns2501_writeb(dvo, 0xc5, 0x16);

			ok &= ns2501_writeb(dvo, 0xc6, 0x00);
			ok &= ns2501_writeb(dvo, 0xc7, 0x02);
			ok &= ns2501_writeb(dvo, 0xc8, 0x02);

		} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
			/* mode 280 */
			DRM_DEBUG_KMS("switching to 1024x768\n");
			/*
			 * This might or might not work, actually. I'm silently
			 * assuming here that the native panel resolution is
			 * 1024x768. If not, then this leaves the scaler disabled
			 * generating a picture that is likely not the expected.
			 *
			 * Problem is that I do not know where to take the panel
			 * dimensions from.
			 *
			 * Enable the bypass, scaling not required.
			 *
			 * The scaler registers are irrelevant here....
			 *
			 */
			ns->reg_8_shadow |= NS2501_8_BPAS;
			ok &= ns2501_writeb(dvo, 0x37, 0x44);
		} else {
			/*
			 * Data not known. Bummer!
			 * Hopefully, the code should not go here
			 * as mode_OK delivered no other modes.
			 */
			ns->reg_8_shadow |= NS2501_8_BPAS;
		}
		ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
	} while (!ok && retries--);
	for (i = 0; i < 84; i++)
		ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
}

/* set the NS2501 power state */
@@ -439,62 +568,48 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
	if (!ns2501_readb(dvo, NS2501_REG8, &ch))
		return false;

	if (ch & NS2501_8_PD)
		return true;
	else
		return false;
	return ch & NS2501_8_PD;
}

/* set the NS2501 power state */
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
	bool ok;
	int retries = 10;
	struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
	unsigned char ch;

	DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);

	ch = ns->reg_8_shadow;
	if (enable) {
	if (WARN_ON(ns->regs[83].offset != 0x08 ||
		    ns->regs[84].offset != 0x41 ||
		    ns->regs[85].offset != 0xc0))
		return;

	if (enable)
		ch |= NS2501_8_PD;
	else
		ch &= ~NS2501_8_PD;
		ns2501_writeb(dvo, 0xc0, ns->regs[85].value | 0x08);

	if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
		ns->reg_8_set = 1;
		ns->reg_8_shadow = ch;
		ns2501_writeb(dvo, 0x41, ns->regs[84].value);

		do {
			ok = true;
			ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
			ok &=
			    ns2501_writeb(dvo, 0x34,
					  enable ? 0x03 : 0x00);
			ok &=
			    ns2501_writeb(dvo, 0x35,
					  enable ? 0xff : 0x00);
		} while (!ok && retries--);
		ns2501_writeb(dvo, 0x34, 0x01);
		msleep(15);

		ns2501_writeb(dvo, 0x08, 0x35);
		if (!(ns->regs[83].value & NS2501_8_BPAS))
			ns2501_writeb(dvo, 0x08, 0x31);
		msleep(200);

		ns2501_writeb(dvo, 0x34, 0x03);

		ns2501_writeb(dvo, 0xc0, ns->regs[85].value);
	} else {
		ns2501_writeb(dvo, 0x34, 0x01);
		msleep(200);

		ns2501_writeb(dvo, 0x08, 0x34);
		msleep(15);

		ns2501_writeb(dvo, 0x34, 0x00);
	}
}

static void ns2501_dump_regs(struct intel_dvo_device *dvo)
{
	uint8_t val;

	ns2501_readb(dvo, NS2501_FREQ_LO, &val);
	DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
	ns2501_readb(dvo, NS2501_FREQ_HI, &val);
	DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
	ns2501_readb(dvo, NS2501_REG8, &val);
	DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val);
	ns2501_readb(dvo, NS2501_REG9, &val);
	DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val);
	ns2501_readb(dvo, NS2501_REGC, &val);
	DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val);
}

static void ns2501_destroy(struct intel_dvo_device *dvo)
{
	struct ns2501_priv *ns = dvo->dev_priv;
@@ -512,6 +627,5 @@ struct intel_dvo_dev_ops ns2501_ops = {
	.mode_set = ns2501_mode_set,
	.dpms = ns2501_dpms,
	.get_hw_state = ns2501_get_hw_state,
	.dump_regs = ns2501_dump_regs,
	.destroy = ns2501_destroy,
};
@@ -73,7 +73,7 @@
 * those commands required by the parser. This generally works because command
 * opcode ranges have standard command length encodings. So for commands that
 * the parser does not need to check, it can easily skip them. This is
 * implementated via a per-ring length decoding vfunc.
 * implemented via a per-ring length decoding vfunc.
 *
 * Unfortunately, there are a number of commands that do not follow the standard
 * length encoding for their opcode range, primarily amongst the MI_* commands.
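To make the "standard command length encodings" remark concrete: for most opcode ranges the DWord length sits in the command header itself, so a per-ring vfunc can size a command it otherwise ignores from the header alone. A sketch, with made-up bit positions (the real decoders in this file follow the documented per-client encodings):

	/* Illustrative length decoder; field widths here are invented. */
	static u32 example_cmd_length(u32 cmd_header)
	{
		u32 client = cmd_header >> 29;		/* hypothetical client field */

		if (client == 0)			/* e.g. the MI client */
			return (cmd_header & 0x3f) + 2;	/* length bits plus bias */

		return (cmd_header & 0xff) + 2;		/* default encoding */
	}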
@@ -138,6 +138,11 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
		.mask = MI_GLOBAL_GTT,
		.expected = 0,
	      }}, ),
	/*
	 * MI_BATCH_BUFFER_START requires some special handling. It's not
	 * really a 'skip' action but it doesn't seem like it's worth adding
	 * a new action. See i915_parse_cmds().
	 */
	CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};

@@ -408,6 +413,8 @@ static const u32 gen7_render_regs[] = {
	REG64(PS_INVOCATION_COUNT),
	REG64(PS_DEPTH_COUNT),
	OACONTROL, /* Only allowed for LRI and SRM. See below. */
	REG64(MI_PREDICATE_SRC0),
	REG64(MI_PREDICATE_SRC1),
	GEN7_3DPRIM_END_OFFSET,
	GEN7_3DPRIM_START_VERTEX,
	GEN7_3DPRIM_VERTEX_COUNT,
@@ -709,12 +716,14 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring)
	BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count));
	BUG_ON(!validate_regs_sorted(ring));

	if (hash_empty(ring->cmd_hash)) {
		ret = init_hash_table(ring, cmd_tables, cmd_table_count);
		if (ret) {
			DRM_ERROR("CMD: cmd_parser_init failed!\n");
			fini_hash_table(ring);
			return ret;
		}
	}

	ring->needs_cmd_parser = true;

@@ -836,23 +845,16 @@ finish:
 * @ring: the ring in question
 *
 * Only certain platforms require software batch buffer command parsing, and
 * only when enabled via module paramter.
 * only when enabled via module parameter.
 *
 * Return: true if the ring requires software command parsing
 */
bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!ring->needs_cmd_parser)
		return false;

	/*
	 * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
	 * disabled. That will cause all of the parser's PPGTT checks to
	 * fail. For now, disable parsing when PPGTT is off.
	 */
	if (!dev_priv->mm.aliasing_ppgtt)
	if (!USES_PPGTT(ring->dev))
		return false;

	return (i915.enable_cmd_parser == 1);
@@ -888,8 +890,10 @@ static bool check_cmd(const struct intel_engine_cs *ring,
		 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
		 */
		if (reg_addr == OACONTROL) {
			if (desc->cmd.value == MI_LOAD_REGISTER_MEM)
			if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
				DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
				return false;
			}

			if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
				*oacontrol_set = (cmd[2] != 0);
@@ -959,7 +963,8 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 * Parses the specified batch buffer looking for privilege violations as
 * described in the overview.
 *
 * Return: non-zero if the parser finds violations or otherwise fails
 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
 * if the batch appears legal but should use hardware parsing
 */
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
@@ -1006,6 +1011,16 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
			break;
		}

		/*
		 * If the batch buffer contains a chained batch, return an
		 * error that tells the caller to abort and dispatch the
		 * workload as a non-secure batch.
		 */
		if (desc->cmd.value == MI_BATCH_BUFFER_START) {
			ret = -EACCES;
			break;
		}

		if (desc->flags & CMD_DESC_FIXED)
			length = desc->length.fixed;
		else
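The -EACCES convention above only works if the execbuffer path treats it as "fall back", not "fail". A rough sketch of the intended caller-side handling; the flag plumbing is assumed, not shown in this diff:

	/* Hypothetical caller: a chained batch is not a violation, it simply
	 * cannot be validated, so dispatch it without elevated privileges. */
	ret = i915_parse_cmds(ring, batch_obj, shadow_batch_obj,
			      batch_start_offset, batch_len, is_master);
	if (ret == 0)
		flags |= I915_DISPATCH_SECURE;	/* parsed and clean */
	else if (ret == -EACCES)
		ret = 0;			/* run as a normal, non-secure batch */
	else
		goto err;			/* genuine violation or failure */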
@@ -1061,6 +1076,8 @@ int i915_cmd_parser_get_version(void)
 *
 * 1. Initial version. Checks batches and reports violations, but leaves
 *    hardware parsing enabled (so does not allow new use cases).
 * 2. Allow access to the MI_PREDICATE_SRC0 and
 *    MI_PREDICATE_SRC1 registers.
 */
	return 1;
	return 2;
}
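Userspace can discover this version bump through the getparam interface. A minimal sketch of the query; I915_PARAM_CMD_PARSER_VERSION is the upstream parameter name, the surrounding code is illustrative:

	/* Illustrative libdrm-style query of the command parser version. */
	#include <xf86drm.h>
	#include <i915_drm.h>

	static int cmd_parser_version(int fd)
	{
		drm_i915_getparam_t gp = { 0 };
		int value = 0;

		gp.param = I915_PARAM_CMD_PARSER_VERSION;
		gp.value = &value;
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return -1;	/* old kernel: parameter unknown */
		return value;		/* 2 after this change */
	}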
File diff suppressed because it is too large
@@ -36,7 +36,6 @@

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>

@@ -44,8 +43,7 @@

#include <syscall.h>

#define __read_mostly

#
static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
@@ -336,6 +334,19 @@ static const struct intel_device_info intel_cherryview_info = {
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_skylake_info = {
	.is_preliminary = 1,
	.is_skylake = 1,
	.gen = 9, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
@@ -368,7 +379,8 @@ static const struct intel_device_info intel_cherryview_info = {
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
	INTEL_CHV_IDS(&intel_cherryview_info)
	INTEL_CHV_IDS(&intel_cherryview_info), \
	INTEL_SKL_IDS(&intel_skylake_info)

static const struct pci_device_id pciidlist[] = { /* aka */
	INTEL_PCI_IDS,
@@ -427,7 +439,7 @@ void intel_detect_pch(struct drm_device *dev)
			dev_priv->pch_type = PCH_LPT;
			DRM_DEBUG_KMS("Found LynxPoint PCH\n");
			WARN_ON(!IS_HASWELL(dev));
			WARN_ON(IS_ULT(dev));
			WARN_ON(IS_HSW_ULT(dev));
		} else if (IS_BROADWELL(dev)) {
			dev_priv->pch_type = PCH_LPT;
			dev_priv->pch_id =
@@ -438,7 +450,15 @@ void intel_detect_pch(struct drm_device *dev)
			dev_priv->pch_type = PCH_LPT;
			DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
			WARN_ON(!IS_HASWELL(dev));
			WARN_ON(!IS_ULT(dev));
			WARN_ON(!IS_HSW_ULT(dev));
		} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
			dev_priv->pch_type = PCH_SPT;
			DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
			WARN_ON(!IS_SKYLAKE(dev));
		} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
			dev_priv->pch_type = PCH_SPT;
			DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
			WARN_ON(!IS_SKYLAKE(dev));
		} else
			continue;

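For context on the id comparisons in the two hunks above: intel_detect_pch() classifies the PCH by scanning for Intel ISA bridges and masking the PCI device ID. Roughly, and reconstructed from the upstream driver rather than from this diff, the surrounding loop looks like:

	/* Sketch of the enclosing detection loop (details assumed). */
	struct pci_dev *pch = NULL;

	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id =
				pch->device & INTEL_PCH_DEVICE_ID_MASK;

			/* ...the if/else-if chain shown above... */
		}
	}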
@@ -459,6 +479,10 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;
@@ -488,7 +512,11 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
	drm_modeset_unlock_all(dev);
}

static int i915_drm_freeze(struct drm_device *dev)
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);

static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
@@ -518,6 +546,8 @@ static int i915_drm_freeze(struct drm_device *dev)
		return error;
	}

	intel_suspend_gt_powersave(dev);

	/*
	 * Disable CRTCs directly since we want to preserve sw state
	 * for _thaw. Also, power gate the CRTC power wells.
@@ -529,14 +559,12 @@ static int i915_drm_freeze(struct drm_device *dev)

		intel_dp_mst_suspend(dev);

		flush_delayed_work(&dev_priv->rps.delayed_resume_work);
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_hpd_cancel_work(dev_priv);

		intel_runtime_pm_disable_interrupts(dev);
		intel_suspend_encoders(dev_priv);

		intel_suspend_gt_powersave(dev);

		intel_modeset_suspend_hw(dev);
		intel_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);
@@ -553,9 +581,7 @@ static int i915_drm_freeze(struct drm_device *dev)
	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

@@ -564,7 +590,26 @@ static int i915_drm_freeze(struct drm_device *dev)
	return 0;
}

int i915_suspend(struct drm_device *dev, pm_message_t state)
static int i915_drm_suspend_late(struct drm_device *drm_dev)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int ret;

	ret = intel_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);

		return ret;
	}

	pci_disable_device(drm_dev->pdev);
	pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	return 0;
}

int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
{
	int error;

@@ -574,58 +619,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	error = i915_drm_suspend(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
	return i915_drm_suspend_late(dev);
}

void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
}

static int i915_drm_thaw_early(struct drm_device *dev)
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_disable_pc8(dev_priv);

	intel_uncore_early_sanitize(dev, true);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return 0;
}

static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
@@ -646,47 +658,36 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
		}
		mutex_unlock(&dev->struct_mutex);

		intel_runtime_pm_restore_interrupts(dev);
		/* We need working interrupts for modeset enabling ... */
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_modeset_init_hw(dev);

		{
			unsigned long irqflags;
			spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		spin_lock_irq(&dev_priv->irq_lock);
			if (dev_priv->display.hpd_irq_setup)
				dev_priv->display.hpd_irq_setup(dev);
			spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		}
		spin_unlock_irq(&dev_priv->irq_lock);

		intel_dp_mst_resume(dev);
		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		intel_dp_mst_resume(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might loose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		intel_hpd_init(dev_priv);
		/* Config may have changed between suspend and resume */
		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contented on resume due
	 * to all the printk activity. Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
@@ -694,21 +695,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	return 0;
}

static int i915_drm_thaw(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}

static int i915_resume_early(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
@@ -724,33 +719,34 @@ static int i915_resume_early(struct drm_device *dev)

	pci_set_master(dev->pdev);

	return i915_drm_thaw_early(dev);
	if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);

	intel_uncore_early_sanitize(dev, true);

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);

	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return ret;
}

int i915_resume(struct drm_device *dev)
int i915_resume_legacy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

static int i915_resume_legacy(struct drm_device *dev)
{
	i915_resume_early(dev);
	i915_resume(dev);

	return 0;
	return i915_drm_resume(dev);
}

/**
@@ -796,6 +792,9 @@ int i915_reset(struct drm_device *dev)
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
@@ -816,11 +815,14 @@ int i915_reset(struct drm_device *dev)
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
	    !dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
		dev_priv->gpu_error.reload_in_reset = true;

		ret = i915_gem_init_hw(dev);

		dev_priv->gpu_error.reload_in_reset = false;

		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -841,8 +843,6 @@ int i915_reset(struct drm_device *dev)
		 */
		if (INTEL_INFO(dev)->gen > 5)
			intel_reset_gt_powersave(dev);

		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}
@@ -895,14 +895,13 @@ static int i915_pm_suspend(struct device *dev)
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_freeze(drm_dev);
	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = drm_dev->dev_private;

	/*
	 * We have a suspedn ordering issue with the snd-hda driver also
@@ -916,13 +915,7 @@ static int i915_pm_suspend_late(struct device *dev)
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
		hsw_enable_pc8(dev_priv);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
	return i915_drm_suspend_late(drm_dev);
}

static int i915_pm_resume_early(struct device *dev)
@@ -930,7 +923,10 @@ static int i915_pm_resume_early(struct device *dev)
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume_early(drm_dev);
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
@@ -938,69 +934,19 @@ static int i915_pm_resume(struct device *dev)
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw_early(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}

static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int snb_runtime_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_init_pch_refclk(dev);

	return 0;
}

static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
	hsw_disable_pc8(dev_priv);

	return 0;
}

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1291,7 +1237,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}

static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;
@@ -1331,7 +1277,8 @@ err1:
	return err;
}

static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
@@ -1356,8 +1303,10 @@ static int vlv_runtime_resume(struct drm_i915_private *dev_priv)

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume) {
		intel_init_clock_gating(dev);
		i915_gem_restore_fences(dev);
	}

	return ret;
}
@@ -1372,7 +1321,9 @@ static int intel_runtime_suspend(struct device *device)
	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	WARN_ON(!HAS_RUNTIME_PM(dev));
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");
@@ -1401,28 +1352,13 @@ static int intel_runtime_suspend(struct device *device)
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps.work can't be rearmed here, since we get here only after making
	 * sure the GPU is idle and the RPS freq is set to the minimum. See
	 * intel_mark_idle().
	 */
	cancel_work_sync(&dev_priv->rps.work);
	intel_runtime_pm_disable_interrupts(dev);

	if (IS_GEN6(dev)) {
		ret = 0;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_suspend(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_suspend(dev_priv);
	} else {
		ret = -ENODEV;
		WARN_ON(1);
	}
	intel_suspend_gt_powersave(dev);
	intel_runtime_pm_disable_interrupts(dev_priv);

	ret = intel_suspend_complete(dev_priv);
	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_restore_interrupts(dev);
		intel_runtime_pm_enable_interrupts(dev_priv);

		return ret;
	}
@@ -1431,13 +1367,29 @@ static int intel_runtime_suspend(struct device *device)
	dev_priv->pm.suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_HASWELL(dev)) {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent
		 * via the suspend path.
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev, PCI_D1);
	} else {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it. Let's
		 * assume the other non-Haswell platforms will stay the same as
		 * Broadwell.
		 */
		intel_opregion_notify_adapter(dev, PCI_D3hot);
	}

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
@@ -1448,25 +1400,22 @@ static int intel_runtime_resume(struct device *device)
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	int ret = 0;

	WARN_ON(!HAS_RUNTIME_PM(dev));
	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	if (IS_GEN6(dev)) {
		ret = snb_runtime_resume(dev_priv);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_resume(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_resume(dev_priv);
	} else {
		WARN_ON(1);
		ret = -ENODEV;
	}
	if (IS_GEN6(dev_priv))
		intel_init_pch_refclk(dev);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_disable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	/*
	 * No point of rolling back things in case of an error, as the best
@@ -1475,8 +1424,8 @@ static int intel_runtime_resume(struct device *device)
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);
	intel_runtime_pm_enable_interrupts(dev_priv);
	intel_enable_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
@@ -1486,17 +1435,60 @@ static int intel_runtime_resume(struct device *device)
	return ret;
}

/*
 * This function implements common functionality of runtime and system
 * suspend sequence.
 */
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		ret = hsw_suspend_complete(dev_priv);
	else if (IS_VALLEYVIEW(dev))
		ret = vlv_suspend_complete(dev_priv);
	else
		ret = 0;

	return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_suspend,
|
||||
.freeze_late = i915_pm_suspend_late,
|
||||
.thaw_early = i915_pm_resume_early,
|
||||
.thaw = i915_pm_resume,
|
||||
.poweroff = i915_pm_suspend,
|
||||
.poweroff_late = i915_pm_suspend_late,
|
||||
.restore_early = i915_pm_resume_early,
|
||||
.restore = i915_pm_resume,
|
||||
|
||||
/* S0ix (via runtime suspend) event handlers */
|
||||
.runtime_suspend = intel_runtime_suspend,
|
||||
.runtime_resume = intel_runtime_resume,
|
||||
};
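Editor's note: the S4 comment block above documents the pattern the new table relies on: hibernation reuses the same two driver paths, so every S4 slot points back at the suspend/resume pair. A minimal sketch of that wiring for a hypothetical driver (foo_suspend/foo_resume are stand-ins, not i915 symbols):

#include <linux/pm.h>

/* Hypothetical handlers; any driver-specific suspend/resume pair works. */
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }

/* Same shape as i915_pm_ops: S3 and S4 events share two callbacks. */
static const struct dev_pm_ops foo_pm_ops = {
    .suspend  = foo_suspend,  /* S3 entry [PMSG_SUSPEND] */
    .resume   = foo_resume,   /* S3 exit  [PMSG_RESUME]  */
    .freeze   = foo_suspend,  /* before creating the hibernation image */
    .thaw     = foo_resume,   /* after the image is written or recovery */
    .poweroff = foo_suspend,  /* image written, about to reboot */
    .restore  = foo_resume,   /* after rebooting into the restored image */
};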
@@ -1542,8 +1534,6 @@ static struct drm_driver driver = {
//    .resume = i915_resume,

//    .device_is_agp = i915_driver_device_is_agp,
//    .master_create = i915_master_create,
//    .master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
    .debugfs_init = i915_debugfs_init,
    .debugfs_cleanup = i915_debugfs_cleanup,
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -88,6 +88,7 @@

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"

/* This is a HW constraint. The value below is the largest known requirement
 * I've seen in a spec to date, and that was a workaround for a non-shipping
@@ -96,50 +97,6 @@
#define GEN6_CONTEXT_ALIGN (64<<10)
#define GEN7_CONTEXT_ALIGN 4096

static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
    struct drm_device *dev = ppgtt->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_address_space *vm = &ppgtt->base;

    if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
        (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
        ppgtt->base.cleanup(&ppgtt->base);
        return;
    }

    /*
     * Make sure vmas are unbound before we take down the drm_mm
     *
     * FIXME: Proper refcounting should take care of this, this shouldn't be
     * needed at all.
     */
    if (!list_empty(&vm->active_list)) {
        struct i915_vma *vma;

        list_for_each_entry(vma, &vm->active_list, mm_list)
            if (WARN_ON(list_empty(&vma->vma_link) ||
                        list_is_singular(&vma->vma_link)))
                break;

        i915_gem_evict_vm(&ppgtt->base, true);
    } else {
        i915_gem_retire_requests(dev);
        i915_gem_evict_vm(&ppgtt->base, false);
    }

    ppgtt->base.cleanup(&ppgtt->base);
}

static void ppgtt_release(struct kref *kref)
{
    struct i915_hw_ppgtt *ppgtt =
        container_of(kref, struct i915_hw_ppgtt, ref);

    do_ppgtt_cleanup(ppgtt);
    kfree(ppgtt);
}
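Editor's note: the removed ppgtt_release() above is the standard kref release pattern, which the new i915_ppgtt_put() path keeps using internally: the final kref_put() recovers the enclosing object via container_of() and frees it. A kernel-style sketch with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>

/* Illustrative only; 'foo' stands in for i915_hw_ppgtt. */
struct foo {
    struct kref ref;
    int payload;
};

static void foo_release(struct kref *kref)
{
    /* Recover the enclosing object from its embedded kref. */
    struct foo *f = container_of(kref, struct foo, ref);

    kfree(f);
}

static void foo_put(struct foo *f)
{
    /* foo_release() runs exactly once, when the last reference drops. */
    kref_put(&f->ref, foo_release);
}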
static size_t get_context_alignment(struct drm_device *dev)
{
    if (IS_GEN6(dev))
@@ -180,23 +137,21 @@ void i915_gem_context_free(struct kref *ctx_ref)
{
    struct intel_context *ctx = container_of(ctx_ref,
                                             typeof(*ctx), ref);
    struct i915_hw_ppgtt *ppgtt = NULL;

    if (ctx->legacy_hw_ctx.rcs_state) {
        /* We refcount even the aliasing PPGTT to keep the code symmetric */
        if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
            ppgtt = ctx_to_ppgtt(ctx);
    }
    trace_i915_context_free(ctx);

    if (i915.enable_execlists)
        intel_lr_context_free(ctx);

    i915_ppgtt_put(ctx->ppgtt);

    if (ppgtt)
        kref_put(&ppgtt->ref, ppgtt_release);
    if (ctx->legacy_hw_ctx.rcs_state)
        drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
    list_del(&ctx->link);
    kfree(ctx);
}

static struct drm_i915_gem_object *
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
{
    struct drm_i915_gem_object *obj;
@@ -226,26 +181,6 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
    return obj;
}

static struct i915_hw_ppgtt *
create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
{
    struct i915_hw_ppgtt *ppgtt;
    int ret;

    ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
    if (!ppgtt)
        return ERR_PTR(-ENOMEM);

    ret = i915_gem_init_ppgtt(dev, ppgtt);
    if (ret) {
        kfree(ppgtt);
        return ERR_PTR(ret);
    }

    ppgtt->ctx = ctx;
    return ppgtt;
}

static struct intel_context *
__create_hw_context(struct drm_device *dev,
                    struct drm_i915_file_private *file_priv)
@@ -301,11 +236,9 @@ err_out:
 */
static struct intel_context *
i915_gem_create_context(struct drm_device *dev,
                        struct drm_i915_file_private *file_priv,
                        bool create_vm)
                        struct drm_i915_file_private *file_priv)
{
    const bool is_global_default_ctx = file_priv == NULL;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_context *ctx;
    int ret = 0;

@@ -331,34 +264,20 @@ i915_gem_create_context(struct drm_device *dev,
    }
    }

    if (create_vm) {
        struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
    if (USES_FULL_PPGTT(dev)) {
        struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);

        if (IS_ERR_OR_NULL(ppgtt)) {
            DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                             PTR_ERR(ppgtt));
            ret = PTR_ERR(ppgtt);
            goto err_unpin;
        } else
            ctx->vm = &ppgtt->base;
        }

        /* This case is reserved for the global default context and
         * should only happen once. */
        if (is_global_default_ctx) {
            if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
                ret = -EEXIST;
                goto err_unpin;
        ctx->ppgtt = ppgtt;
    }

            dev_priv->mm.aliasing_ppgtt = ppgtt;
        }
    } else if (USES_PPGTT(dev)) {
        /* For platforms which only have aliasing PPGTT, we fake the
         * address space and refcounting. */
        ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
        kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
    } else
        ctx->vm = &dev_priv->gtt.base;
    trace_i915_context_create(ctx);

    return ctx;

@@ -375,34 +294,23 @@ void i915_gem_context_reset(struct drm_device *dev)
    struct drm_i915_private *dev_priv = dev->dev_private;
    int i;

    /* Prevent the hardware from restoring the last context (which hung) on
     * the next switch */
    /* In execlists mode we will unreference the context when the execlist
     * queue is cleared and the requests destroyed.
     */
    if (i915.enable_execlists)
        return;

    for (i = 0; i < I915_NUM_RINGS; i++) {
        struct intel_engine_cs *ring = &dev_priv->ring[i];
        struct intel_context *dctx = ring->default_context;
        struct intel_context *lctx = ring->last_context;

        /* Do a fake switch to the default context */
        if (lctx == dctx)
            continue;

        if (!lctx)
            continue;

        if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
            WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
                                          get_context_alignment(dev), 0));
            /* Fake a finish/inactive */
            dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
            dctx->legacy_hw_ctx.rcs_state->active = 0;
        }

        if (lctx) {
        if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
            i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);

        i915_gem_context_unreference(lctx);
        i915_gem_context_reference(dctx);
        ring->last_context = dctx;
        ring->last_context = NULL;
        }
    }
}

@@ -417,7 +325,11 @@ int i915_gem_context_init(struct drm_device *dev)
    if (WARN_ON(dev_priv->ring[RCS].default_context))
        return 0;

    if (HAS_HW_CONTEXTS(dev)) {
    if (i915.enable_execlists) {
        /* NB: intentionally left blank. We will allocate our own
         * backing objects as we need them, thank you very much */
        dev_priv->hw_context_size = 0;
    } else if (HAS_HW_CONTEXTS(dev)) {
        dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
        if (dev_priv->hw_context_size > (1<<20)) {
            DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
@@ -426,18 +338,23 @@ int i915_gem_context_init(struct drm_device *dev)
    }
    }

    ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
    ctx = i915_gem_create_context(dev, NULL);
    if (IS_ERR(ctx)) {
        DRM_ERROR("Failed to create default global context (error %ld)\n",
                  PTR_ERR(ctx));
        return PTR_ERR(ctx);
    }

    /* NB: RCS will hold a ref for all rings */
    for (i = 0; i < I915_NUM_RINGS; i++)
        dev_priv->ring[i].default_context = ctx;
    for (i = 0; i < I915_NUM_RINGS; i++) {
        struct intel_engine_cs *ring = &dev_priv->ring[i];

    DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
        /* NB: RCS will hold a ref for all rings */
        ring->default_context = ctx;
    }

    DRM_DEBUG_DRIVER("%s context support initialized\n",
            i915.enable_execlists ? "LR" :
            dev_priv->hw_context_size ? "HW" : "fake");
    return 0;
}

@@ -489,19 +406,11 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
    struct intel_engine_cs *ring;
    int ret, i;

    /* This is the only place the aliasing PPGTT gets enabled, which means
     * it has to happen before we bail on reset */
    if (dev_priv->mm.aliasing_ppgtt) {
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        ppgtt->enable(ppgtt);
    }

    /* FIXME: We should make this work, even in reset */
    if (i915_reset_in_progress(&dev_priv->gpu_error))
        return 0;

    BUG_ON(!dev_priv->ring[RCS].default_context);

    if (i915.enable_execlists)
        return 0;

    for_each_ring(ring, dev_priv, i) {
        ret = i915_switch_context(ring, ring->default_context);
        if (ret)
@@ -527,7 +436,7 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
    idr_init(&file_priv->context_idr);

    mutex_lock(&dev->struct_mutex);
    ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
    ctx = i915_gem_create_context(dev, file_priv);
    mutex_unlock(&dev->struct_mutex);

    if (IS_ERR(ctx)) {
@@ -563,7 +472,13 @@ mi_set_context(struct intel_engine_cs *ring,
               struct intel_context *new_context,
               u32 hw_flags)
{
    int ret;
    u32 flags = hw_flags | MI_MM_SPACE_GTT;
    const int num_rings =
        /* Use an extended w/a on ivb+ if signalling from other rings */
        i915_semaphore_is_enabled(ring->dev) ?
        hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
        0;
    int len, i, ret;

    /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
     * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -576,33 +491,61 @@ mi_set_context(struct intel_engine_cs *ring,
            return ret;
    }

    ret = intel_ring_begin(ring, 6);
    /* These flags are for resource streamer on HSW+ */
    if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
        flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

    len = 4;
    if (INTEL_INFO(ring->dev)->gen >= 7)
        len += 2 + (num_rings ? 4*num_rings + 2 : 0);

    ret = intel_ring_begin(ring, len);
    if (ret)
        return ret;

    /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
    if (INTEL_INFO(ring->dev)->gen >= 7)
    if (INTEL_INFO(ring->dev)->gen >= 7) {
        intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
    else
        intel_ring_emit(ring, MI_NOOP);
        if (num_rings) {
            struct intel_engine_cs *signaller;

            intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
            for_each_ring(signaller, to_i915(ring->dev), i) {
                if (signaller == ring)
                    continue;

                intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
                intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
            }
        }
    }

    intel_ring_emit(ring, MI_NOOP);
    intel_ring_emit(ring, MI_SET_CONTEXT);
    intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
            MI_MM_SPACE_GTT |
            MI_SAVE_EXT_STATE_EN |
            MI_RESTORE_EXT_STATE_EN |
            hw_flags);
            flags);
    /*
     * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
     * WaMiSetContext_Hang:snb,ivb,vlv
     */
    intel_ring_emit(ring, MI_NOOP);

    if (INTEL_INFO(ring->dev)->gen >= 7)
    if (INTEL_INFO(ring->dev)->gen >= 7) {
        if (num_rings) {
            struct intel_engine_cs *signaller;

            intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
            for_each_ring(signaller, to_i915(ring->dev), i) {
                if (signaller == ring)
                    continue;

                intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
                intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
            }
        }
        intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
    else
        intel_ring_emit(ring, MI_NOOP);
    }

    intel_ring_advance(ring);
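Editor's note: the reworked mi_set_context() sizes its intel_ring_begin() reservation from num_rings before emitting anything, and the reservation must match the emission exactly. A sketch of just that dword budget, using __builtin_popcount() in place of the kernel's hweight32() (mi_set_context_len is a hypothetical name):

/* Each other engine costs two dwords (register, value) in each of the
 * two MI_LOAD_REGISTER_IMM blocks, plus one dword per LRI header, on
 * top of the ARB_DISABLE/ARB_ENABLE pair. */
static int mi_set_context_len(int gen, unsigned int ring_mask,
                              int semaphores)
{
    int num_rings = semaphores ? __builtin_popcount(ring_mask) - 1 : 0;
    int len = 4;    /* NOOP, MI_SET_CONTEXT, address|flags, NOOP */

    if (gen >= 7)
        len += 2 + (num_rings ? 4 * num_rings + 2 : 0);
    return len;
}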
@@ -614,9 +557,9 @@ static int do_switch(struct intel_engine_cs *ring,
{
    struct drm_i915_private *dev_priv = ring->dev->dev_private;
    struct intel_context *from = ring->last_context;
    struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
    u32 hw_flags = 0;
    bool uninitialized = false;
    struct i915_vma *vma;
    int ret, i;

    if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -642,8 +585,9 @@ static int do_switch(struct intel_engine_cs *ring,
     */
    from = ring->last_context;

    if (USES_FULL_PPGTT(ring->dev)) {
        ret = ppgtt->switch_mm(ppgtt, ring, false);
    if (to->ppgtt) {
        trace_switch_mm(ring, to);
        ret = to->ppgtt->switch_mm(to->ppgtt, ring);
        if (ret)
            goto unpin_out;
    }
@@ -666,11 +610,10 @@ static int do_switch(struct intel_engine_cs *ring,
    if (ret)
        goto unpin_out;

    if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
        struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
                                                   &dev_priv->gtt.base);
        vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
    }
    vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
    if (!(vma->bound & GLOBAL_BIND))
        vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
                GLOBAL_BIND);

    if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
        hw_flags |= MI_RESTORE_INHIBIT;
@@ -723,6 +666,12 @@ done:
    ring->last_context = to;

    if (uninitialized) {
        if (ring->init_context) {
            ret = ring->init_context(ring, to);
            if (ret)
                DRM_ERROR("ring init context: %d\n", ret);
        }

        ret = i915_gem_render_state_init(ring);
        if (ret)
            DRM_ERROR("init render state: %d\n", ret);
@@ -743,14 +692,19 @@ unpin_out:
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 and create and destroy. If the context is in use by the GPU,
 * it will have a refoucnt > 1. This allows us to destroy the context abstract
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct intel_engine_cs *ring,
                        struct intel_context *to)
{
    struct drm_i915_private *dev_priv = ring->dev->dev_private;

    WARN_ON(i915.enable_execlists);
    WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

    if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
@@ -766,9 +720,9 @@ int i915_switch_context(struct intel_engine_cs *ring,
    return do_switch(ring, to);
}

static bool hw_context_enabled(struct drm_device *dev)
static bool contexts_enabled(struct drm_device *dev)
{
    return to_i915(dev)->hw_context_size;
    return i915.enable_execlists || to_i915(dev)->hw_context_size;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -779,14 +733,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
    struct intel_context *ctx;
    int ret;

    if (!hw_context_enabled(dev))
    if (!contexts_enabled(dev))
        return -ENODEV;

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
        return ret;

    ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
    ctx = i915_gem_create_context(dev, file_priv);
    mutex_unlock(&dev->struct_mutex);
    if (IS_ERR(ctx))
        return PTR_ERR(ctx);
@@ -243,7 +243,7 @@ int
i915_gem_evict_everything(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_address_space *vm;
    struct i915_address_space *vm, *v;
    bool lists_empty = true;
    int ret;

@@ -270,7 +270,7 @@ i915_gem_evict_everything(struct drm_device *dev)
    i915_gem_retire_requests(dev);

    /* Having flushed everything, unbind() should never raise an error */
    list_for_each_entry(vm, &dev_priv->vm_list, global_link)
    list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
        WARN_ON(i915_gem_evict_vm(vm, false));

    return 0;
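Editor's note: the switch to list_for_each_entry_safe() matters because evicting a VM may unlink it from vm_list mid-walk; the plain iterator would then chase a stale next pointer, while the safe variant caches the successor in the extra cursor before the body runs. A kernel-style sketch with hypothetical types:

#include <linux/list.h>

/* 'foo_vm' stands in for i915_address_space. */
struct foo_vm {
    struct list_head global_link;
};

static void drop_all(struct list_head *vm_list)
{
    struct foo_vm *vm, *v;

    list_for_each_entry_safe(vm, v, vm_list, global_link) {
        /* Safe to unlink: 'v' already points at the successor. */
        list_del(&vm->global_link);
    }
}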
@@ -35,6 +35,7 @@

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)
@@ -101,7 +102,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
           struct i915_address_space *vm,
           struct drm_file *file)
{
    struct drm_i915_private *dev_priv = vm->dev->dev_private;
    struct drm_i915_gem_object *obj;
    struct list_head objects;
    int i, ret;
@@ -136,20 +136,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
    i = 0;
    while (!list_empty(&objects)) {
        struct i915_vma *vma;
        struct i915_address_space *bind_vm = vm;

        if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
            USES_FULL_PPGTT(vm->dev)) {
            ret = -EINVAL;
            goto err;
        }

        /* If we have secure dispatch, or the userspace assures us that
         * they know what they're doing, use the GGTT VM.
         */
        if (((args->flags & I915_EXEC_SECURE) &&
            (i == (args->buffer_count - 1))))
            bind_vm = &dev_priv->gtt.base;

        obj = list_first_entry(&objects,
                               struct drm_i915_gem_object,
@@ -163,7 +149,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
         * from the (obj, vm) we don't run the risk of creating
         * duplicated vmas for the same vm.
         */
        vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
        vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
        if (IS_ERR(vma)) {
            DRM_DEBUG("Failed to lookup VMA\n");
            ret = PTR_ERR(vma);
@@ -276,7 +262,6 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
           uint64_t target_offset)
{
    struct drm_device *dev = obj->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t page_offset = offset_in_page(reloc->offset);
    uint64_t delta = reloc->delta + target_offset;
    char *vaddr;
@@ -286,21 +271,24 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
    if (ret)
        return ret;

    vaddr = (char*)dev_priv->gtt.mappable+4096;
    MapPage(vaddr,(addr_t)i915_gem_object_get_page(obj,reloc->offset >> PAGE_SHIFT), PG_SW);
    vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                reloc->offset >> PAGE_SHIFT));
    *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

    if (INTEL_INFO(dev)->gen >= 8) {
        page_offset = offset_in_page(page_offset + sizeof(uint32_t));

        if (page_offset == 0) {
            MapPage(vaddr,(addr_t)i915_gem_object_get_page(obj,
               (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT), PG_SW);
            kunmap_atomic(vaddr);
            vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
        }

        *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
    }

    kunmap_atomic(vaddr);

    return 0;
}
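Editor's note: the kmap_atomic() conversion above has one subtlety worth spelling out: on gen8 the relocation is 64 bits, and its upper half may start exactly on the next page, which then needs its own mapping. A userspace sketch of the same split-write logic (write_reloc and the page array are hypothetical):

#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096u

/* pages[] holds one buffer per backing page of the object. */
static void write_reloc(uint8_t *pages[], uint64_t offset, uint64_t delta)
{
    uint32_t page_offset = offset % SKETCH_PAGE_SIZE;
    uint64_t page = offset / SKETCH_PAGE_SIZE;
    uint32_t lo = (uint32_t)delta;
    uint32_t hi = (uint32_t)(delta >> 32);

    memcpy(pages[page] + page_offset, &lo, sizeof(lo));

    page_offset = (page_offset + sizeof(uint32_t)) % SKETCH_PAGE_SIZE;
    if (page_offset == 0)
        page++;    /* crossed a boundary: remap, as kunmap/kmap does */
    memcpy(pages[page] + page_offset, &hi, sizeof(hi));
}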
@@ -312,7 +300,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
    struct drm_device *dev = obj->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint64_t delta = reloc->delta + target_offset;
    uint32_t __iomem *reloc_entry;
    uint64_t offset;
    void __iomem *reloc_page;
    int ret;

@@ -325,13 +313,13 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
        return ret;

    /* Map the page containing the relocation we're going to perform.  */
    reloc->offset += i915_gem_obj_ggtt_offset(obj);
    offset = i915_gem_obj_ggtt_offset(obj);
    offset += reloc->offset;
    MapPage(dev_priv->gtt.mappable,dev_priv->gtt.mappable_base +
            (reloc->offset & PAGE_MASK), PG_SW);
            (offset & PAGE_MASK), PG_SW);
    reloc_page = dev_priv->gtt.mappable;
    reloc_entry = (uint32_t __iomem *)
        (reloc_page + offset_in_page(reloc->offset));
    iowrite32(lower_32_bits(delta), reloc_entry);
    iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

    return 0;
@@ -363,12 +351,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
     * through the ppgtt for non_secure batchbuffers. */
    if (unlikely(IS_GEN6(dev) &&
        reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
        !target_i915_obj->has_global_gtt_mapping)) {
        struct i915_vma *vma =
            list_first_entry(&target_i915_obj->vma_list,
                     typeof(*vma), vma_link);
        vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
    }
        !(target_vma->bound & GLOBAL_BIND)))
        target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
                GLOBAL_BIND);

    /* Validate that the target is in a valid r/w GPU domain */
    if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -522,14 +507,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
    return ret;
}

static int
need_reloc_mappable(struct i915_vma *vma)
{
    struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
    return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
        i915_is_ggtt(vma->vm);
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                struct intel_engine_cs *ring,
@@ -537,20 +514,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
{
    struct drm_i915_gem_object *obj = vma->obj;
    struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
    bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
    bool need_fence;
    uint64_t flags;
    int ret;

    flags = 0;

    need_fence =
        has_fenced_gpu_access &&
        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
        obj->tiling_mode != I915_TILING_NONE;
    if (need_fence || need_reloc_mappable(vma))
        flags |= PIN_MAPPABLE;

    if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
        flags |= PIN_GLOBAL | PIN_MAPPABLE;
    if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
        flags |= PIN_GLOBAL;
    if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -562,7 +531,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,

    entry->flags |= __EXEC_OBJECT_HAS_PIN;

    if (has_fenced_gpu_access) {
        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
            ret = i915_gem_object_get_fence(obj);
            if (ret)
@@ -570,9 +538,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,

            if (i915_gem_object_pin_fence(obj))
                entry->flags |= __EXEC_OBJECT_HAS_FENCE;

            obj->pending_fenced_gpu_access = true;
        }
    }

    if (entry->offset != vma->node.start) {
@@ -589,26 +554,40 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
}

static bool
eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
need_reloc_mappable(struct i915_vma *vma)
{
    struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

    if (entry->relocation_count == 0)
        return false;

    if (!i915_is_ggtt(vma->vm))
        return false;

    /* See also use_cpu_reloc() */
    if (HAS_LLC(vma->obj->base.dev))
        return false;

    if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
        return false;

    return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
    struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
    struct drm_i915_gem_object *obj = vma->obj;
    bool need_fence, need_mappable;

    need_fence =
        has_fenced_gpu_access &&
        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
        obj->tiling_mode != I915_TILING_NONE;
    need_mappable = need_fence || need_reloc_mappable(vma);

    WARN_ON((need_mappable || need_fence) &&
    WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
           !i915_is_ggtt(vma->vm));

    if (entry->alignment &&
        vma->node.start & (entry->alignment - 1))
        return true;

    if (need_mappable && !obj->map_and_fenceable)
    if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
        return true;

    if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
@@ -630,9 +609,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
    bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
    int retry;

    if (list_empty(vmas))
        return 0;

    i915_gem_retire_requests_ring(ring);

    vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -646,20 +622,21 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
        obj = vma->obj;
        entry = vma->exec_entry;

        if (!has_fenced_gpu_access)
            entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
        need_fence =
            has_fenced_gpu_access &&
            entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
            obj->tiling_mode != I915_TILING_NONE;
        need_mappable = need_fence || need_reloc_mappable(vma);

        if (need_mappable)
        if (need_mappable) {
            entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
            list_move(&vma->exec_list, &ordered_vmas);
        else
        } else
            list_move_tail(&vma->exec_list, &ordered_vmas);

        obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
        obj->base.pending_write_domain = 0;
        obj->pending_fenced_gpu_access = false;
    }
    list_splice(&ordered_vmas, vmas);

@@ -684,7 +661,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
            if (!drm_mm_node_allocated(&vma->node))
                continue;

            if (eb_vma_misplaced(vma, has_fenced_gpu_access))
            if (eb_vma_misplaced(vma))
                ret = i915_vma_unbind(vma);
            else
                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -732,9 +709,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
    int i, total, ret;
    unsigned count = args->buffer_count;

    if (WARN_ON(list_empty(&eb->vmas)))
        return 0;

    vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

    /* We may process another execbuffer during the unlock... */
@@ -878,18 +852,24 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
validate_exec_list(struct drm_device *dev,
           struct drm_i915_gem_exec_object2 *exec,
           int count)
{
    int i;
    unsigned relocs_total = 0;
    unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
    unsigned invalid_flags;
    int i;

    invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
    if (USES_FULL_PPGTT(dev))
        invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

    for (i = 0; i < count; i++) {
        char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
        int length; /* limited by fault_in_pages_readable() */

        if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
        if (exec[i].flags & invalid_flags)
            return -EINVAL;

        /* First check for malicious input causing overflow in
@@ -932,16 +912,26 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
        return ERR_PTR(-EIO);
    }

    if (i915.enable_execlists && !ctx->engine[ring->id].state) {
        int ret = intel_lr_context_deferred_create(ctx, ring);
        if (ret) {
            DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
            return ERR_PTR(ret);
        }
    }

    return ctx;
}
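Editor's note: the validate_exec_list() hunk above keeps the "malicious input causing overflow" guard: the running total of relocation counts is capped so that the later relocs_total * sizeof(entry) computation can never wrap. A standalone sketch of that guard (check_reloc_counts and the entry struct are hypothetical):

#include <limits.h>

struct reloc_entry { char bytes[32]; };  /* stand-in for the real entry */

static int check_reloc_counts(const unsigned *counts, int n)
{
    unsigned relocs_total = 0;
    unsigned relocs_max = UINT_MAX / sizeof(struct reloc_entry);
    int i;

    for (i = 0; i < n; i++) {
        /* Reject before adding, so relocs_total itself never wraps. */
        if (counts[i] > relocs_max - relocs_total)
            return -1;
        relocs_total += counts[i];
    }
    return 0;
}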
static void
void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                   struct intel_engine_cs *ring)
{
    u32 seqno = intel_ring_get_seqno(ring);
    struct i915_vma *vma;

    list_for_each_entry(vma, vmas, exec_list) {
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        struct drm_i915_gem_object *obj = vma->obj;
        u32 old_read = obj->base.read_domains;
        u32 old_write = obj->base.write_domain;
@@ -950,24 +940,31 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
        if (obj->base.write_domain == 0)
            obj->base.pending_read_domains |= obj->base.read_domains;
        obj->base.read_domains = obj->base.pending_read_domains;
        obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

        i915_vma_move_to_active(vma, ring);
        if (obj->base.write_domain) {
            obj->dirty = 1;
            obj->last_write_seqno = intel_ring_get_seqno(ring);
            obj->last_write_seqno = seqno;

            intel_fb_obj_invalidate(obj, ring);

            /* update for the implicit flush after a batch */
            obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
        }
        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
            obj->last_fenced_seqno = seqno;
            if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
                struct drm_i915_private *dev_priv = to_i915(ring->dev);
                list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
                           &dev_priv->mm.fence_list);
            }
        }

        trace_i915_gem_object_change_domain(obj, old_read, old_write);
    }
}

static void
void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                    struct drm_file *file,
                    struct intel_engine_cs *ring,
@@ -1008,7 +1005,48 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
}

static int
legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
i915_emit_box(struct intel_engine_cs *ring,
          struct drm_clip_rect *box,
          int DR1, int DR4)
{
    int ret;

    if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
        box->y2 <= 0 || box->x2 <= 0) {
        DRM_ERROR("Bad box %d,%d..%d,%d\n",
              box->x1, box->y1, box->x2, box->y2);
        return -EINVAL;
    }

    if (INTEL_INFO(ring->dev)->gen >= 4) {
        ret = intel_ring_begin(ring, 4);
        if (ret)
            return ret;

        intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
        intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
        intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
        intel_ring_emit(ring, DR4);
    } else {
        ret = intel_ring_begin(ring, 6);
        if (ret)
            return ret;

        intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
        intel_ring_emit(ring, DR1);
        intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
        intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
        intel_ring_emit(ring, DR4);
        intel_ring_emit(ring, 0);
    }
    intel_ring_advance(ring);

    return 0;
}

int
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                   struct intel_engine_cs *ring,
                   struct intel_context *ctx,
                   struct drm_i915_gem_execbuffer2 *args,
@@ -1135,7 +1173,7 @@ legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
    exec_len = args->batch_len;
    if (cliprects) {
        for (i = 0; i < args->num_cliprects; i++) {
            ret = i915_emit_box(dev, &cliprects[i],
            ret = i915_emit_box(ring, &cliprects[i],
                        args->DR1, args->DR4);
            if (ret)
                goto error;
@@ -1235,7 +1273,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
    if (!i915_gem_check_execbuffer(args))
        return -EINVAL;

    ret = validate_exec_list(exec, args->buffer_count);
    ret = validate_exec_list(dev, exec, args->buffer_count);
    if (ret)
        return ret;

@@ -1282,12 +1320,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
    if (ret)
        goto pre_mutex_err;

    if (dev_priv->ums.mm_suspended) {
        mutex_unlock(&dev->struct_mutex);
        ret = -EBUSY;
        goto pre_mutex_err;
    }

    ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
    if (IS_ERR(ctx)) {
        mutex_unlock(&dev->struct_mutex);
@@ -1297,8 +1329,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,

    i915_gem_context_reference(ctx);

    vm = ctx->vm;
    if (!USES_FULL_PPGTT(dev))
    if (ctx->ppgtt)
        vm = &ctx->ppgtt->base;
    else
        vm = &dev_priv->gtt.base;

    eb = eb_create(args);
@@ -1365,25 +1398,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
    /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
     * batch" bit. Hence we need to pin secure batches into the global gtt.
     * hsw should have this fixed, but bdw mucks it up again. */
    if (flags & I915_DISPATCH_SECURE &&
        !batch_obj->has_global_gtt_mapping) {
        /* When we have multiple VMs, we'll need to make sure that we
         * allocate space first */
        struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
        BUG_ON(!vma);
        vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
    }
    if (flags & I915_DISPATCH_SECURE) {
        /*
         * So on first glance it looks freaky that we pin the batch here
         * outside of the reservation loop. But:
         *   - The batch is already pinned into the relevant ppgtt, so we
         *     already have the backing storage fully allocated.
         *   - No other BO uses the global gtt (well contexts, but meh),
         *     so we don't really have issues with multiple objects not
         *     fitting due to fragmentation.
         *   So this is actually safe.
         */
        ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
        if (ret)
            goto err;

    if (flags & I915_DISPATCH_SECURE)
        exec_start += i915_gem_obj_ggtt_offset(batch_obj);
    else
    } else
        exec_start += i915_gem_obj_offset(batch_obj, vm);

    ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
            args, &eb->vmas, batch_obj, exec_start, flags);
    if (ret)
        goto err;
    ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
            &eb->vmas, batch_obj, exec_start, flags);

    /*
     * FIXME: We crucially rely upon the active tracking for the (ppgtt)
     * batch vma for correctness. For less ugly and less fragility this
     * needs to be adjusted to also track the ggtt batch vma properly as
     * active.
     */
    if (flags & I915_DISPATCH_SECURE)
        i915_gem_object_ggtt_unpin(batch_obj);
err:
    /* the request owns the ref now */
    i915_gem_context_unreference(ctx);
@@ -23,42 +23,40 @@
 *
 */

#define AGP_NORMAL_MEMORY 0

#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)

#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <asm/cacheflush.h>

static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);

bool intel_enable_ppgtt(struct drm_device *dev, bool full)
{
    if (i915.enable_ppgtt == 0)
        return false;

    if (i915.enable_ppgtt == 1 && full)
        return false;

    return true;
}

static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
    if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
    bool has_aliasing_ppgtt;
    bool has_full_ppgtt;

    has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
    has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
    if (IS_GEN8(dev))
        has_full_ppgtt = false; /* XXX why? */

    /*
     * We don't allow disabling PPGTT for gen9+ as it's a requirement for
     * execlists, the sole mechanism available to submit work.
     */
    if (INTEL_INFO(dev)->gen < 9 &&
        (enable_ppgtt == 0 || !has_aliasing_ppgtt))
        return 0;

    if (enable_ppgtt == 1)
        return 1;

    if (enable_ppgtt == 2 && HAS_PPGTT(dev))
    if (enable_ppgtt == 2 && has_full_ppgtt)
        return 2;

#ifdef CONFIG_INTEL_IOMMU
@@ -76,7 +74,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
    return 0;
}

    return HAS_ALIASING_PPGTT(dev) ? 1 : 0;
    return has_aliasing_ppgtt ? 1 : 0;
}
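Editor's note: the reworked sanitize_enable_ppgtt() above reduces to a small decision table over the module parameter and the hardware generation. A condensed sketch that ignores the IOMMU special case (pick_ppgtt_mode is a hypothetical name; 0 = off, 1 = aliasing, 2 = full):

static int pick_ppgtt_mode(int gen, int requested)
{
    int has_aliasing = gen >= 6;
    int has_full = gen >= 7 && gen != 8;  /* gen8 full is off: "XXX why?" */

    /* gen9+ may not disable PPGTT: execlists depend on it. */
    if (gen < 9 && (requested == 0 || !has_aliasing))
        return 0;
    if (requested == 1)
        return 1;
    if (requested == 2 && has_full)
        return 2;
    return has_aliasing ? 1 : 0;  /* best the hardware offers */
}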
@@ -84,7 +82,6 @@ static void ppgtt_bind_vma(struct i915_vma *vma,
               enum i915_cache_level cache_level,
               u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);

static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
                         enum i915_cache_level level,
@@ -174,9 +171,6 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
    gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
    pte |= GEN6_PTE_ADDR_ENCODE(addr);

    /* Mark the page as writeable.  Other platforms don't have a
     * setting for read-only/writable, so this matches that behavior.
     */
    if (!(flags & PTE_READ_ONLY))
        pte |= BYT_PTE_WRITEABLE;

@@ -222,19 +216,12 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
              uint64_t val, bool synchronous)
              uint64_t val)
{
    struct drm_i915_private *dev_priv = ring->dev->dev_private;
    int ret;

    BUG_ON(entry >= 4);

    if (synchronous) {
        I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
        I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
        return 0;
    }

    ret = intel_ring_begin(ring, 6);
    if (ret)
        return ret;
@@ -251,8 +238,7 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
}

static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
              struct intel_engine_cs *ring,
              bool synchronous)
              struct intel_engine_cs *ring)
{
    int i, ret;

@@ -261,7 +247,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,

    for (i = used_pd - 1; i >= 0; i--) {
        dma_addr_t addr = ppgtt->pd_dma_addr[i];
        ret = gen8_write_pdp(ring, i, addr, synchronous);
        ret = gen8_write_pdp(ring, i, addr);
        if (ret)
            return ret;
    }
@@ -283,10 +269,6 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
    unsigned num_entries = length >> PAGE_SHIFT;
    unsigned last_pte, i;

    pt_vaddr = (gen8_gtt_pte_t*)AllocKernelSpace(4096);
    if(pt_vaddr == NULL)
        return;

    scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
                      I915_CACHE_LLC, use_scratch);

@@ -297,7 +279,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
        if (last_pte > GEN8_PTES_PER_PAGE)
            last_pte = GEN8_PTES_PER_PAGE;

        MapPage(pt_vaddr,(addr_t)page_table, PG_SW);
        pt_vaddr = kmap_atomic(page_table);

        for (i = pte; i < last_pte; i++) {
            pt_vaddr[i] = scratch_pte;
@@ -306,6 +288,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,

        if (!HAS_LLC(ppgtt->base.dev))
            drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
        kunmap_atomic(pt_vaddr);

        pte = 0;
        if (++pde == GEN8_PDES_PER_PAGE) {
@@ -313,7 +296,6 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
            pde = 0;
        }
    }
    FreeKernelSpace(pt_vaddr);
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
@@ -329,31 +311,35 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
    unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
    struct sg_page_iter sg_iter;

    pt_vaddr = AllocKernelSpace(4096);
    if(pt_vaddr == NULL)
        return;

    MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);
    pt_vaddr = NULL;

    for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
        if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
            break;

        if (pt_vaddr == NULL)
            pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);

        pt_vaddr[pte] =
            gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
                    cache_level, true);
        if (++pte == GEN8_PTES_PER_PAGE) {
            if (!HAS_LLC(ppgtt->base.dev))
                drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
            kunmap_atomic(pt_vaddr);
            pt_vaddr = NULL;
            if (++pde == GEN8_PDES_PER_PAGE) {
                pdpe++;
                pde = 0;
            }
            pte = 0;
            MapPage(pt_vaddr,(addr_t)(ppgtt->gen8_pt_pages[pdpe][pde]), 3);
        }
    }
    FreeKernelSpace(pt_vaddr);
    if (pt_vaddr) {
        if (!HAS_LLC(ppgtt->base.dev))
            drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
        kunmap_atomic(pt_vaddr);
    }
}
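Editor's note: the insert_entries conversions above share one shape: map a page-table page lazily on first write, unmap it as soon as the walk steps past it, and flush the final partially written page after the loop. A generic sketch, with map_page/unmap_page as hypothetical stand-ins for kmap_atomic/kunmap_atomic:

#include <stdint.h>

struct page;                                 /* opaque, as in the kernel */
extern uint64_t *map_page(struct page *p);   /* hypothetical kmap_atomic() */
extern void unmap_page(uint64_t *vaddr);     /* hypothetical kunmap_atomic() */

void write_entries(struct page **pt_pages, const uint64_t *vals,
                   unsigned count, unsigned entries_per_page)
{
    uint64_t *vaddr = NULL;
    unsigned i, pt = 0, slot = 0;

    for (i = 0; i < count; i++) {
        if (vaddr == NULL)
            vaddr = map_page(pt_pages[pt]);  /* map on first write */
        vaddr[slot] = vals[i];
        if (++slot == entries_per_page) {
            unmap_page(vaddr);               /* done with this page */
            vaddr = NULL;
            pt++;
            slot = 0;
        }
    }
    if (vaddr)
        unmap_page(vaddr);  /* the trailing partial page */
}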
|
||||
|
||||
static void gen8_free_page_tables(struct page **pt_pages)
|
||||
@ -409,9 +395,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
|
||||
struct i915_hw_ppgtt *ppgtt =
|
||||
container_of(vm, struct i915_hw_ppgtt, base);
|
||||
|
||||
list_del(&vm->global_link);
|
||||
drm_mm_takedown(&vm->mm);
|
||||
|
||||
gen8_ppgtt_unmap_pages(ppgtt);
|
||||
gen8_ppgtt_free(ppgtt);
|
||||
}
|
||||
@ -576,7 +559,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
|
||||
const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
|
||||
const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
|
||||
int i, j, ret;
|
||||
gen8_ppgtt_pde_t *pd_vaddr;
|
||||
|
||||
if (size % (1<<30))
|
||||
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
|
||||
@ -609,11 +591,9 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
|
||||
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
|
||||
* will never need to touch the PDEs again.
|
||||
*/
|
||||
|
||||
pd_vaddr = AllocKernelSpace(4096);
|
||||
|
||||
for (i = 0; i < max_pdp; i++) {
|
||||
MapPage(pd_vaddr,(addr_t)(&ppgtt->pd_pages[i]), 3);
|
||||
gen8_ppgtt_pde_t *pd_vaddr;
|
||||
pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
|
||||
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
|
||||
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
|
||||
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
|
||||
@ -621,10 +601,9 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
|
||||
}
|
||||
if (!HAS_LLC(ppgtt->base.dev))
|
||||
drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
|
||||
kunmap_atomic(pd_vaddr);
|
||||
}
|
||||
FreeKernelSpace(pd_vaddr);
|
||||
|
||||
ppgtt->enable = gen8_ppgtt_enable;
|
||||
ppgtt->switch_mm = gen8_mm_switch;
|
||||
ppgtt->base.clear_range = gen8_ppgtt_clear_range;
|
||||
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
|
||||
@ -677,29 +656,10 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
|
||||
}
|
||||
|
||||
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
struct intel_engine_cs *ring,
|
||||
bool synchronous)
|
||||
struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_device *dev = ppgtt->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
/* If we're in reset, we can assume the GPU is sufficiently idle to
|
||||
* manually frob these bits. Ideally we could use the ring functions,
|
||||
* except our error handling makes it quite difficult (can't use
|
||||
* intel_ring_begin, ring->flush, or intel_ring_advance)
|
||||
*
|
||||
* FIXME: We should try not to special case reset
|
||||
*/
|
||||
if (synchronous ||
|
||||
i915_reset_in_progress(&dev_priv->gpu_error)) {
|
||||
WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
|
||||
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
|
||||
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
|
||||
POSTING_READ(RING_PP_DIR_BASE(ring));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* NB: TLBs must be flushed and invalidated before a switch */
|
||||
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
|
||||
if (ret)
|
||||
@ -721,29 +681,10 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
}
|
||||
|
||||
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
struct intel_engine_cs *ring,
|
||||
bool synchronous)
|
||||
struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_device *dev = ppgtt->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int ret;
|
||||
|
||||
/* If we're in reset, we can assume the GPU is sufficiently idle to
|
||||
* manually frob these bits. Ideally we could use the ring functions,
|
||||
* except our error handling makes it quite difficult (can't use
|
||||
* intel_ring_begin, ring->flush, or intel_ring_advance)
|
||||
*
|
||||
* FIXME: We should try not to special case reset
|
||||
*/
|
||||
if (synchronous ||
|
||||
i915_reset_in_progress(&dev_priv->gpu_error)) {
|
||||
WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
|
||||
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
|
||||
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
|
||||
POSTING_READ(RING_PP_DIR_BASE(ring));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* NB: TLBs must be flushed and invalidated before a switch */
|
||||
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
|
||||
if (ret)
|
||||
@ -772,14 +713,11 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
}
|
||||
|
||||
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
struct intel_engine_cs *ring,
|
||||
bool synchronous)
|
||||
struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_device *dev = ppgtt->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (!synchronous)
|
||||
return 0;
|
||||
|
||||
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
|
||||
I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
|
||||
@ -789,39 +727,20 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
|
||||
static void gen8_ppgtt_enable(struct drm_device *dev)
|
||||
{
|
||||
struct drm_device *dev = ppgtt->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_engine_cs *ring;
|
||||
int j, ret;
|
||||
int j;
|
||||
|
||||
for_each_ring(ring, dev_priv, j) {
|
||||
I915_WRITE(RING_MODE_GEN7(ring),
|
||||
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
|
||||
|
||||
/* We promise to do a switch later with FULL PPGTT. If this is
|
||||
* aliasing, this is the one and only switch we'll do */
|
||||
if (USES_FULL_PPGTT(dev))
|
||||
continue;
|
||||
|
||||
        ret = ppgtt->switch_mm(ppgtt, ring, true);
        if (ret)
            goto err_out;
    }

    return 0;

err_out:
    for_each_ring(ring, dev_priv, j)
        I915_WRITE(RING_MODE_GEN7(ring),
                   _MASKED_BIT_DISABLE(GFX_PPGTT_ENABLE));
    return ret;
}

static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
static void gen7_ppgtt_enable(struct drm_device *dev)
{
    struct drm_device *dev = ppgtt->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_engine_cs *ring;
    uint32_t ecochk, ecobits;
@@ -840,31 +759,16 @@ static int gen7_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
    I915_WRITE(GAM_ECOCHK, ecochk);

    for_each_ring(ring, dev_priv, i) {
        int ret;
        /* GFX_MODE is per-ring on gen7+ */
        I915_WRITE(RING_MODE_GEN7(ring),
                   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

        /* We promise to do a switch later with FULL PPGTT. If this is
         * aliasing, this is the one and only switch we'll do */
        if (USES_FULL_PPGTT(dev))
            continue;

        ret = ppgtt->switch_mm(ppgtt, ring, true);
        if (ret)
            return ret;
    }

    return 0;
}

static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
static void gen6_ppgtt_enable(struct drm_device *dev)
{
    struct drm_device *dev = ppgtt->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_engine_cs *ring;
    uint32_t ecochk, gab_ctl, ecobits;
    int i;

    ecobits = I915_READ(GAC_ECO_BITS);
    I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
@@ -877,14 +781,6 @@ static int gen6_ppgtt_enable(struct i915_hw_ppgtt *ppgtt)
    I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

    I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

    for_each_ring(ring, dev_priv, i) {
        int ret = ppgtt->switch_mm(ppgtt, ring, true);
        if (ret)
            return ret;
    }

    return 0;
}

/* PPGTT support for Sandybdrige/Gen6 and later */
@@ -904,27 +800,22 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,

    scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);

    pt_vaddr = AllocKernelSpace(4096);

    if(pt_vaddr == NULL)
        return;

    while (num_entries) {
        last_pte = first_pte + num_entries;
        if (last_pte > I915_PPGTT_PT_ENTRIES)
            last_pte = I915_PPGTT_PT_ENTRIES;

        MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
        pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

        for (i = first_pte; i < last_pte; i++)
            pt_vaddr[i] = scratch_pte;

        kunmap_atomic(pt_vaddr);

        num_entries -= last_pte - first_pte;
        first_pte = 0;
        act_pt++;
    };

    FreeKernelSpace(pt_vaddr);
}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
@@ -940,25 +831,24 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
    unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
    struct sg_page_iter sg_iter;

    pt_vaddr = AllocKernelSpace(4096);

    if(pt_vaddr == NULL)
        return;

    MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
    pt_vaddr = NULL;
    for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
        if (pt_vaddr == NULL)
            pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

        pt_vaddr[act_pte] =
            vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
                           cache_level, true, flags);

        if (++act_pte == I915_PPGTT_PT_ENTRIES) {
            kunmap_atomic(pt_vaddr);
            pt_vaddr = NULL;
            act_pt++;
            MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pt]), 3);
            act_pte = 0;
        }
    }
    FreeKernelSpace(pt_vaddr);
    if (pt_vaddr)
        kunmap_atomic(pt_vaddr);
}
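The insert loop above walks a scatterlist and splits a linear GTT entry index into a page-table page (act_pt) and a slot within it (act_pte), hopping to the next table whenever act_pte wraps at I915_PPGTT_PT_ENTRIES. A minimal standalone sketch of that index arithmetic, assuming the gen6 value of 1024 entries per table (4 KiB page / 4-byte PTE); the names here are illustrative, not driver API:

#include <stdio.h>

#define SKETCH_PT_ENTRIES 1024 /* I915_PPGTT_PT_ENTRIES on gen6, by assumption */

int main(void)
{
    unsigned first_entry = 1500; /* hypothetical linear PTE index */
    unsigned act_pt  = first_entry / SKETCH_PT_ENTRIES; /* which page table */
    unsigned act_pte = first_entry % SKETCH_PT_ENTRIES; /* slot within it */

    /* entry 1500 lands in page table 1, slot 476 */
    printf("pt=%u pte=%u\n", act_pt, act_pte);
    return 0;
}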

static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -988,8 +878,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
    struct i915_hw_ppgtt *ppgtt =
        container_of(vm, struct i915_hw_ppgtt, base);

    list_del(&vm->global_link);
    drm_mm_takedown(&ppgtt->base.mm);
    drm_mm_remove_node(&ppgtt->node);

    gen6_ppgtt_unmap_pages(ppgtt);
@@ -1110,13 +998,10 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)

    ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
    if (IS_GEN6(dev)) {
        ppgtt->enable = gen6_ppgtt_enable;
        ppgtt->switch_mm = gen6_mm_switch;
    } else if (IS_HASWELL(dev)) {
        ppgtt->enable = gen7_ppgtt_enable;
        ppgtt->switch_mm = hsw_mm_switch;
    } else if (IS_GEN7(dev)) {
        ppgtt->enable = gen7_ppgtt_enable;
        ppgtt->switch_mm = gen7_mm_switch;
    } else
        BUG();
@@ -1147,39 +1032,118 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
             ppgtt->node.size >> 20,
             ppgtt->node.start / PAGE_SIZE);

    gen6_write_pdes(ppgtt);
    DRM_DEBUG("Adding PPGTT at offset %x\n",
              ppgtt->pd_offset << 10);

    return 0;
}

int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret = 0;

    ppgtt->base.dev = dev;
    ppgtt->base.scratch = dev_priv->gtt.base.scratch;

    if (INTEL_INFO(dev)->gen < 8)
        ret = gen6_ppgtt_init(ppgtt);
    else if (IS_GEN8(dev))
        ret = gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
        return gen6_ppgtt_init(ppgtt);
    else if (IS_GEN8(dev) || IS_GEN9(dev))
        return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
    else
        BUG();
}
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret = 0;

    if (!ret) {
        struct drm_i915_private *dev_priv = dev->dev_private;
    ret = __hw_ppgtt_init(dev, ppgtt);
    if (ret == 0) {
        kref_init(&ppgtt->ref);
        drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
                    ppgtt->base.total);
        i915_init_vm(dev_priv, &ppgtt->base);
        if (INTEL_INFO(dev)->gen < 8) {
            gen6_write_pdes(ppgtt);
            DRM_DEBUG("Adding PPGTT at offset %x\n",
                      ppgtt->pd_offset << 10);
        }

    return ret;
}

int i915_ppgtt_init_hw(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_engine_cs *ring;
    struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
    int i, ret = 0;

    /* In the case of execlists, PPGTT is enabled by the context descriptor
     * and the PDPs are contained within the context itself. We don't
     * need to do anything here. */
    if (i915.enable_execlists)
        return 0;

    if (!USES_PPGTT(dev))
        return 0;

    if (IS_GEN6(dev))
        gen6_ppgtt_enable(dev);
    else if (IS_GEN7(dev))
        gen7_ppgtt_enable(dev);
    else if (INTEL_INFO(dev)->gen >= 8)
        gen8_ppgtt_enable(dev);
    else
        WARN_ON(1);

    if (ppgtt) {
        for_each_ring(ring, dev_priv, i) {
            ret = ppgtt->switch_mm(ppgtt, ring);
            if (ret != 0)
                return ret;
        }
    }

    return ret;
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
    struct i915_hw_ppgtt *ppgtt;
    int ret;

    ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
    if (!ppgtt)
        return ERR_PTR(-ENOMEM);

    ret = i915_ppgtt_init(dev, ppgtt);
    if (ret) {
        kfree(ppgtt);
        return ERR_PTR(ret);
    }

    ppgtt->file_priv = fpriv;

    trace_i915_ppgtt_create(&ppgtt->base);

    return ppgtt;
}

void i915_ppgtt_release(struct kref *kref)
{
    struct i915_hw_ppgtt *ppgtt =
        container_of(kref, struct i915_hw_ppgtt, ref);

    trace_i915_ppgtt_release(&ppgtt->base);

    /* vmas should already be unbound */
    WARN_ON(!list_empty(&ppgtt->base.active_list));
    WARN_ON(!list_empty(&ppgtt->base.inactive_list));

    list_del(&ppgtt->base.global_link);
    drm_mm_takedown(&ppgtt->base.mm);

    ppgtt->base.cleanup(&ppgtt->base);
    kfree(ppgtt);
}

static void
ppgtt_bind_vma(struct i915_vma *vma,
@@ -1254,7 +1218,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
        fault_reg = I915_READ(RING_FAULT_REG(ring));
        if (fault_reg & RING_FAULT_VALID) {
            DRM_DEBUG_DRIVER("Unexpected fault\n"
                     "\tAddr: 0x%08lx\\n"
                     "\tAddr: 0x%08lx\n"
                     "\tAddress space: %s\n"
                     "\tSource ID: %d\n"
                     "\tType: %d\n",
@@ -1269,6 +1233,16 @@ void i915_check_and_clear_faults(struct drm_device *dev)
    POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}

static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
    if (INTEL_INFO(dev_priv->dev)->gen < 6) {
        intel_gtt_chipset_flush();
    } else {
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
    }
}

void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1285,6 +1259,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
                     dev_priv->gtt.base.start,
                     dev_priv->gtt.base.total,
                     true);

    i915_ggtt_flush(dev_priv);
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -1312,7 +1288,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
         * Unfortunately above, we've just wiped out the mappings
         * without telling our object about it. So we need to fake it.
         */
        obj->has_global_gtt_mapping = 0;
        vma->bound &= ~GLOBAL_BIND;
        vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
    }

@@ -1337,7 +1313,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
    }

    i915_gem_chipset_flush(dev);
    i915_ggtt_flush(dev_priv);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -1509,7 +1485,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,

    BUG_ON(!i915_is_ggtt(vma->vm));
    intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
    vma->obj->has_global_gtt_mapping = 1;
    vma->bound = GLOBAL_BIND;
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1528,7 +1504,7 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
    const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;

    BUG_ON(!i915_is_ggtt(vma->vm));
    vma->obj->has_global_gtt_mapping = 0;
    vma->bound = 0;
    intel_gtt_clear_range(first, size);
}

@@ -1556,24 +1532,24 @@ static void ggtt_bind_vma(struct i915_vma *vma,
     * flags. At all other times, the GPU will use the aliasing PPGTT.
     */
    if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
        if (!obj->has_global_gtt_mapping ||
        if (!(vma->bound & GLOBAL_BIND) ||
            (cache_level != obj->cache_level)) {
            vma->vm->insert_entries(vma->vm, obj->pages,
                                    vma->node.start,
                                    cache_level, flags);
            obj->has_global_gtt_mapping = 1;
            vma->bound |= GLOBAL_BIND;
        }
    }

    if (dev_priv->mm.aliasing_ppgtt &&
        (!obj->has_aliasing_ppgtt_mapping ||
        (!(vma->bound & LOCAL_BIND) ||
         (cache_level != obj->cache_level))) {
        struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
        appgtt->base.insert_entries(&appgtt->base,
                                    vma->obj->pages,
                                    vma->node.start,
                                    cache_level, flags);
        vma->obj->has_aliasing_ppgtt_mapping = 1;
        vma->bound |= LOCAL_BIND;
    }
}

@@ -1583,21 +1559,21 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj = vma->obj;

    if (obj->has_global_gtt_mapping) {
    if (vma->bound & GLOBAL_BIND) {
        vma->vm->clear_range(vma->vm,
                             vma->node.start,
                             obj->base.size,
                             true);
        obj->has_global_gtt_mapping = 0;
        vma->bound &= ~GLOBAL_BIND;
    }

    if (obj->has_aliasing_ppgtt_mapping) {
    if (vma->bound & LOCAL_BIND) {
        struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
        appgtt->base.clear_range(&appgtt->base,
                                 vma->node.start,
                                 obj->base.size,
                                 true);
        obj->has_aliasing_ppgtt_mapping = 0;
        vma->bound &= ~LOCAL_BIND;
    }
}

@@ -1634,7 +1610,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
    }
}

void i915_gem_setup_global_gtt(struct drm_device *dev,
static int i915_gem_setup_global_gtt(struct drm_device *dev,
                               unsigned long start,
                               unsigned long mappable_end,
                               unsigned long end)
@@ -1653,6 +1629,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
    struct drm_mm_node *entry;
    struct drm_i915_gem_object *obj;
    unsigned long hole_start, hole_end;
    int ret;

    BUG_ON(mappable_end > end);

@@ -1664,15 +1641,17 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
    /* Mark any preallocated objects as occupied */
    list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
        struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
        int ret;

        DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
                      i915_gem_obj_ggtt_offset(obj), obj->base.size);

        WARN_ON(i915_gem_obj_ggtt_bound(obj));
        ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
        if (ret)
            DRM_DEBUG_KMS("Reservation failed\n");
        obj->has_global_gtt_mapping = 1;
        if (ret) {
            DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
            return ret;
        }
        vma->bound |= GLOBAL_BIND;
    }

    dev_priv->gtt.base.start = start;
@@ -1688,6 +1667,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,

    /* And finally clear the reserved guard page */
    ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);

    if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
        struct i915_hw_ppgtt *ppgtt;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
            return -ENOMEM;

        ret = __hw_ppgtt_init(dev, ppgtt);
        if (ret != 0)
            return ret;

        dev_priv->mm.aliasing_ppgtt = ppgtt;
    }

    return 0;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
@@ -1701,6 +1696,25 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
    i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

void i915_global_gtt_cleanup(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct i915_address_space *vm = &dev_priv->gtt.base;

    if (dev_priv->mm.aliasing_ppgtt) {
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

        ppgtt->base.cleanup(&ppgtt->base);
    }

    if (drm_mm_initialized(&vm->mm)) {
        drm_mm_takedown(&vm->mm);
        list_del(&vm->global_link);
    }

    vm->cleanup(vm);
}

static int setup_scratch_page(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1710,7 +1724,6 @@ static int setup_scratch_page(struct drm_device *dev)
    page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
    if (page == NULL)
        return -ENOMEM;
    get_page(page);
    set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
@@ -1735,7 +1748,6 @@ static void teardown_scratch_page(struct drm_device *dev)
    set_pages_wb(page, 1);
    pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
                   PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    put_page(page);
    __free_page(page);
}

@@ -1805,6 +1817,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
    return (gmch_ctrl - 0x17 + 9) << 22;
}

static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
    gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
    gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

    if (gen9_gmch_ctl < 0xf0)
        return gen9_gmch_ctl << 25; /* 32 MB units */
    else
        /* 4MB increments starting at 0xf0 for 4MB */
        return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}
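gen9_get_stolen_size above decodes the SKL stolen-memory field in two ranges: values below 0xf0 count 32 MB units, values from 0xf0 upward count 4 MB increments starting at 4 MB. A standalone sketch of the same decode, applied after the shift/mask step; the sample inputs are assumptions for illustration:

#include <stdio.h>

/* Sketch of the gen9 stolen-size decode once the GMS field has been
 * shifted and masked out of the GMCH control word. */
static unsigned long long gen9_stolen_bytes(unsigned ctl)
{
    if (ctl < 0xf0)
        return (unsigned long long)ctl << 25;            /* 32 MB units */
    return (unsigned long long)(ctl - 0xf0 + 1) << 22;   /* 4 MB units */
}

int main(void)
{
    /* 0x02 -> 64 MB, 0xf0 -> 4 MB, 0xf3 -> 16 MB */
    printf("%llu %llu %llu\n",
           gen9_stolen_bytes(0x02) >> 20,
           gen9_stolen_bytes(0xf0) >> 20,
           gen9_stolen_bytes(0xf3) >> 20);
    return 0;
}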

static int ggtt_probe_common(struct drm_device *dev,
                             size_t gtt_size)
{
@@ -1848,6 +1872,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
          GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
          GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

    if (!USES_PPGTT(dev_priv->dev))
        /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
         * so RTL will always use the value corresponding to
         * pat_sel = 000".
         * So let's disable cache for GGTT to avoid screen corruptions.
         * MOCS still can be used though.
         * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
         *   before this patch, i.e. the same uncached + snooping access
         *   like on gen6/7 seems to be in effect.
         * - So this just fixes blitter/render access. Again it looks
         *   like it's not just uncached access, but uncached + snooping.
         *   So we can still hold onto all our assumptions wrt cpu
         *   clflushing on LLC machines.
         */
        pat = GEN8_PPAT(0, GEN8_PPAT_UC);

    /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
     * write would work. */
    I915_WRITE(GEN8_PRIVATE_PAT, pat);
@@ -1864,9 +1904,17 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
     * Only the snoop bit has meaning for CHV, the rest is
     * ignored.
     *
     * Note that the harware enforces snooping for all page
     * table accesses. The snoop bit is actually ignored for
     * PDEs.
     * The hardware will never snoop for certain types of accesses:
     * - CPU GTT (GMADR->GGTT->no snoop->memory)
     * - PPGTT page tables
     * - some other special cycles
     *
     * As with BDW, we also need to consider the following for GT accesses:
     * "For GGTT, there is NO pat_sel[2:0] from the entry,
     * so RTL will always use the value corresponding to
     * pat_sel = 000".
     * Which means we must set the snoop bit in PAT entry 0
     * in order to keep the global status page working.
     */
    pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
          GEN8_PPAT(1, 0) |
@@ -1901,7 +1949,10 @@ static int gen8_gmch_probe(struct drm_device *dev,

    pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

    if (IS_CHERRYVIEW(dev)) {
    if (INTEL_INFO(dev)->gen >= 9) {
        *stolen = gen9_get_stolen_size(snb_gmch_ctl);
        gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
    } else if (IS_CHERRYVIEW(dev)) {
        *stolen = chv_get_stolen_size(snb_gmch_ctl);
        gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
    } else {
@@ -1969,10 +2020,6 @@ static void gen6_gmch_remove(struct i915_address_space *vm)

    struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

    if (drm_mm_initialized(&vm->mm)) {
        drm_mm_takedown(&vm->mm);
        list_del(&vm->global_link);
    }
    iounmap(gtt->gsm);
    teardown_scratch_page(vm->dev);
}
@@ -2077,6 +2124,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
    vma->obj = obj;

    switch (INTEL_INFO(vm->dev)->gen) {
    case 9:
    case 8:
    case 7:
    case 6:
@@ -2103,8 +2151,10 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
    /* Keep GGTT vmas first to make debug easier */
    if (i915_is_ggtt(vm))
        list_add(&vma->vma_link, &obj->vma_list);
    else
    else {
        list_add_tail(&vma->vma_link, &obj->vma_list);
        i915_ppgtt_get(i915_vm_to_ppgtt(vm));
    }

    return vma;
}

@@ -34,6 +34,8 @@
#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

struct drm_i915_file_private;

typedef uint32_t gen6_gtt_pte_t;
typedef uint64_t gen8_gtt_pte_t;
typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
@@ -121,6 +123,12 @@ struct i915_vma {
    struct drm_i915_gem_object *obj;
    struct i915_address_space *vm;

    /** Flags and address space this VMA is bound to */
#define GLOBAL_BIND (1<<0)
#define LOCAL_BIND (1<<1)
#define PTE_READ_ONLY (1<<2)
    unsigned int bound : 4;

    /** This object's place on the active/inactive lists */
    struct list_head mm_list;

@@ -153,8 +161,6 @@ struct i915_vma {
     * setting the valid PTE entries to a reserved scratch page. */
    void (*unbind_vma)(struct i915_vma *vma);
    /* Map an object into an address space with the given cache flags. */
#define GLOBAL_BIND (1<<0)
#define PTE_READ_ONLY (1<<1)
    void (*bind_vma)(struct i915_vma *vma,
                     enum i915_cache_level cache_level,
                     u32 flags);
@@ -258,22 +264,34 @@ struct i915_hw_ppgtt {
        dma_addr_t *gen8_pt_dma_addr[4];
    };

    struct intel_context *ctx;
    struct drm_i915_file_private *file_priv;

    int (*enable)(struct i915_hw_ppgtt *ppgtt);
    int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
                     struct intel_engine_cs *ring,
                     bool synchronous);
                     struct intel_engine_cs *ring);
//  void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
                               unsigned long mappable_end, unsigned long end);
void i915_global_gtt_cleanup(struct drm_device *dev);

bool intel_enable_ppgtt(struct drm_device *dev, bool full);
int i915_gem_init_ppgtt(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);

int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
int i915_ppgtt_init_hw(struct drm_device *dev);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
                                        struct drm_i915_file_private *fpriv);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
    if (ppgtt)
        kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
    if (ppgtt)
        kref_put(&ppgtt->ref, i915_ppgtt_release);
}
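The new i915_ppgtt_get/i915_ppgtt_put inlines wrap kref so a full PPGTT stays alive for as long as any VMA holds a reference, with i915_ppgtt_release running on the final put. A userspace analogue of that ownership pattern, with illustrative names rather than kernel API:

#include <stdio.h>
#include <stdlib.h>

/* Toy refcount mirroring the kref pattern the two inlines wrap:
 * get on VMA creation, put on VMA teardown, release on the last put. */
struct toy_ppgtt {
    int ref;
};

static void toy_release(struct toy_ppgtt *p)
{
    printf("releasing ppgtt\n");
    free(p);
}

static void toy_get(struct toy_ppgtt *p) { if (p) p->ref++; }

static void toy_put(struct toy_ppgtt *p)
{
    if (p && --p->ref == 0)
        toy_release(p);
}

int main(void)
{
    struct toy_ppgtt *p = malloc(sizeof(*p));

    p->ref = 1;   /* creation reference, like kref_init() */
    toy_get(p);   /* a VMA takes a reference */
    toy_put(p);   /* the VMA goes away */
    toy_put(p);   /* last put triggers release */
    return 0;
}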

void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);

@@ -28,13 +28,6 @@
#include "i915_drv.h"
#include "intel_renderstate.h"

struct render_state {
    const struct intel_renderstate_rodata *rodata;
    struct drm_i915_gem_object *obj;
    u64 ggtt_offset;
    int gen;
};

static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
@@ -45,6 +38,8 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
        return &gen7_null_state;
    case 8:
        return &gen8_null_state;
    case 9:
        return &gen9_null_state;
    }

    return NULL;
@@ -113,7 +108,7 @@ static int render_state_setup(struct render_state *so)

        d[i++] = s;
    }
    FreeKernelSpace(d);
    kunmap(page);

    ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
    if (ret)
@@ -127,31 +122,48 @@ static int render_state_setup(struct render_state *so)
    return 0;
}

static void render_state_fini(struct render_state *so)
void i915_gem_render_state_fini(struct render_state *so)
{
    i915_gem_object_ggtt_unpin(so->obj);
    drm_gem_object_unreference(&so->obj->base);
}

int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
                                  struct render_state *so)
{
    int ret;

    if (WARN_ON(ring->id != RCS))
        return -ENOENT;

    ret = render_state_init(so, ring->dev);
    if (ret)
        return ret;

    if (so->rodata == NULL)
        return 0;

    ret = render_state_setup(so);
    if (ret) {
        i915_gem_render_state_fini(so);
        return ret;
    }

    return 0;
}

int i915_gem_render_state_init(struct intel_engine_cs *ring)
{
    struct render_state so;
    int ret;

    if (WARN_ON(ring->id != RCS))
        return -ENOENT;

    ret = render_state_init(&so, ring->dev);
    ret = i915_gem_render_state_prepare(ring, &so);
    if (ret)
        return ret;

    if (so.rodata == NULL)
        return 0;

    ret = render_state_setup(&so);
    if (ret)
        goto out;

    ret = ring->dispatch_execbuffer(ring,
                                    so.ggtt_offset,
                                    so.rodata->batch_items * 4,
@@ -164,6 +176,6 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
    ret = __i915_add_request(ring, NULL, so.obj, NULL);
    /* __i915_add_request moves object to inactive if it fails */
out:
    render_state_fini(&so);
    i915_gem_render_state_fini(&so);
    return ret;
}

drivers/video/drm/i915/i915_gem_render_state.h (new file, 47 lines)
@@ -0,0 +1,47 @@
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _I915_GEM_RENDER_STATE_H_
#define _I915_GEM_RENDER_STATE_H_

#include <linux/types.h>

struct intel_renderstate_rodata {
    const u32 *reloc;
    const u32 *batch;
    const u32 batch_items;
};

struct render_state {
    const struct intel_renderstate_rodata *rodata;
    struct drm_i915_gem_object *obj;
    u64 ggtt_offset;
    int gen;
};

int i915_gem_render_state_init(struct intel_engine_cs *ring);
void i915_gem_render_state_fini(struct render_state *so);
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
                                  struct render_state *so);

#endif /* _I915_GEM_RENDER_STATE_H_ */

@@ -276,6 +276,7 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
int i915_gem_init_stolen(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 tmp;
    int bios_reserved = 0;

#ifdef CONFIG_INTEL_IOMMU
@@ -295,8 +296,16 @@ int i915_gem_init_stolen(struct drm_device *dev)
    DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
                  dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

    if (IS_VALLEYVIEW(dev))
        bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
    if (INTEL_INFO(dev)->gen >= 8) {
        tmp = I915_READ(GEN7_BIOS_RESERVED);
        tmp >>= GEN8_BIOS_RESERVED_SHIFT;
        tmp &= GEN8_BIOS_RESERVED_MASK;
        bios_reserved = (1024*1024) << tmp;
    } else if (IS_GEN7(dev)) {
        tmp = I915_READ(GEN7_BIOS_RESERVED);
        bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
            256*1024 : 1024*1024;
    }
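On gen8+ the reserved size above is a 1 MB base scaled by a small register field (field values 0 through 3 reserve 1, 2, 4 or 8 MB), while gen7 only distinguishes 256 KB from 1 MB. A worked sketch of the gen8 arithmetic; the shift and mask values stand in for GEN8_BIOS_RESERVED_SHIFT/MASK and are assumptions here:

#include <stdio.h>

/* Sketch of the gen8+ BIOS-reserved decode. The shift/mask are
 * illustrative stand-ins, not the real register layout. */
static unsigned long bios_reserved_gen8(unsigned reg, unsigned shift,
                                        unsigned mask)
{
    unsigned field = (reg >> shift) & mask;

    return (1024UL * 1024UL) << field; /* 1 MB scaled by the field */
}

int main(void)
{
    unsigned shift = 25, mask = 0x3; /* assumed values */

    /* field 0..3 -> 1, 2, 4, 8 MB */
    for (unsigned f = 0; f <= 3; f++)
        printf("field %u -> %lu MB\n", f,
               bios_reserved_gen8(f << shift, shift, mask) >> 20);
    return 0;
}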

    if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
        return 0;
@@ -511,7 +520,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        }
    }

    obj->has_global_gtt_mapping = 1;
    vma->bound |= GLOBAL_BIND;

    list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
    list_add_tail(&vma->mm_list, &ggtt->inactive_list);

@@ -91,20 +91,37 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
    uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
    uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

    if (IS_VALLEYVIEW(dev)) {
    if (INTEL_INFO(dev)->gen >= 8 || IS_VALLEYVIEW(dev)) {
        /*
         * On BDW+, swizzling is not used. We leave the CPU memory
         * controller in charge of optimizing memory accesses without
         * the extra address manipulation GPU side.
         *
         * VLV and CHV don't have GPU swizzling.
         */
        swizzle_x = I915_BIT_6_SWIZZLE_NONE;
        swizzle_y = I915_BIT_6_SWIZZLE_NONE;
    } else if (INTEL_INFO(dev)->gen >= 6) {
        if (dev_priv->preserve_bios_swizzle) {
            if (I915_READ(DISP_ARB_CTL) &
                DISP_TILE_SURFACE_SWIZZLING) {
                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                swizzle_y = I915_BIT_6_SWIZZLE_9;
            } else {
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
            }
        } else {
            uint32_t dimm_c0, dimm_c1;
            dimm_c0 = I915_READ(MAD_DIMM_C0);
            dimm_c1 = I915_READ(MAD_DIMM_C1);
            dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
            dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
            /* Enable swizzling when the channels are populated with
             * identically sized dimms. We don't need to check the 3rd
             * channel because no cpu with gpu attached ships in that
             * configuration. Also, swizzling only makes sense for 2
             * channels anyway. */
            /* Enable swizzling when the channels are populated
             * with identically sized dimms. We don't need to check
             * the 3rd channel because no cpu with gpu attached
             * ships in that configuration. Also, swizzling only
             * makes sense for 2 channels anyway. */
            if (dimm_c0 == dimm_c1) {
                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                swizzle_y = I915_BIT_6_SWIZZLE_9;
@@ -112,6 +129,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
            }
        }
    } else if (IS_GEN5(dev)) {
        /* On Ironlake whatever DRAM config, GPU always do
         * same swizzling setup.
@@ -160,6 +178,15 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
        }
        break;
    }

    /* check for L-shaped memory aka modified enhanced addressing */
    if (IS_GEN4(dev)) {
        uint32_t ddc2 = I915_READ(DCC2);

        if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
            dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
    }

    if (dcc == 0xffffffff) {
        DRM_ERROR("Couldn't read from MCHBAR. "
                  "Disabling tiling.\n");
@@ -357,26 +384,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
     * has to also include the unfenced register the GPU uses
     * whilst executing a fenced command for an untiled object.
     */

    obj->map_and_fenceable =
        !i915_gem_obj_ggtt_bound(obj) ||
        (i915_gem_obj_ggtt_offset(obj) +
         obj->base.size <= dev_priv->gtt.mappable_end &&
         i915_gem_object_fence_ok(obj, args->tiling_mode));

    /* Rebind if we need a change of alignment */
    if (!obj->map_and_fenceable) {
        u32 unfenced_align =
            i915_gem_get_gtt_alignment(dev, obj->base.size,
                                       args->tiling_mode,
                                       false);
        if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
            ret = i915_gem_object_ggtt_unbind(obj);
    }
    if (obj->map_and_fenceable &&
        !i915_gem_object_fence_ok(obj, args->tiling_mode))
        ret = i915_gem_object_ggtt_unbind(obj);

    if (ret == 0) {
        if (obj->pages &&
            obj->madv == I915_MADV_WILLNEED &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
            if (args->tiling_mode == I915_TILING_NONE)
                i915_gem_object_unpin_pages(obj);
            if (obj->tiling_mode == I915_TILING_NONE)
                i915_gem_object_pin_pages(obj);
        }

        obj->fence_dirty =
            obj->fenced_gpu_access ||
            obj->last_fenced_seqno ||
            obj->fence_reg != I915_FENCE_REG_NONE;

        obj->tiling_mode = args->tiling_mode;
@@ -440,6 +463,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
    }

    /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
    args->phys_swizzle_mode = args->swizzle_mode;
    if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
        args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
    if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)

@@ -192,7 +192,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
                                struct drm_i915_error_buffer *err,
                                int count)
{
    err_printf(m, "%s [%d]:\n", name, count);
    err_printf(m, " %s [%d]:\n", name, count);

    while (count--) {
        err_printf(m, " %08x %8u %02x %02x %x %x",
@@ -208,7 +208,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
        err_puts(m, err->userptr ? " userptr" : "");
        err_puts(m, err->ring != -1 ? " " : "");
        err_puts(m, ring_str(err->ring));
        err_puts(m, i915_cache_level_str(err->cache_level));
        err_puts(m, i915_cache_level_str(m->i915, err->cache_level));

        if (err->name)
            err_printf(m, " (name: %d)", err->name);
@@ -229,6 +229,8 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
        return "wait";
    case HANGCHECK_ACTIVE:
        return "active";
    case HANGCHECK_ACTIVE_LOOP:
        return "active (loop)";
    case HANGCHECK_KICK:
        return "kick";
    case HANGCHECK_HUNG:

File diff suppressed because it is too large

@@ -35,6 +35,7 @@ struct i915_params i915 __read_mostly = {
    .vbt_sdvo_panel_type = -1,
    .enable_rc6 = -1,
    .enable_fbc = -1,
    .enable_execlists = 0,
    .enable_hangcheck = true,
    .enable_ppgtt = 1,
    .enable_psr = 0,
@@ -66,12 +67,12 @@ module_param_named(powersave, i915.powersave, int, 0600);
MODULE_PARM_DESC(powersave,
    "Enable powersavings, fbc, downclocking, etc. (default: true)");

module_param_named(semaphores, i915.semaphores, int, 0400);
module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
    "Use semaphores for inter-ring sync "
    "(default: -1 (use per-chip defaults))");

module_param_named(enable_rc6, i915.enable_rc6, int, 0400);
module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
MODULE_PARM_DESC(enable_rc6,
    "Enable power-saving render C-state 6. "
    "Different stages can be selected via bitmask values "
@@ -79,7 +80,7 @@ MODULE_PARM_DESC(enable_rc6,
    "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
    "default: -1 (use per-chip default)");

module_param_named(enable_fbc, i915.enable_fbc, int, 0600);
module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc,
    "Enable frame buffer compression for power savings "
    "(default: -1 (use per-chip default))");
@@ -113,11 +114,16 @@ MODULE_PARM_DESC(enable_hangcheck,
    "WARNING: Disabling this can cause system wide hangs. "
    "(default: true)");

module_param_named(enable_ppgtt, i915.enable_ppgtt, int, 0400);
module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt,
    "Override PPGTT usage. "
    "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");

module_param_named(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
    "Override execlists usage. "
    "(-1=auto, 0=disabled [default], 1=enabled)");

module_param_named(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");

File diff suppressed because it is too large

@@ -30,5 +30,12 @@
#define trace_i915_gem_evict(dev, min_size, alignment, flags)
#define trace_i915_gem_evict_vm(vm)
#define trace_i915_gem_evict_everything(dev)
#define trace_i915_context_free(ctx)
#define trace_i915_context_create(ctx)
#define trace_switch_mm(ring, to)
#define trace_i915_ppgtt_create(base)
#define trace_i915_ppgtt_release(base)


#endif

drivers/video/drm/i915/intel_audio.c (new file, 463 lines)
@@ -0,0 +1,463 @@
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/kernel.h>

#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"

/**
 * DOC: High Definition Audio over HDMI and Display Port
 *
 * The graphics and audio drivers together support High Definition Audio over
 * HDMI and Display Port. The audio programming sequences are divided into audio
 * codec and controller enable and disable sequences. The graphics driver
 * handles the audio codec sequences, while the audio driver handles the audio
 * controller sequences.
 *
 * The disable sequences must be performed before disabling the transcoder or
 * port. The enable sequences may only be performed after enabling the
 * transcoder and port, and after completed link training.
 *
 * The codec and controller sequences could be done either parallel or serial,
 * but generally the ELDV/PD change in the codec sequence indicates to the audio
 * driver that the controller sequence should start. Indeed, most of the
 * co-operation between the graphics and audio drivers is handled via audio
 * related registers. (The notable exception is the power management, not
 * covered here.)
 */

static const struct {
    int clock;
    u32 config;
} hdmi_audio_clock[] = {
    { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
    { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
    { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
    { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
    { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
    { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
    { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
    { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
    { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
    { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};

/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
        if (mode->clock == hdmi_audio_clock[i].clock)
            break;
    }

    if (i == ARRAY_SIZE(hdmi_audio_clock)) {
        DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
        i = 1;
    }

    DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
                  hdmi_audio_clock[i].clock,
                  hdmi_audio_clock[i].config);

    return hdmi_audio_clock[i].config;
}
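audio_config_hdmi_pixel_clock scans the fixed table and deliberately falls back to index 1 (the 25.2 MHz bspec default) when the mode's clock has no exact match. A standalone sketch of the same lookup-with-default pattern, with a trimmed table and illustrative values:

#include <stdio.h>

/* Toy lookup-with-default: scan a fixed table, fall back to a
 * designated default entry on a miss. Values are illustrative. */
static const struct { int clock; unsigned config; } toy_clock[] = {
    { 25175, 0x0 },
    { 25200, 0x1 }, /* default entry */
    { 27000, 0x2 },
    { 74250, 0x3 },
};

static unsigned toy_lookup(int clock)
{
    size_t i, n = sizeof(toy_clock) / sizeof(toy_clock[0]);

    for (i = 0; i < n; i++)
        if (toy_clock[i].clock == clock)
            return toy_clock[i].config;
    return toy_clock[1].config; /* miss: use the default */
}

int main(void)
{
    /* exact hit returns 0x3; an unknown clock falls back to 0x1 */
    printf("%u %u\n", toy_lookup(74250), toy_lookup(12345));
    return 0;
}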

static bool intel_eld_uptodate(struct drm_connector *connector,
                               int reg_eldv, uint32_t bits_eldv,
                               int reg_elda, uint32_t bits_elda,
                               int reg_edid)
{
    struct drm_i915_private *dev_priv = connector->dev->dev_private;
    uint8_t *eld = connector->eld;
    uint32_t tmp;
    int i;

    tmp = I915_READ(reg_eldv);
    tmp &= bits_eldv;

    if (!tmp)
        return false;

    tmp = I915_READ(reg_elda);
    tmp &= ~bits_elda;
    I915_WRITE(reg_elda, tmp);

    for (i = 0; i < drm_eld_size(eld) / 4; i++)
        if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
            return false;

    return true;
}

static void g4x_audio_codec_disable(struct intel_encoder *encoder)
{
    struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
    uint32_t eldv, tmp;

    DRM_DEBUG_KMS("Disable audio codec\n");

    tmp = I915_READ(G4X_AUD_VID_DID);
    if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
        eldv = G4X_ELDV_DEVCL_DEVBLC;
    else
        eldv = G4X_ELDV_DEVCTG;

    /* Invalidate ELD */
    tmp = I915_READ(G4X_AUD_CNTL_ST);
    tmp &= ~eldv;
    I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}

static void g4x_audio_codec_enable(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   struct drm_display_mode *mode)
{
    struct drm_i915_private *dev_priv = connector->dev->dev_private;
    uint8_t *eld = connector->eld;
    uint32_t eldv;
    uint32_t tmp;
    int len, i;

    DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);

    tmp = I915_READ(G4X_AUD_VID_DID);
    if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
        eldv = G4X_ELDV_DEVCL_DEVBLC;
    else
        eldv = G4X_ELDV_DEVCTG;

    if (intel_eld_uptodate(connector,
                           G4X_AUD_CNTL_ST, eldv,
                           G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
                           G4X_HDMIW_HDMIEDID))
        return;

    tmp = I915_READ(G4X_AUD_CNTL_ST);
    tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
    len = (tmp >> 9) & 0x1f; /* ELD buffer size */
    I915_WRITE(G4X_AUD_CNTL_ST, tmp);

    len = min(drm_eld_size(eld) / 4, len);
    DRM_DEBUG_DRIVER("ELD size %d\n", len);
    for (i = 0; i < len; i++)
        I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

    tmp = I915_READ(G4X_AUD_CNTL_ST);
    tmp |= eldv;
    I915_WRITE(G4X_AUD_CNTL_ST, tmp);
}

static void hsw_audio_codec_disable(struct intel_encoder *encoder)
{
    struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
    enum pipe pipe = intel_crtc->pipe;
    uint32_t tmp;

    DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));

    /* Disable timestamps */
    tmp = I915_READ(HSW_AUD_CFG(pipe));
    tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
    tmp |= AUD_CONFIG_N_PROG_ENABLE;
    tmp &= ~AUD_CONFIG_UPPER_N_MASK;
    tmp &= ~AUD_CONFIG_LOWER_N_MASK;
    if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
        tmp |= AUD_CONFIG_N_VALUE_INDEX;
    I915_WRITE(HSW_AUD_CFG(pipe), tmp);

    /* Invalidate ELD */
    tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
    tmp &= ~AUDIO_ELD_VALID(pipe);
    tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
    I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}

static void hsw_audio_codec_enable(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   struct drm_display_mode *mode)
{
    struct drm_i915_private *dev_priv = connector->dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
    enum pipe pipe = intel_crtc->pipe;
    const uint8_t *eld = connector->eld;
    uint32_t tmp;
    int len, i;

    DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
                  pipe_name(pipe), drm_eld_size(eld));

    /* Enable audio presence detect, invalidate ELD */
    tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
    tmp |= AUDIO_OUTPUT_ENABLE(pipe);
    tmp &= ~AUDIO_ELD_VALID(pipe);
    I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);

    /*
     * FIXME: We're supposed to wait for vblank here, but we have vblanks
     * disabled during the mode set. The proper fix would be to push the
     * rest of the setup into a vblank work item, queued here, but the
     * infrastructure is not there yet.
     */

    /* Reset ELD write address */
    tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
    tmp &= ~IBX_ELD_ADDRESS_MASK;
    I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);

    /* Up to 84 bytes of hw ELD buffer */
    len = min(drm_eld_size(eld), 84);
    for (i = 0; i < len / 4; i++)
        I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));

    /* ELD valid */
    tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
    tmp |= AUDIO_ELD_VALID(pipe);
    I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);

    /* Enable timestamps */
    tmp = I915_READ(HSW_AUD_CFG(pipe));
    tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
    tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
    tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
    if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
        tmp |= AUD_CONFIG_N_VALUE_INDEX;
    else
        tmp |= audio_config_hdmi_pixel_clock(mode);
    I915_WRITE(HSW_AUD_CFG(pipe), tmp);
}

static void ilk_audio_codec_disable(struct intel_encoder *encoder)
{
    struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
    struct intel_digital_port *intel_dig_port =
        enc_to_dig_port(&encoder->base);
    enum port port = intel_dig_port->port;
    enum pipe pipe = intel_crtc->pipe;
    uint32_t tmp, eldv;
    int aud_config;
    int aud_cntrl_st2;

    DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
                  port_name(port), pipe_name(pipe));

    if (HAS_PCH_IBX(dev_priv->dev)) {
        aud_config = IBX_AUD_CFG(pipe);
        aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
    } else if (IS_VALLEYVIEW(dev_priv)) {
        aud_config = VLV_AUD_CFG(pipe);
        aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
    } else {
        aud_config = CPT_AUD_CFG(pipe);
        aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
    }

    /* Disable timestamps */
    tmp = I915_READ(aud_config);
    tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
    tmp |= AUD_CONFIG_N_PROG_ENABLE;
    tmp &= ~AUD_CONFIG_UPPER_N_MASK;
    tmp &= ~AUD_CONFIG_LOWER_N_MASK;
    if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
        tmp |= AUD_CONFIG_N_VALUE_INDEX;
    I915_WRITE(aud_config, tmp);

    if (WARN_ON(!port)) {
        eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
               IBX_ELD_VALID(PORT_D);
    } else {
        eldv = IBX_ELD_VALID(port);
    }

    /* Invalidate ELD */
    tmp = I915_READ(aud_cntrl_st2);
    tmp &= ~eldv;
    I915_WRITE(aud_cntrl_st2, tmp);
}

static void ilk_audio_codec_enable(struct drm_connector *connector,
                                   struct intel_encoder *encoder,
                                   struct drm_display_mode *mode)
{
    struct drm_i915_private *dev_priv = connector->dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
    struct intel_digital_port *intel_dig_port =
        enc_to_dig_port(&encoder->base);
    enum port port = intel_dig_port->port;
    enum pipe pipe = intel_crtc->pipe;
    uint8_t *eld = connector->eld;
    uint32_t eldv;
    uint32_t tmp;
    int len, i;
    int hdmiw_hdmiedid;
    int aud_config;
    int aud_cntl_st;
    int aud_cntrl_st2;

    DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
                  port_name(port), pipe_name(pipe), drm_eld_size(eld));

    /*
     * FIXME: We're supposed to wait for vblank here, but we have vblanks
     * disabled during the mode set. The proper fix would be to push the
     * rest of the setup into a vblank work item, queued here, but the
     * infrastructure is not there yet.
     */

    if (HAS_PCH_IBX(connector->dev)) {
        hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
        aud_config = IBX_AUD_CFG(pipe);
        aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
        aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
    } else if (IS_VALLEYVIEW(connector->dev)) {
        hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
        aud_config = VLV_AUD_CFG(pipe);
        aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
        aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
    } else {
        hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
        aud_config = CPT_AUD_CFG(pipe);
        aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
        aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
    }

    if (WARN_ON(!port)) {
        eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
               IBX_ELD_VALID(PORT_D);
    } else {
        eldv = IBX_ELD_VALID(port);
    }

    /* Invalidate ELD */
    tmp = I915_READ(aud_cntrl_st2);
    tmp &= ~eldv;
    I915_WRITE(aud_cntrl_st2, tmp);

    /* Reset ELD write address */
    tmp = I915_READ(aud_cntl_st);
    tmp &= ~IBX_ELD_ADDRESS_MASK;
    I915_WRITE(aud_cntl_st, tmp);

    /* Up to 84 bytes of hw ELD buffer */
    len = min(drm_eld_size(eld), 84);
    for (i = 0; i < len / 4; i++)
        I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

    /* ELD valid */
    tmp = I915_READ(aud_cntrl_st2);
    tmp |= eldv;
    I915_WRITE(aud_cntrl_st2, tmp);

    /* Enable timestamps */
    tmp = I915_READ(aud_config);
    tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
    tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
    tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
    if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
        tmp |= AUD_CONFIG_N_VALUE_INDEX;
    else
        tmp |= audio_config_hdmi_pixel_clock(mode);
    I915_WRITE(aud_config, tmp);
}

/**
 * intel_audio_codec_enable - Enable the audio codec for HD audio
 * @intel_encoder: encoder on which to enable audio
 *
 * The enable sequences may only be performed after enabling the transcoder and
 * port, and after completed link training.
 */
void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
{
    struct drm_encoder *encoder = &intel_encoder->base;
    struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
    struct drm_display_mode *mode = &crtc->config.adjusted_mode;
    struct drm_connector *connector;
    struct drm_device *dev = encoder->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

    connector = drm_select_eld(encoder, mode);
    if (!connector)
        return;

    DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                     connector->base.id,
                     connector->name,
                     connector->encoder->base.id,
                     connector->encoder->name);

    /* ELD Conn_Type */
    connector->eld[5] &= ~(3 << 2);
    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
        connector->eld[5] |= (1 << 2);

    connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

    if (dev_priv->display.audio_codec_enable)
        dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
}

/**
 * intel_audio_codec_disable - Disable the audio codec for HD audio
 * @encoder: encoder on which to disable audio
 *
 * The disable sequences must be performed before disabling the transcoder or
 * port.
 */
void intel_audio_codec_disable(struct intel_encoder *encoder)
{
    struct drm_device *dev = encoder->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (dev_priv->display.audio_codec_disable)
        dev_priv->display.audio_codec_disable(encoder);
}

/**
 * intel_init_audio - Set up chip specific audio functions
 * @dev: drm device
 */
void intel_init_audio(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (IS_G4X(dev)) {
        dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
        dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
    } else if (IS_VALLEYVIEW(dev)) {
        dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
        dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
    } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
        dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
        dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
    } else if (HAS_PCH_SPLIT(dev)) {
        dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
        dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
    }
}
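intel_init_audio selects the codec enable/disable callbacks once at initialization, so the per-platform choice never appears on the enable/disable paths; callers only need the NULL checks seen in intel_audio_codec_enable/disable above. A compact userspace sketch of that dispatch-table pattern, with hypothetical platform ids and handlers:

#include <stdio.h>

/* Toy init-time dispatch: pick platform callbacks once, then call
 * through function pointers. Ids and handlers are illustrative. */
enum toy_platform { TOY_G4X, TOY_HSW, TOY_OTHER };

struct toy_display {
    void (*audio_enable)(void);
    void (*audio_disable)(void);
};

static void g4x_enable(void)  { printf("g4x enable\n"); }
static void g4x_disable(void) { printf("g4x disable\n"); }
static void hsw_enable(void)  { printf("hsw enable\n"); }
static void hsw_disable(void) { printf("hsw disable\n"); }

static void toy_init_audio(struct toy_display *d, enum toy_platform p)
{
    if (p == TOY_G4X) {
        d->audio_enable = g4x_enable;
        d->audio_disable = g4x_disable;
    } else if (p == TOY_HSW) {
        d->audio_enable = hsw_enable;
        d->audio_disable = hsw_disable;
    } else {
        d->audio_enable = NULL;
        d->audio_disable = NULL;
    }
}

int main(void)
{
    struct toy_display d;

    toy_init_audio(&d, TOY_HSW);
    if (d.audio_enable) /* mirrors the NULL check in the driver */
        d.audio_enable();
    return 0;
}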
|
@ -627,16 +627,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
|
||||
|
||||
switch (edp_link_params->preemphasis) {
|
||||
case EDP_PREEMPHASIS_NONE:
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0;
|
||||
break;
|
||||
case EDP_PREEMPHASIS_3_5dB:
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1;
|
||||
break;
|
||||
case EDP_PREEMPHASIS_6dB:
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2;
|
||||
break;
|
||||
case EDP_PREEMPHASIS_9_5dB:
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
|
||||
dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
|
||||
@ -646,16 +646,16 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
|
||||
|
||||
switch (edp_link_params->vswing) {
|
||||
case EDP_VSWING_0_4V:
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400;
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
|
||||
break;
|
||||
case EDP_VSWING_0_6V:
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600;
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
|
||||
break;
|
||||
case EDP_VSWING_0_8V:
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800;
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
|
||||
break;
|
||||
case EDP_VSWING_1_2V:
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200;
|
||||
dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
|
||||
@ -946,7 +946,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
|
||||
DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
|
||||
port_name(port));
|
||||
if (is_dvi && (port == PORT_A || port == PORT_E))
|
||||
DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
|
||||
DRM_DEBUG_KMS("Port %c is TMDS compatible\n", port_name(port));
|
||||
if (!is_dvi && !is_dp && !is_crt)
|
||||
DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
|
||||
port_name(port));
|
||||
@ -976,13 +976,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
|
||||
if (bdb->version >= 158) {
|
||||
/* The VBT HDMI level shift values match the table we have. */
|
||||
hdmi_level_shift = child->raw[7] & 0xF;
|
||||
if (hdmi_level_shift < 0xC) {
|
||||
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
|
||||
port_name(port),
|
||||
hdmi_level_shift);
|
||||
info->hdmi_level_shift = hdmi_level_shift;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void parse_ddi_ports(struct drm_i915_private *dev_priv,
|
||||
@ -1114,8 +1112,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
|
||||
struct ddi_vbt_port_info *info =
|
||||
&dev_priv->vbt.ddi_port_info[port];
|
||||
|
||||
/* Recommended BSpec default: 800mV 0dB. */
|
||||
info->hdmi_level_shift = 6;
|
||||
info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
|
||||
|
||||
info->supports_dvi = (port != PORT_A && port != PORT_E);
|
||||
info->supports_hdmi = info->supports_dvi;
|
||||
|
@ -46,7 +46,7 @@ struct bdb_header {
u16 version; /**< decimal */
u16 header_size; /**< in bytes */
u16 bdb_size; /**< in bytes */
};
} __packed;

/* strictly speaking, this is a "skip" block, but it has interesting info */
struct vbios_data {
@ -252,7 +252,7 @@ union child_device_config {
/* This one should also be safe to use anywhere, even without version
 * checks. */
struct common_child_dev_config common;
};
} __packed;

struct bdb_general_definitions {
/* DDC GPIO */
@ -802,7 +802,8 @@ struct mipi_config {

u16 rsvd4;

u8 rsvd5[5];
u8 rsvd5;
u32 target_burst_mode_freq;
u32 dsi_ddr_clk;
u32 bridge_ref_clk;

@ -887,12 +888,12 @@ struct mipi_pps_data {
u16 bl_disable_delay;
u16 panel_off_delay;
u16 panel_power_cycle_delay;
};
} __packed;

struct bdb_mipi_config {
struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
};
} __packed;

/* Block 53 contains MIPI sequences as needed by the panel
 * for enabling it. This block can be variable in size and
@ -901,7 +902,7 @@ struct bdb_mipi_config {
struct bdb_mipi_sequence {
u8 version;
u8 data[0];
};
} __packed;

/* MIPI Sequnece Block definitions */
enum mipi_seq {

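These VBT structures mirror the BIOS binary layout byte for byte, which is why the hunks above add __packed throughout. A short illustration of what the attribute changes (editorial example, not part of the commit):

/* Editorial sketch: without __packed the compiler may insert alignment
 * padding, so reads at fixed VBT byte offsets would land in the wrong
 * place. With it, sizeof() matches the on-disk layout exactly. */
struct example_vbt_block {
	u8  id;
	u16 size;	/* unpacked: a padding byte could precede this field */
} __packed;		/* packed: sizeof() == 3, matching the BIOS data */
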
@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
u32 tmp;

power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;

tmp = I915_READ(crt->adpa_reg);
@ -775,7 +775,7 @@ static void intel_crt_reset(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(crt->adpa_reg);

DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}

File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -278,20 +278,12 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
}

static enum drm_connector_status
intel_mst_port_dp_detect(struct drm_connector *connector)
intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;

return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
}

static enum drm_connector_status
intel_dp_mst_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
status = intel_mst_port_dp_detect(connector);
return status;
return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
}

static int
@ -393,7 +385,7 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
#endif
}

static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop)
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
{
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@ -422,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_dp_add_properties(intel_dp, connector);

drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);

drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
mutex_lock(&dev->mode_config.mutex);

@ -25,6 +25,7 @@
#ifndef __INTEL_DRV_H__
#define __INTEL_DRV_H__

#include <linux/async.h>
#include <linux/i2c.h>
#include <linux/hdmi.h>
#include <drm/i915_drm.h>
@ -33,11 +34,10 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>

#define KBUILD_MODNAME "i915.dll"


#define cpu_relax() asm volatile("rep; nop")
#define DIV_ROUND_CLOSEST_ULL(ll, d) \
({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })

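A quick sanity check on the macro above (editorial, not part of the commit): adding half the divisor before do_div() makes the division round to nearest instead of truncating.

/* Editorial sketch: DIV_ROUND_CLOSEST_ULL(7, 2) evaluates (7 + 1) / 2 = 4,
 * where plain integer division would yield 3. do_div() leaves the quotient
 * in _tmp, which the statement expression then returns. */
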
/**
 * _wait_for - magic (register) wait macro
@ -94,18 +94,20 @@

/* these are outputs from the chip - integrated only
   external chips are via DVO or SDVO output */
#define INTEL_OUTPUT_UNUSED 0
#define INTEL_OUTPUT_ANALOG 1
#define INTEL_OUTPUT_DVO 2
#define INTEL_OUTPUT_SDVO 3
#define INTEL_OUTPUT_LVDS 4
#define INTEL_OUTPUT_TVOUT 5
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_DSI 9
#define INTEL_OUTPUT_UNKNOWN 10
#define INTEL_OUTPUT_DP_MST 11
enum intel_output_type {
INTEL_OUTPUT_UNUSED = 0,
INTEL_OUTPUT_ANALOG = 1,
INTEL_OUTPUT_DVO = 2,
INTEL_OUTPUT_SDVO = 3,
INTEL_OUTPUT_LVDS = 4,
INTEL_OUTPUT_TVOUT = 5,
INTEL_OUTPUT_HDMI = 6,
INTEL_OUTPUT_DISPLAYPORT = 7,
INTEL_OUTPUT_EDP = 8,
INTEL_OUTPUT_DSI = 9,
INTEL_OUTPUT_UNKNOWN = 10,
INTEL_OUTPUT_DP_MST = 11,
};

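Converting the INTEL_OUTPUT_* #defines into enum intel_output_type keeps the same numeric values but gives the compiler a real type; later hunks in this commit switch helpers such as intel_pipe_has_type() over to it. A hedged sketch of the payoff (editorial example):

/* Editorial sketch: with an enum-typed encoder->type, a switch without a
 * default lets -Wswitch flag any output type the code forgot to handle,
 * which bare integer #defines cannot do. */
switch (encoder->type) {	/* enum intel_output_type */
case INTEL_OUTPUT_EDP:
	/* ... eDP-specific setup ... */
	break;
default:	/* drop this default and -Wswitch reports missed cases */
	break;
}
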
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@ -136,7 +138,7 @@ struct intel_encoder {
 */
struct intel_crtc *new_crtc;

int type;
enum intel_output_type type;
unsigned int cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
@ -184,6 +186,8 @@ struct intel_panel {
bool active_low_pwm;
struct backlight_device *device;
} backlight;

void (*backlight_power)(struct intel_connector *, bool enable);
};

struct intel_connector {
@ -216,6 +220,7 @@ struct intel_connector {

/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
struct edid *edid;
struct edid *detect_edid;

/* since POLL and HPD connectors may use the same HPD line keep the native
   state of connector->polled in case hotplug storm detection changes it */
@ -238,6 +243,17 @@ typedef struct dpll {
int p;
} intel_clock_t;

struct intel_plane_state {
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_rect src;
struct drm_rect dst;
struct drm_rect clip;
struct drm_rect orig_src;
struct drm_rect orig_dst;
bool visible;
};

struct intel_plane_config {
bool tiled;
int size;
@ -276,6 +292,9 @@ struct intel_crtc_config {
 * between pch encoders and cpu encoders. */
bool has_pch_encoder;

/* Are we sending infoframes on the attached port */
bool has_infoframe;

/* CPU Transcoder for the pipe. Currently this can only differ from the
 * pipe on Haswell (where we have a special eDP transcoder). */
enum transcoder cpu_transcoder;
@ -324,7 +343,10 @@ struct intel_crtc_config {
/* Selected dpll when shared or DPLL_ID_PRIVATE. */
enum intel_dpll_id shared_dpll;

/* PORT_CLK_SEL for DDI ports. */
/*
 * - PORT_CLK_SEL for DDI ports on HSW/BDW.
 * - enum skl_dpll on SKL
 */
uint32_t ddi_pll_sel;

/* Actual register state of the dpll, for shared dpll cross-checking. */
@ -335,6 +357,7 @@ struct intel_crtc_config {

/* m2_n2 for eDP downclock */
struct intel_link_m_n dp_m2_n2;
bool has_drrs;

/*
 * Frequence the dpll for the port should run at. Differs from the
@ -384,7 +407,14 @@ struct intel_pipe_wm {

struct intel_mmio_flip {
u32 seqno;
u32 ring_id;
struct intel_engine_cs *ring;
struct work_struct work;
};

struct skl_pipe_wm {
struct skl_wm_level wm[8];
struct skl_wm_level trans_wm;
uint32_t linetime;
};

struct intel_crtc {
@ -415,6 +445,7 @@ struct intel_crtc {
uint32_t cursor_addr;
int16_t cursor_width, cursor_height;
uint32_t cursor_cntl;
uint32_t cursor_size;
uint32_t cursor_base;

struct intel_plane_config plane_config;
@ -433,11 +464,12 @@ struct intel_crtc {
struct {
/* watermarks currently being used */
struct intel_pipe_wm active;
/* SKL wm values currently in use */
struct skl_pipe_wm skl_active;
} wm;

wait_queue_head_t vbl_wait;

int scanline_offset;
struct intel_mmio_flip mmio_flip;
};

struct intel_plane_wm_parameters {
@ -459,6 +491,7 @@ struct intel_plane {
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
unsigned int rotation;

/* Since we need to change the watermarks before/after
 * enabling/disabling the planes, we need to store the parameters here
@ -525,6 +558,7 @@ struct intel_hdmi {
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode);
bool (*infoframe_enabled)(struct drm_encoder *encoder);
};

struct intel_dp_mst_encoder;
@ -567,6 +601,13 @@ struct intel_dp {
unsigned long last_power_on;
unsigned long last_backlight_off;

/*
 * Pipe whose power sequencer is currently locked into
 * this port. Only relevant on VLV/CHV.
 */
enum pipe pps_pipe;
struct edp_power_seq pps_delays;

bool use_tps3;
bool can_mst; /* this port supports mst */
bool is_mst;
@ -665,6 +706,10 @@ struct intel_unpin_work {
#define INTEL_FLIP_COMPLETE 2
u32 flip_count;
u32 gtt_offset;
struct intel_engine_cs *flip_queued_ring;
u32 flip_queued_seqno;
int flip_queued_vblank;
int flip_ready_vblank;
bool enable_stall_check;
};

@ -718,32 +763,47 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}

/*
 * Returns the number of planes for this pipe, ie the number of sprites + 1
 * (primary plane). This doesn't count the cursor plane then.
 */
static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
{
return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
}

/* i915_irq.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder,
bool enable);
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder);
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);

/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
void gen6_reset_rps_interrupts(struct drm_device *dev);
void gen6_enable_rps_interrupts(struct drm_device *dev);
void gen6_disable_rps_interrupts(struct drm_device *dev);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
/*
 * We only use drm_irq_uninstall() at unload and VT switch, so
 * this is the only thing we need to check.
 */
return !dev_priv->pm._irqs_disabled;
return dev_priv->pm.irqs_enabled;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc);
void i9xx_check_fifo_underruns(struct drm_device *dev);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);

/* intel_crt.c */
@ -776,11 +836,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);

/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
struct intel_engine_cs *ring);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
@ -790,7 +846,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
void intel_frontbuffer_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
/**
 * intel_frontbuffer_flip - prepare frontbuffer flip
 * intel_frontbuffer_flip - synchronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
@ -808,6 +864,18 @@ void intel_frontbuffer_flip(struct drm_device *dev,
}

void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);


/* intel_audio.c */
void intel_init_audio(struct drm_device *dev);
void intel_audio_codec_enable(struct intel_encoder *encoder);
void intel_audio_codec_disable(struct intel_encoder *encoder);

/* intel_display.c */
const char *intel_output_name(int output);
bool intel_has_pending_fb_unpin(struct drm_device *dev);
int intel_pch_rawclk(struct drm_device *dev);
void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
@ -828,8 +896,12 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
void intel_wait_for_vblank(struct drm_device *dev, int pipe);
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
static inline void
intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
drm_wait_one_vblank(dev, pipe);
}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport);
@ -839,8 +911,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
struct intel_engine_cs *pipelined);
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
struct drm_framebuffer *
@ -850,6 +922,7 @@ __intel_framebuffer_create(struct drm_device *dev,
void intel_prepare_page_flip(struct drm_device *dev, int plane);
void intel_finish_page_flip(struct drm_device *dev, int pipe);
void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
void intel_check_page_flip(struct drm_device *dev, int pipe);

/* shared dpll functions */
struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
@ -861,7 +934,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
void intel_put_shared_dpll(struct intel_crtc *crtc);

void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);

/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
enum pipe pipe);
void assert_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@ -873,17 +952,17 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
void intel_display_handle_reset(struct drm_device *dev);
void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config);
void intel_dp_set_m_n(struct intel_crtc *crtc);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
void
ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
@ -891,14 +970,13 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
bool intel_crtc_active(struct drm_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_config *pipe_config);
int intel_format_to_fourcc(int format);
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);

void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);

/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@ -919,24 +997,18 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
void intel_edp_backlight_on(struct intel_dp *intel_dp);
void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
void intel_edp_psr_enable(struct intel_dp *intel_dp);
void intel_edp_psr_disable(struct intel_dp *intel_dp);
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
void intel_edp_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_edp_psr_init(struct drm_device *dev);

int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_bw(struct intel_dp *intel_dp);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);

/* intel_dp_mst.c */
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@ -951,9 +1023,9 @@ void intel_dvo_init(struct drm_device *dev);
/* legacy fbdev emulation in intel_fbdev.c */
#ifdef CONFIG_DRM_I915_FBDEV
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_initial_config(void *data, async_cookie_t cookie);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
extern void intel_fbdev_restore_mode(struct drm_device *dev);
#else
@ -962,7 +1034,7 @@ static inline int intel_fbdev_init(struct drm_device *dev)
return 0;
}

static inline void intel_fbdev_initial_config(struct drm_device *dev)
static inline void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
}

@ -970,7 +1042,7 @@ static inline void intel_fbdev_fini(struct drm_device *dev)
{
}

static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
}

@ -1026,7 +1098,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
int fitting_mode);
void intel_panel_set_backlight_acpi(struct intel_connector *connector,
u32 level, u32 max);
int intel_panel_setup_backlight(struct drm_connector *connector);
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
@ -1036,6 +1108,41 @@ extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
void intel_backlight_register(struct drm_device *dev);
void intel_backlight_unregister(struct drm_device *dev);


/* intel_psr.c */
bool intel_psr_is_enabled(struct drm_device *dev);
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_psr_init(struct drm_device *dev);

/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);

bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);

void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);

/* intel_pm.c */
void intel_init_clock_gating(struct drm_device *dev);
@ -1054,17 +1161,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_remove(struct drm_i915_private *);
bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
void intel_init_gt_powersave(struct drm_device *dev);
void intel_cleanup_gt_powersave(struct drm_device *dev);
void intel_enable_gt_powersave(struct drm_device *dev);
@ -1075,14 +1171,10 @@ void ironlake_teardown_rc6(struct drm_device *dev);
void gen6_update_ring_freq(struct drm_device *dev);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);


/* intel_sdvo.c */
@ -1093,13 +1185,18 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
enum plane plane);
void intel_plane_restore(struct drm_plane *plane);
int intel_plane_set_property(struct drm_plane *plane,
struct drm_property *prop,
uint64_t val);
int intel_plane_restore(struct drm_plane *plane);
void intel_plane_disable(struct drm_plane *plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);

bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);

/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);

@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("\n");

power_domain = intel_display_port_power_domain(encoder);
if (!intel_display_power_enabled(dev_priv, power_domain))
if (!intel_display_power_is_enabled(dev_priv, power_domain))
return false;

/* XXX: this only works for one DSI output */
@ -423,9 +423,11 @@ static u16 txclkesc(u32 divider, unsigned int us)
}

/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
u16 burst_mode_ratio)
{
return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
8 * 100), lane_count);
}

static void set_dsi_timings(struct drm_encoder *encoder,
@ -451,10 +453,12 @@ static void set_dsi_timings(struct drm_encoder *encoder,
vbp = mode->vtotal - mode->vsync_end;

/* horizontal values are in terms of high speed byte clock */
hactive = txbyteclkhs(hactive, bpp, lane_count);
hfp = txbyteclkhs(hfp, bpp, lane_count);
hsync = txbyteclkhs(hsync, bpp, lane_count);
hbp = txbyteclkhs(hbp, bpp, lane_count);
hactive = txbyteclkhs(hactive, bpp, lane_count,
intel_dsi->burst_mode_ratio);
hfp = txbyteclkhs(hfp, bpp, lane_count, intel_dsi->burst_mode_ratio);
hsync = txbyteclkhs(hsync, bpp, lane_count,
intel_dsi->burst_mode_ratio);
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);

I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
@ -541,12 +545,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->htotal, bpp,
intel_dsi->lane_count) + 1);
intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
} else {
I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
txbyteclkhs(adjusted_mode->vtotal *
adjusted_mode->htotal,
bpp, intel_dsi->lane_count) + 1);
bpp, intel_dsi->lane_count,
intel_dsi->burst_mode_ratio) + 1);
}
I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), intel_dsi->lp_rx_timeout);
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), intel_dsi->turn_arnd_val);

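The new burst_mode_ratio parameter scales the pixel-to-byte-clock conversion; the ratio is expressed in hundredths, hence the 8 * 100 divisor. A worked example (editorial, with assumed values):

/* Editorial sketch: pixels = 1920, bpp = 24, lane_count = 4,
 * burst_mode_ratio = 100 (i.e. non-burst):
 *   DIV_ROUND_UP(1920 * 24 * 100, 8 * 100) = 5760 bytes,
 *   DIV_ROUND_UP(5760, 4)                  = 1440 byte clocks per lane.
 * With burst_mode_ratio = 108 the first step becomes
 *   DIV_ROUND_UP(1920 * 24 * 108, 800)     = 6221, giving 1556 per lane. */
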
@ -116,6 +116,8 @@ struct intel_dsi {
u16 clk_hs_to_lp_count;

u16 init_count;
u32 pclk;
u16 burst_mode_ratio;

/* all delays in ms */
u16 backlight_off_delay;

@ -271,6 +271,8 @@ static bool generic_init(struct intel_dsi_device *dsi)
u32 ths_prepare_ns, tclk_trail_ns;
u32 tclk_prepare_clkzero, ths_prepare_hszero;
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
u16 burst_mode_ratio;

DRM_DEBUG_KMS("\n");

@ -284,8 +286,6 @@ static bool generic_init(struct intel_dsi_device *dsi)
else if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565)
bits_per_pixel = 16;

bitrate = (mode->clock * bits_per_pixel) / intel_dsi->lane_count;

intel_dsi->operation_mode = mipi_config->is_cmd_mode;
intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
@ -297,6 +297,40 @@ static bool generic_init(struct intel_dsi_device *dsi)
intel_dsi->video_frmt_cfg_bits =
mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;

pclk = mode->clock;

/* Burst Mode Ratio
 * Target ddr frequency from VBT / non burst ddr freq
 * multiply by 100 to preserve remainder
 */
if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
if (mipi_config->target_burst_mode_freq) {
computed_ddr =
(pclk * bits_per_pixel) / intel_dsi->lane_count;

if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
return false;
}

burst_mode_ratio = DIV_ROUND_UP(
mipi_config->target_burst_mode_freq * 100,
computed_ddr);

pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
return false;
}
} else
burst_mode_ratio = 100;

intel_dsi->burst_mode_ratio = burst_mode_ratio;
intel_dsi->pclk = pclk;

bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count;

switch (intel_dsi->escape_clk_div) {
case 0:
tlpx_ns = 50;

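The burst-mode-ratio block above scales the effective pixel clock by target_burst_mode_freq / computed_ddr, carrying two extra decimal digits through the integer math. Worked numbers (editorial, with assumed VBT values):

/* Editorial sketch: pclk = 100000 kHz, bits_per_pixel = 24, lane_count = 4:
 *   computed_ddr = 100000 * 24 / 4 = 600000 kHz.
 * With a VBT target_burst_mode_freq of 648000 kHz:
 *   burst_mode_ratio = DIV_ROUND_UP(648000 * 100, 600000) = 108,
 *   pclk             = DIV_ROUND_UP(100000 * 108, 100)    = 108000 kHz. */
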
@ -134,8 +134,7 @@ static u32 dsi_rr_formula(const struct drm_display_mode *mode,
#else

/* Get DSI clock from pixel clock */
static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,
int pixel_format, int lane_count)
static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
{
u32 dsi_clk_khz;
u32 bpp;
@ -156,7 +155,7 @@ static u32 dsi_clk_from_pclk(const struct drm_display_mode *mode,

/* DSI data rate = pixel clock * bits per pixel / lane count
   pixel clock is converted from KHz to Hz */
dsi_clk_khz = DIV_ROUND_CLOSEST(mode->clock * bpp, lane_count);
dsi_clk_khz = DIV_ROUND_CLOSEST(pclk * bpp, lane_count);

return dsi_clk_khz;
}
@ -228,14 +227,12 @@ static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
struct dsi_mnp dsi_mnp;
u32 dsi_clk;

dsi_clk = dsi_clk_from_pclk(mode, intel_dsi->pixel_format,
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);

ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);

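Since dsi_clk_from_pclk() now takes the burst-adjusted pclk stored by generic_init(), the PLL is programmed for the burst link rate rather than the nominal mode clock. Continuing the numbers from the sketch above (editorial):

/* Editorial sketch: pclk = 108000 kHz, bpp = 24, lane_count = 4:
 *   dsi_clk_khz = DIV_ROUND_CLOSEST(108000 * 24, 4) = 648000 kHz,
 * matching the VBT target burst frequency that fed the ratio. */
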
@ -85,7 +85,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501",
.dvo_reg = DVOC,
.dvo_reg = DVOB,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
}
@ -185,12 +185,13 @@ static void intel_enable_dvo(struct intel_encoder *encoder)
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);

I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&crtc->config.requested_mode,
&crtc->config.adjusted_mode);

I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);

intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}

@ -226,10 +227,6 @@ static void intel_dvo_dpms(struct drm_connector *connector, int mode)

intel_crtc_update_dpms(crtc);

intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&config->requested_mode,
&config->adjusted_mode);

intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);

@ -24,6 +24,7 @@
 * David Airlie
 */

#include <linux/async.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@ -31,7 +32,7 @@
//#include <linux/mm.h>
//#include <linux/tty.h>
#include <linux/sysrq.h>
//#include <linux/delay.h>
#include <linux/delay.h>
#include <linux/fb.h>
//#include <linux/init.h>
//#include <linux/vga_switcheroo.h>
@ -70,11 +71,36 @@ struct fb_info *framebuffer_alloc(size_t size, struct device *dev)
#undef BYTES_PER_LONG
}

static int intel_fbdev_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct intel_fbdev *ifbdev =
container_of(fb_helper, struct intel_fbdev, helper);
int ret;

ret = drm_fb_helper_set_par(info);

if (ret == 0) {
/*
 * FIXME: fbdev presumes that all callbacks also work from
 * atomic contexts and relies on that for emergency oops
 * printing. KMS totally doesn't do that and the locking here is
 * by far not the only place this goes wrong. Ignore this for
 * now until we solve this for real.
 */
mutex_lock(&fb_helper->dev->struct_mutex);
ret = i915_gem_object_set_to_gtt_domain(ifbdev->fb->obj,
true);
mutex_unlock(&fb_helper->dev->struct_mutex);
}

return ret;
}

static struct fb_ops intelfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_set_par = intel_fbdev_set_par,
// .fb_fillrect = cfb_fillrect,
// .fb_copyarea = cfb_copyarea,
// .fb_imageblit = cfb_imageblit,
@ -103,40 +129,40 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;

mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
8), 512);
mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);

size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = main_fb_obj;
obj->stride = mode_cmd.pitches[0];
if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
obj->stride = mode_cmd.pitches[0];

/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_unref;
}

fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
if (IS_ERR(fb)) {
ret = PTR_ERR(fb);
goto out_unpin;
goto out_unref;
}

/* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
if (ret) {
DRM_ERROR("failed to pin obj: %d\n", ret);
goto out_fb;
}

ifbdev->fb = to_intel_framebuffer(fb);

return 0;

out_unpin:
i915_gem_object_ggtt_unpin(obj);
out_fb:
drm_framebuffer_remove(fb);
out_unref:
drm_gem_object_unreference(&obj->base);
out:
@ -302,6 +328,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
@ -310,24 +337,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;

/*
 * If the user specified any force options, just bail here
 * and use that config.
 */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;

fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;

if (!enabled[i])
continue;

if (connector->force != DRM_FORCE_UNSPECIFIED)
return false;
}
uint64_t conn_configured = 0, mask;
int pass = 0;

save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
@ -335,7 +346,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
return false;

memcpy(save_enabled, enabled, dev->mode_config.num_connector);

mask = (1 << fb_helper->connector_count) - 1;
retry:
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@ -345,20 +357,38 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;

if (conn_configured & (1 << i))
continue;

if (pass == 0 && !connector->has_tile)
continue;

if (connector->status == connector_status_connected)
num_connectors_detected++;

if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
conn_configured |= (1 << i);
continue;
}

if (connector->force == DRM_FORCE_OFF) {
DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
connector->name);
enabled[i] = false;
continue;
}

encoder = connector->encoder;
if (!encoder || WARN_ON(!encoder->crtc)) {
if (connector->force > DRM_FORCE_OFF)
goto bail;

DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
conn_configured |= (1 << i);
continue;
}

@ -374,8 +404,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
for (j = 0; j < fb_helper->connector_count; j++) {
if (crtcs[j] == new_crtc) {
DRM_DEBUG_KMS("fallback: cloned configuration\n");
fallback = true;
goto out;
goto bail;
}
}

@ -387,8 +416,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,

/* try for preferred next */
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
connector->name);
DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
connector->name, connector->has_tile);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
@ -431,6 +460,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");

fallback = false;
conn_configured |= (1 << i);
}

if ((conn_configured & mask) != mask) {
pass++;
goto retry;
}

/*
@ -446,8 +481,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
fallback = true;
}

out:
if (fallback) {
bail:
DRM_DEBUG_KMS("Not using firmware configuration\n");
memcpy(enabled, save_enabled, dev->mode_config.num_connector);
kfree(save_enabled);
@ -627,9 +662,9 @@ int intel_fbdev_init(struct drm_device *dev)
return 0;
}

void intel_fbdev_initial_config(struct drm_device *dev)
void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = data;
struct intel_fbdev *ifbdev = dev_priv->fbdev;

/* Due to peculiar init order wrt to hpd handling this is separate. */

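The rewritten intel_fb_initial_config() replaces the early force-option bail-out with a two-pass loop: pass 0 only places tiled connectors, then the loop retries until every connector bit in the mask is accounted for. A minimal sketch of that pattern (editorial, detached from the DRM types; connector_has_tile() stands in for the has_tile flag):

/* Editorial sketch of the two-pass bitmask loop used above. */
uint64_t conn_configured = 0;
uint64_t mask = (1 << connector_count) - 1;
int pass = 0;
retry:
for (i = 0; i < connector_count; i++) {
	if (conn_configured & (1 << i))
		continue;	/* already placed on an earlier pass */
	if (pass == 0 && !connector_has_tile(i))
		continue;	/* pass 0: tiled connectors first */
	/* ... pick a crtc/mode for connector i ... */
	conn_configured |= (1 << i);
}
if ((conn_configured & mask) != mask) {
	pass++;		/* pass 1: everything that remains */
	goto retry;
}
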
381
drivers/video/drm/i915/intel_fifo_underrun.c
Normal file
@ -0,0 +1,381 @@
|
||||
/*
|
||||
* Copyright © 2014 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
|
||||
* IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Daniel Vetter <daniel.vetter@ffwll.ch>
|
||||
*
|
||||
*/
|
||||
|
||||
#include "i915_drv.h"
|
||||
#include "intel_drv.h"
|
||||
|
||||
/**
|
||||
* DOC: fifo underrun handling
|
||||
*
|
||||
* The i915 driver checks for display fifo underruns using the interrupt signals
|
||||
* provided by the hardware. This is enabled by default and fairly useful to
|
||||
* debug display issues, especially watermark settings.
|
||||
*
|
||||
* If an underrun is detected this is logged into dmesg. To avoid flooding logs
|
||||
* and occupying the cpu underrun interrupts are disabled after the first
|
||||
* occurrence until the next modeset on a given pipe.
|
||||
*
|
||||
* Note that underrun detection on gmch platforms is a bit more ugly since there
|
||||
* is no interrupt (despite that the signalling bit is in the PIPESTAT pipe
|
||||
* interrupt register). Also on some other platforms underrun interrupts are
|
||||
* shared, which means that if we detect an underrun we need to disable underrun
|
||||
* reporting on all pipes.
|
||||
*
|
||||
* The code also supports underrun detection on the PCH transcoder.
|
||||
*/
|
||||
|
||||
static bool ivb_can_enable_err_int(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc;
|
||||
enum pipe pipe;
|
||||
|
||||
assert_spin_locked(&dev_priv->irq_lock);
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
|
||||
|
||||
if (crtc->cpu_fifo_underrun_disabled)
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool cpt_can_enable_serr_int(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
enum pipe pipe;
|
||||
struct intel_crtc *crtc;
|
||||
|
||||
assert_spin_locked(&dev_priv->irq_lock);
|
||||
|
||||
for_each_pipe(dev_priv, pipe) {
|
||||
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
|
||||
|
||||
if (crtc->pch_fifo_underrun_disabled)
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* i9xx_check_fifo_underruns - check for fifo underruns
|
||||
* @dev_priv: i915 device instance
|
||||
*
|
||||
* This function checks for fifo underruns on GMCH platforms. This needs to be
|
||||
* done manually on modeset to make sure that we catch all underruns since they
|
||||
* do not generate an interrupt by themselves on these platforms.
|
||||
*/
|
||||
void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_crtc *crtc;
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
||||
for_each_intel_crtc(dev_priv->dev, crtc) {
|
||||
u32 reg = PIPESTAT(crtc->pipe);
|
||||
u32 pipestat;
|
||||
|
||||
if (crtc->cpu_fifo_underrun_disabled)
|
||||
continue;
|
||||
|
||||
pipestat = I915_READ(reg) & 0xffff0000;
|
||||
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
|
||||
continue;
|
||||
|
||||
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
|
||||
POSTING_READ(reg);
|
||||
|
||||
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
|
||||
}
|
||||
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
}
|
||||
|
||||
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum pipe pipe,
|
||||
bool enable, bool old)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 reg = PIPESTAT(pipe);
|
||||
u32 pipestat = I915_READ(reg) & 0xffff0000;
|
||||
|
||||
assert_spin_locked(&dev_priv->irq_lock);
|
||||
|
||||
if (enable) {
|
||||
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
|
||||
POSTING_READ(reg);
|
||||
} else {
|
||||
if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
|
||||
DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
|
||||
}
|
||||
}
|
||||
|
||||
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum pipe pipe, bool enable)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
|
||||
DE_PIPEB_FIFO_UNDERRUN;
|
||||
|
||||
if (enable)
|
||||
ironlake_enable_display_irq(dev_priv, bit);
|
||||
else
|
||||
ironlake_disable_display_irq(dev_priv, bit);
|
||||
}
|
||||
|
||||
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum pipe pipe,
|
||||
bool enable, bool old)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
if (enable) {
|
||||
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
|
||||
|
||||
if (!ivb_can_enable_err_int(dev))
|
||||
return;
|
||||
|
||||
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
|
||||
} else {
|
||||
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
|
||||
|
||||
if (old &&
|
||||
I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
|
||||
DRM_ERROR("uncleared fifo underrun on pipe %c\n",
|
||||
pipe_name(pipe));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum pipe pipe, bool enable)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
assert_spin_locked(&dev_priv->irq_lock);
|
||||
|
||||
if (enable)
|
||||
dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
|
||||
else
|
||||
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
|
||||
I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
|
||||
POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
|
||||
}
|
||||
|
||||
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum transcoder pch_transcoder,
|
||||
bool enable)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
|
||||
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
|
||||
|
||||
if (enable)
|
||||
ibx_enable_display_interrupt(dev_priv, bit);
|
||||
else
|
||||
ibx_disable_display_interrupt(dev_priv, bit);
|
||||
}
|
||||
|
||||
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
|
||||
enum transcoder pch_transcoder,
|
||||
bool enable, bool old)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (enable) {
|
||||
I915_WRITE(SERR_INT,
|
||||
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
|
||||
|
||||
if (!cpt_can_enable_serr_int(dev))
|
||||
return;
|
||||
|
||||
ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
|
||||
} else {
|
||||
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
|
||||
|
||||
if (old && I915_READ(SERR_INT) &
|
||||
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
|
||||
DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
|
||||
transcoder_name(pch_transcoder));
|
||||
}
|
||||
}
|
}

static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (HAS_GMCH_DISPLAY(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev) || IS_GEN9(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

/**
 * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
 * @dev_priv: i915 device instance
 * @pipe: (CPU) pipe to set state for
 * @enable: whether underruns should be reported or not
 *
 * This function sets the fifo underrun state for @pipe. It is used in the
 * modeset code to avoid false positives since on many platforms underruns are
 * expected when disabling or enabling the pipe.
 *
 * Notice that on some platforms disabling underrun reports for one pipe
 * disables for all due to shared interrupts. Actual reporting is still per-pipe
 * though.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
					   enum pipe pipe, bool enable)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
						      enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool
__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
 * @dev_priv: i915 device instance
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: whether underruns should be reported or not
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev_priv->dev))
		ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
						enable);
	else
		cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
						enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return old;
}

/**
 * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
 * @dev_priv: i915 device instance
 * @pipe: (CPU) pipe to set state for
 *
 * This handles a CPU fifo underrun interrupt, generating an underrun warning
 * into dmesg if underrun reporting is enabled and then disables the underrun
 * interrupt to avoid an irq storm.
 */
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe)
{
	/* GMCH can't disable fifo underruns, filter them. */
	if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
	    !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
		return;

	if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
		DRM_ERROR("CPU pipe %c FIFO underrun\n",
			  pipe_name(pipe));
}

/**
 * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
 * @dev_priv: i915 device instance
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 *
 * This handles a PCH fifo underrun interrupt, generating an underrun warning
 * into dmesg if underrun reporting is enabled and then disables the underrun
 * interrupt to avoid an irq storm.
 */
void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
					 enum transcoder pch_transcoder)
{
	if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
						  false))
		DRM_ERROR("PCH transcoder %c FIFO underrun\n",
			  transcoder_name(pch_transcoder));
}
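The usual calling pattern for these helpers is to mask expected underruns across a modeset step and then restore the caller's previous setting. A minimal sketch of that pattern follows; the wrapper function and the elided pipe-disable step are hypothetical, not part of this commit:

/* Illustrative sketch only -- not part of this commit. */
static void example_disable_pipe_quietly(struct drm_i915_private *dev_priv,
					 enum pipe pipe)
{
	bool old;

	/* Suppress reports: underruns are expected while the pipe goes down. */
	old = intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* ... actually disable the pipe here ... */

	/* Restore whatever reporting state the caller had configured. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, old);
}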
279	drivers/video/drm/i915/intel_frontbuffer.c	Normal file
@ -0,0 +1,279 @@
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 */

/**
 * DOC: frontbuffer tracking
 *
 * Many features require us to track changes to the currently active
 * frontbuffer, especially rendering targeted at the frontbuffer.
 *
 * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
 * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
 * then called when the contents of the frontbuffer are invalidated, when
 * frontbuffer rendering has stopped again to flush out all the changes and when
 * the frontbuffer is exchanged with a flip. Subsystems interested in
 * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
 * into the relevant places and filter for the frontbuffer slots that they are
 * interested in.
 *
 * On a high level there are two types of powersaving features. The first type
 * works like a special cache (FBC and PSR) and is interested in when caching
 * should stop and when it can be restarted. This is done by placing callbacks
 * into the invalidate and the flush functions: At invalidate the caching must
 * be stopped and at flush time it can be restarted. And maybe they need to know
 * when the frontbuffer changes (e.g. when the hw doesn't initiate an invalidate
 * and flush on its own) which can be achieved with placing callbacks into the
 * flip functions.
 *
 * The other type of display power saving feature only cares about busyness
 * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
 * busyness. There is no direct way to detect idleness. Instead an idle timer
 * (a delayed work item) should be started from the flush and flip functions
 * and cancelled as soon as busyness is detected.
 *
 * Note that there's also an older frontbuffer activity tracking scheme which
 * just tracks general activity. This is done by the various mark_busy and
 * mark_idle functions. For display power management features using these
 * functions is deprecated and should be avoided.
 */
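A minimal sketch of the cache-style consumer described above may help: stop caching on invalidate, restart on flush, filtering for the slots the feature cares about. The example_* names and the slot-mask choice are hypothetical, not part of this commit:

/* Illustrative sketch only -- not part of this commit. */
#define EXAMPLE_FEATURE_SLOTS INTEL_FRONTBUFFER_ALL_MASK(PIPE_A)

static void example_feature_invalidate(unsigned frontbuffer_bits)
{
	/* Frontbuffer contents are about to change: stop caching. */
	if (frontbuffer_bits & EXAMPLE_FEATURE_SLOTS)
		/* stop_caching(); */ ;
}

static void example_feature_flush(unsigned frontbuffer_bits)
{
	/* Rendering has completed: safe to restart caching. */
	if (frontbuffer_bits & EXAMPLE_FEATURE_SLOTS)
		/* restart_caching(); */ ;
}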
#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static void intel_increase_pllclock(struct drm_device *dev,
				    enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (!HAS_GMCH_DISPLAY(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);

		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}

/**
 * intel_mark_fb_busy - mark given planes as busy
 * @dev: DRM device
 * @frontbuffer_bits: bits for the affected planes
 * @ring: optional ring for asynchronous commands
 *
 * This function gets called every time the screen contents change. It can be
 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
 */
static void intel_mark_fb_busy(struct drm_device *dev,
			       unsigned frontbuffer_bits,
			       struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (!i915.powersave)
		return;

	for_each_pipe(dev_priv, pipe) {
		if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
			continue;

		intel_increase_pllclock(dev, pipe);
		if (ring && intel_fbc_enabled(dev))
			ring->fbc_dirty = true;
	}
}

/**
 * intel_fb_obj_invalidate - invalidate frontbuffer object
 * @obj: GEM object to invalidate
 * @ring: set for asynchronous rendering
 *
 * This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
			     struct intel_engine_cs *ring)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

	if (ring) {
		mutex_lock(&dev_priv->fb_tracking.lock);
		dev_priv->fb_tracking.busy_bits
			|= obj->frontbuffer_bits;
		dev_priv->fb_tracking.flip_bits
			&= ~obj->frontbuffer_bits;
		mutex_unlock(&dev_priv->fb_tracking.lock);
	}

	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);

	intel_psr_invalidate(dev, obj->frontbuffer_bits);
}

/**
 * intel_frontbuffer_flush - flush frontbuffer
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed and frontbuffer caching can be started again. Flushes will get
 * delayed if they're blocked by some outstanding asynchronous rendering.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flush(struct drm_device *dev,
			     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Delay flushing when rings are still busy. */
	mutex_lock(&dev_priv->fb_tracking.lock);
	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);

	intel_psr_flush(dev, frontbuffer_bits);

	/*
	 * FIXME: Unconditional fbc flushing here is a rather gross hack and
	 * needs to be reworked into a proper frontbuffer tracking scheme like
	 * psr employs.
	 */
	if (dev_priv->fbc.need_sw_cache_clean) {
		dev_priv->fbc.need_sw_cache_clean = false;
		bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
	}
}

/**
 * intel_fb_obj_flush - flush frontbuffer object
 * @obj: GEM object to flush
 * @retire: set when retiring asynchronous rendering
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again. If @retire is true
 * then any delayed flushes will be unblocked.
 */
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
			bool retire)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned frontbuffer_bits;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (!obj->frontbuffer_bits)
		return;

	frontbuffer_bits = obj->frontbuffer_bits;

	if (retire) {
		mutex_lock(&dev_priv->fb_tracking.lock);
		/* Filter out new bits since rendering started. */
		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;

		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
		mutex_unlock(&dev_priv->fb_tracking.lock);
	}

	intel_frontbuffer_flush(dev, frontbuffer_bits);
}

/**
 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on @obj. The actual
 * frontbuffer flushing will be delayed until completion is signalled with
 * intel_frontbuffer_flip_complete. If an invalidate happens in between this
 * flush will be cancelled.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
				    unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->fb_tracking.lock);
	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
	/* Remove stale busy bits due to the old buffer. */
	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);
}

/**
 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after the flip has been latched and will complete
 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
 *
 * Can be called without any locks held.
 */
void intel_frontbuffer_flip_complete(struct drm_device *dev,
				     unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->fb_tracking.lock);
	/* Mask any cancelled flips. */
	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
	mutex_unlock(&dev_priv->fb_tracking.lock);

	intel_frontbuffer_flush(dev, frontbuffer_bits);
}
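The flip half of the tracking protocol pairs the two functions above; a minimal sketch of the call order for one page flip (the wrapper is hypothetical, not part of this commit):

/* Illustrative sketch only -- not part of this commit. */
static void example_flip_sequence(struct drm_device *dev,
				  unsigned frontbuffer_bits)
{
	/* 1. Flip scheduled: record flip bits, drop stale busy bits. */
	intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

	/* ... the hardware latches the flip on the next vblank ... */

	/* 2. Flip latched: run the flush, unless an invalidate cancelled it. */
	intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
}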
@ -166,6 +166,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
	POSTING_READ(VIDEO_DIP_CTL);
}

static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	u32 val = I915_READ(VIDEO_DIP_CTL);

	if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
		return val & VIDEO_DIP_ENABLE;

	return false;
}

static void ibx_write_infoframe(struct drm_encoder *encoder,
				enum hdmi_infoframe_type type,
				const void *frame, ssize_t len)
@ -204,6 +217,17 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
	POSTING_READ(reg);
}

static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
	u32 val = I915_READ(reg);

	return val & VIDEO_DIP_ENABLE;
}

static void cpt_write_infoframe(struct drm_encoder *encoder,
				enum hdmi_infoframe_type type,
				const void *frame, ssize_t len)
@ -245,6 +269,17 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
	POSTING_READ(reg);
}

static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
	u32 val = I915_READ(reg);

	return val & VIDEO_DIP_ENABLE;
}

static void vlv_write_infoframe(struct drm_encoder *encoder,
				enum hdmi_infoframe_type type,
				const void *frame, ssize_t len)
@ -283,6 +318,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
	POSTING_READ(reg);
}

static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
	u32 val = I915_READ(reg);

	return val & VIDEO_DIP_ENABLE;
}

static void hsw_write_infoframe(struct drm_encoder *encoder,
				enum hdmi_infoframe_type type,
				const void *frame, ssize_t len)
@ -320,6 +366,18 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
	POSTING_READ(ctl_reg);
}

static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
	u32 val = I915_READ(ctl_reg);

	return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
		      VIDEO_DIP_ENABLE_VS_HSW);
}

/*
 * The data we write to the DIP data buffer registers is 1 byte bigger than the
 * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@ -661,14 +719,6 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
	if (crtc->config.has_hdmi_sink)
		hdmi_val |= HDMI_MODE_SELECT_HDMI;

	if (crtc->config.has_audio) {
		WARN_ON(!crtc->config.has_hdmi_sink);
		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		hdmi_val |= SDVO_AUDIO_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	if (HAS_PCH_CPT(dev))
		hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
	else if (IS_CHERRYVIEW(dev))
@ -690,7 +740,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_enabled(dev_priv, power_domain))
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_hdmi->hdmi_reg);
@ -732,7 +782,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
	if (tmp & HDMI_MODE_SELECT_HDMI)
		pipe_config->has_hdmi_sink = true;

	if (tmp & HDMI_MODE_SELECT_HDMI)
	if (intel_hdmi->infoframe_enabled(&encoder->base))
		pipe_config->has_infoframe = true;

	if (tmp & SDVO_AUDIO_ENABLE)
		pipe_config->has_audio = true;

	if (!HAS_PCH_SPLIT(dev) &&
@ -791,6 +844,13 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
		I915_WRITE(intel_hdmi->hdmi_reg, temp);
		POSTING_READ(intel_hdmi->hdmi_reg);
	}

	if (intel_crtc->config.has_audio) {
		WARN_ON(!intel_crtc->config.has_hdmi_sink);
		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}

static void vlv_enable_hdmi(struct intel_encoder *encoder)
@ -802,9 +862,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	u32 temp;
	u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;

	if (crtc->config.has_audio)
		intel_audio_codec_disable(encoder);

	temp = I915_READ(intel_hdmi->hdmi_reg);

	/* HW workaround for IBX, we need to move the port to transcoder A
@ -869,10 +933,15 @@ static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
		      struct drm_display_mode *mode)
{
	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
	int clock = mode->clock;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		clock *= 2;

	if (clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
					 true))
		return MODE_CLOCK_HIGH;
	if (mode->clock < 20000)
	if (clock < 20000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@ -890,7 +959,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc *crtc)
	if (HAS_GMCH_DISPLAY(dev))
		return false;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
	for_each_intel_encoder(dev, encoder) {
		if (encoder->new_crtc != crtc)
			continue;

@ -917,6 +986,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,

	pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;

	if (pipe_config->has_hdmi_sink)
		pipe_config->has_infoframe = true;

	if (intel_hdmi->color_range_auto) {
		/* See CEA-861-E - 5.1 Default Encoding Parameters */
		if (pipe_config->has_hdmi_sink &&
@ -926,6 +998,10 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
		intel_hdmi->color_range = 0;
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) {
		pipe_config->pixel_multiplier = 2;
	}

	if (intel_hdmi->color_range)
		pipe_config->limited_color_range = true;

@ -967,104 +1043,117 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
	return true;
}

static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
static void
intel_hdmi_unset_edid(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
	struct intel_digital_port *intel_dig_port =
		hdmi_to_dig_port(intel_hdmi);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edid *edid;
	enum intel_display_power_domain power_domain;
	enum drm_connector_status status = connector_status_disconnected;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	intel_hdmi->has_hdmi_sink = false;
	intel_hdmi->has_audio = false;
	intel_hdmi->rgb_quant_range_selectable = false;
	edid = drm_get_edid(connector,
			    intel_gmbus_get_adapter(dev_priv,
						    intel_hdmi->ddc_bus));

	if (edid) {
		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
			status = connector_status_connected;
			if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
				intel_hdmi->has_hdmi_sink =
					drm_detect_hdmi_monitor(edid);
			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
			intel_hdmi->rgb_quant_range_selectable =
				drm_rgb_quant_range_selectable(edid);
		}
		kfree(edid);
	}

	if (status == connector_status_connected) {
		if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
			intel_hdmi->has_audio =
				(intel_hdmi->force_audio == HDMI_AUDIO_ON);
		intel_encoder->type = INTEL_OUTPUT_HDMI;
	}

	intel_display_power_put(dev_priv, power_domain);

	return status;
	kfree(to_intel_connector(connector)->detect_edid);
	to_intel_connector(connector)->detect_edid = NULL;
}

static int intel_hdmi_get_modes(struct drm_connector *connector)
static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
	struct intel_encoder *intel_encoder =
		&hdmi_to_dig_port(intel_hdmi)->base;
	enum intel_display_power_domain power_domain;
	int ret;

	/* We should parse the EDID data and find out if it's an HDMI sink so
	 * we can send audio to it.
	 */
	struct edid *edid;
	bool connected = false;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	ret = intel_ddc_get_modes(connector,
				  intel_gmbus_get_adapter(dev_priv,
							  intel_hdmi->ddc_bus));
	edid = drm_get_edid(connector,
			    intel_gmbus_get_adapter(dev_priv,
						    intel_hdmi->ddc_bus));

	intel_display_power_put(dev_priv, power_domain);

	return ret;
	to_intel_connector(connector)->detect_edid = edid;
	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_hdmi->rgb_quant_range_selectable =
			drm_rgb_quant_range_selectable(edid);

		intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
		if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
			intel_hdmi->has_audio =
				intel_hdmi->force_audio == HDMI_AUDIO_ON;

		if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
			intel_hdmi->has_hdmi_sink =
				drm_detect_hdmi_monitor(edid);

		connected = true;
	}

	return connected;
}

static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
	enum drm_connector_status status;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	intel_hdmi_unset_edid(connector);

	if (intel_hdmi_set_edid(connector)) {
		struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);

		hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
		status = connector_status_connected;
	} else
		status = connector_status_disconnected;

	return status;
}

static void
intel_hdmi_force(struct drm_connector *connector)
{
	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	intel_hdmi_unset_edid(connector);

	if (connector->status != connector_status_connected)
		return;

	intel_hdmi_set_edid(connector);
	hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
}

static int intel_hdmi_get_modes(struct drm_connector *connector)
{
	struct edid *edid;

	edid = to_intel_connector(connector)->detect_edid;
	if (edid == NULL)
		return 0;

	return intel_connector_update_modes(connector, edid);
}

static bool
intel_hdmi_detect_audio(struct drm_connector *connector)
{
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	enum intel_display_power_domain power_domain;
	struct edid *edid;
	bool has_audio = false;
	struct edid *edid;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	edid = drm_get_edid(connector,
			    intel_gmbus_get_adapter(dev_priv,
						    intel_hdmi->ddc_bus));
	if (edid) {
		if (edid->input & DRM_EDID_INPUT_DIGITAL)
	edid = to_intel_connector(connector)->detect_edid;
	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL)
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	intel_display_power_put(dev_priv, power_domain);

	return has_audio;
}
@ -1265,6 +1354,8 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_hdmi_prepare(encoder);

	mutex_lock(&dev_priv->dpio_lock);

	/* program left/right clock distribution */
@ -1370,10 +1461,13 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct intel_hdmi *intel_hdmi = &dport->hdmi;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i;
@ -1381,6 +1475,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)

	mutex_lock(&dev_priv->dpio_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
@ -1417,12 +1520,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* FIXME: Program the support xxx V-dB */
	/* Use 800mV-0dB */
	for (i = 0; i < 4; i++) {
@ -1434,8 +1551,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)

	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
		val &= ~DPIO_SWING_MARGIN_MASK;
		val |= 102 << DPIO_SWING_MARGIN_SHIFT;
		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

@ -1475,6 +1592,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)

	mutex_unlock(&dev_priv->dpio_lock);

	intel_hdmi->set_infoframes(&encoder->base,
				   intel_crtc->config.has_hdmi_sink,
				   adjusted_mode);

	intel_enable_hdmi(encoder);

	vlv_wait_port_ready(dev_priv, dport);
@ -1482,6 +1603,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)

static void intel_hdmi_destroy(struct drm_connector *connector)
{
	kfree(to_intel_connector(connector)->detect_edid);
	drm_connector_cleanup(connector);
	kfree(connector);
}
@ -1489,6 +1611,7 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_hdmi_detect,
	.force = intel_hdmi_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_hdmi_set_property,
	.destroy = intel_hdmi_destroy,
@ -1567,18 +1690,23 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
	if (IS_VALLEYVIEW(dev)) {
		intel_hdmi->write_infoframe = vlv_write_infoframe;
		intel_hdmi->set_infoframes = vlv_set_infoframes;
		intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
	} else if (IS_G4X(dev)) {
		intel_hdmi->write_infoframe = g4x_write_infoframe;
		intel_hdmi->set_infoframes = g4x_set_infoframes;
		intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
	} else if (HAS_DDI(dev)) {
		intel_hdmi->write_infoframe = hsw_write_infoframe;
		intel_hdmi->set_infoframes = hsw_set_infoframes;
		intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
	} else if (HAS_PCH_IBX(dev)) {
		intel_hdmi->write_infoframe = ibx_write_infoframe;
		intel_hdmi->set_infoframes = ibx_set_infoframes;
		intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
	} else {
		intel_hdmi->write_infoframe = cpt_write_infoframe;
		intel_hdmi->set_infoframes = cpt_set_infoframes;
		intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
	}

	if (HAS_DDI(dev))
1938	drivers/video/drm/i915/intel_lrc.c	Normal file
File diff suppressed because it is too large
118	drivers/video/drm/i915/intel_lrc.h	Normal file
@ -0,0 +1,118 @@
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_

#define GEN8_LR_CONTEXT_ALIGN 4096

/* Execlists regs */
#define RING_ELSP(ring)			((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
#define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
#define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)

/* Logical Rings */
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);

int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
/**
 * intel_logical_ring_advance() - advance the ringbuffer tail
 * @ringbuf: Ringbuffer to advance.
 *
 * The tail is only updated in our logical ringbuffer struct.
 */
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
	ringbuf->tail &= ringbuf->size - 1;
}
/**
 * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
 * @ringbuf: Ringbuffer to write to.
 * @data: DWORD to write.
 */
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
					   u32 data)
{
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);

/* Logical Ring Contexts */
int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
				       struct intel_context *ctx);
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct intel_engine_cs *ring,
			    struct intel_context *ctx);

/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);

/**
 * struct intel_ctx_submit_request - queued context submission request
 * @ctx: Context to submit to the ELSP.
 * @ring: Engine to submit it to.
 * @tail: how far in the context's ringbuffer this request goes to.
 * @execlist_link: link in the submission queue.
 * @work: workqueue for processing this request in a bottom half.
 * @elsp_submitted: no. of times this request has been sent to the ELSP.
 *
 * The ELSP only accepts two elements at a time, so we queue context/tail
 * pairs on a given queue (ring->execlist_queue) until the hardware is
 * available. The queue serves a double purpose: we also use it to keep track
 * of the up to 2 contexts currently in the hardware (usually one in execution
 * and the other queued up by the GPU): We only remove elements from the head
 * of the queue when the hardware informs us that an element has been
 * completed.
 *
 * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
 */
struct intel_ctx_submit_request {
	struct intel_context *ctx;
	struct intel_engine_cs *ring;
	u32 tail;

	struct list_head execlist_link;

	int elsp_submitted;
};

void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
void intel_execlists_retire_requests(struct intel_engine_cs *ring);

#endif /* _INTEL_LRC_H_ */
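The inline helpers above are meant to be used in a begin/emit/advance pattern: reserve space, write DWORDs, then wrap the tail. A minimal sketch (the function is hypothetical, not part of this commit; MI_NOOP is just an arbitrary payload):

/* Illustrative sketch only -- not part of this commit. */
static int example_emit_noops(struct intel_ringbuffer *ringbuf)
{
	int i, ret;

	ret = intel_logical_ring_begin(ringbuf, 4);	/* reserve 4 dwords */
	if (ret)
		return ret;

	for (i = 0; i < 4; i++)
		intel_logical_ring_emit(ringbuf, MI_NOOP);	/* write payload */

	intel_logical_ring_advance(ringbuf);	/* wrap tail into the buffer */
	return 0;
}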
@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_enabled(dev_priv, power_domain))
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(lvds_encoder->reg);
@ -823,8 +823,7 @@ bool intel_is_dual_link_lvds(struct drm_device *dev)
	struct intel_encoder *encoder;
	struct intel_lvds_encoder *lvds_encoder;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
	for_each_intel_encoder(dev, encoder) {
		if (encoder->type == INTEL_OUTPUT_LVDS) {
			lvds_encoder = to_lvds_encoder(&encoder->base);

@ -900,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev)
	int pipe;
	u8 pin;

	/*
	 * Unlock registers and just leave them unlocked. Do this before
	 * checking quirk lists to avoid bogus WARNINGs.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(PCH_PP_CONTROL,
			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
	} else {
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
	}
	if (!intel_lvds_supported(dev))
		return;

@ -1098,21 +1108,10 @@ out:
	lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
				 LVDS_A3_POWER_MASK;

	/*
	 * Unlock registers and just
	 * leave them unlocked
	 */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(PCH_PP_CONTROL,
			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
	} else {
		I915_WRITE(PP_CONTROL,
			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
	}
	drm_connector_register(connector);

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_panel_setup_backlight(connector);
	intel_panel_setup_backlight(connector, INVALID_PIPE);

	return;

@ -396,6 +396,16 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
	return -EINVAL;
}

/*
 * If the vendor backlight interface is not in use and ACPI backlight interface
 * is broken, do not bother processing backlight change requests from firmware.
 */
static bool should_ignore_backlight_request(void)
{
	return acpi_video_backlight_support() &&
	       !acpi_video_verify_backlight_support();
}

static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
@ -404,11 +414,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)

	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);

	/*
	 * If the acpi_video interface is not supposed to be used, don't
	 * bother processing backlight level change requests from firmware.
	 */
	if (!acpi_video_verify_backlight_support()) {
	if (should_ignore_backlight_request()) {
		DRM_DEBUG_KMS("opregion backlight request ignored\n");
		return 0;
	}
@ -419,9 +419,8 @@ static uint32_t scale(uint32_t source_val,
	source_val = clamp(source_val, source_min, source_max);

	/* avoid overflows */
	target_val = (uint64_t)(source_val - source_min) *
		(target_max - target_min);
	do_div(target_val, source_max - source_min);
	target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
			(target_max - target_min), source_max - source_min);
	target_val += target_min;

	return target_val;
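For reference, a worked example of the linear mapping scale() computes and what the rounding change buys; the PWM maximum used here is an assumed value for illustration only:

/*
 * Illustrative only -- not part of this commit. Mapping brightness 100
 * from [0..255] onto an assumed PWM range [0..7812]:
 *
 *	100 * 7812 / 255 = 3063.53...
 *
 * The old do_div() form truncated this to 3063; DIV_ROUND_CLOSEST_ULL
 * rounds to the nearest value, 3064.
 */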
@ -522,6 +521,9 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return 0;

	return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}

@ -537,15 +539,17 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	unsigned long flags;
	struct intel_panel *panel = &connector->panel;
	u32 val = 0;

	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
	mutex_lock(&dev_priv->backlight_lock);

	if (panel->backlight.enabled) {
		val = dev_priv->display.get_backlight(connector);
		val = intel_panel_compute_brightness(connector, val);
	}

	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
	mutex_unlock(&dev_priv->backlight_lock);

	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
	return val;
@ -604,6 +608,9 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
	enum pipe pipe = intel_get_pipe_from_connector(connector);
	u32 tmp;

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
	I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
@ -627,14 +634,12 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_panel *panel = &connector->panel;
	enum pipe pipe = intel_get_pipe_from_connector(connector);
	u32 hw_level;
	unsigned long flags;

	if (!panel->backlight.present || pipe == INVALID_PIPE)
	if (!panel->backlight.present)
		return;

	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
	mutex_lock(&dev_priv->backlight_lock);

	WARN_ON(panel->backlight.max == 0);

@ -644,7 +649,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
	if (panel->backlight.enabled)
		intel_panel_actually_set_backlight(connector, hw_level);

	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
	mutex_unlock(&dev_priv->backlight_lock);
}

/* set backlight brightness to level in range [0..max], assuming hw min is
@ -658,12 +663,17 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
	struct intel_panel *panel = &connector->panel;
	enum pipe pipe = intel_get_pipe_from_connector(connector);
	u32 hw_level;
	unsigned long flags;

	/*
	 * INVALID_PIPE may occur during driver init because
	 * connection_mutex isn't held across the entire backlight
	 * setup + modeset readout, and the BIOS can issue the
	 * requests at any time.
	 */
	if (!panel->backlight.present || pipe == INVALID_PIPE)
		return;

	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
	mutex_lock(&dev_priv->backlight_lock);

	WARN_ON(panel->backlight.max == 0);

@ -674,7 +684,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
	if (panel->backlight.enabled)
		intel_panel_actually_set_backlight(connector, hw_level);

	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
	mutex_unlock(&dev_priv->backlight_lock);
}

static void pch_disable_backlight(struct intel_connector *connector)
@ -716,6 +726,9 @@ static void vlv_disable_backlight(struct intel_connector *connector)
	enum pipe pipe = intel_get_pipe_from_connector(connector);
	u32 tmp;

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	intel_panel_actually_set_backlight(connector, 0);

	tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@ -727,10 +740,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_panel *panel = &connector->panel;
	enum pipe pipe = intel_get_pipe_from_connector(connector);
	unsigned long flags;

	if (!panel->backlight.present || pipe == INVALID_PIPE)
	if (!panel->backlight.present)
		return;

	/*
@ -744,12 +755,12 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
		return;
	}

	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
	mutex_lock(&dev_priv->backlight_lock);

	panel->backlight.enabled = false;
	dev_priv->display.disable_backlight(connector);

	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
	mutex_unlock(&dev_priv->backlight_lock);
}

static void bdw_enable_backlight(struct intel_connector *connector)
@ -773,7 +784,8 @@ static void bdw_enable_backlight(struct intel_connector *connector)
	if (panel->backlight.active_low_pwm)
		pch_ctl1 |= BLM_PCH_POLARITY;

	/* BDW always uses the pch pwm controls. */
	/* After LPT, override is the default. */
	if (HAS_PCH_LPT(dev_priv))
		pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;

	I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
@ -903,6 +915,9 @@ static void vlv_enable_backlight(struct intel_connector *connector)
	enum pipe pipe = intel_get_pipe_from_connector(connector);
	u32 ctl, ctl2;

	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
	if (ctl2 & BLM_PWM_ENABLE) {
		DRM_DEBUG_KMS("backlight already enabled\n");
@ -930,14 +945,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_panel *panel = &connector->panel;
	enum pipe pipe = intel_get_pipe_from_connector(connector);
	unsigned long flags;

	if (!panel->backlight.present || pipe == INVALID_PIPE)
	if (!panel->backlight.present)
		return;

	DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));

	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
	mutex_lock(&dev_priv->backlight_lock);

	WARN_ON(panel->backlight.max == 0);

@ -948,13 +962,14 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
	dev_priv->display.enable_backlight(connector);
	panel->backlight.enabled = true;

	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
	mutex_unlock(&dev_priv->backlight_lock);
}

#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
	struct intel_connector *connector = bl_get_data(bd);
	struct intel_panel *panel = &connector->panel;
	struct drm_device *dev = connector->base.dev;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
@ -999,6 +1014,9 @@ static int intel_backlight_device_register(struct intel_connector *connector)
	if (WARN_ON(panel->backlight.device))
		return -ENODEV;

	if (!panel->backlight.present)
		return 0;

	WARN_ON(panel->backlight.max == 0);

	memset(&props, 0, sizeof(props));
@ -1013,6 +1031,11 @@ static int intel_backlight_device_register(struct intel_connector *connector)
			panel->backlight.level,
			props.max_brightness);

	if (panel->backlight.enabled)
		props.power = FB_BLANK_UNBLANK;
	else
		props.power = FB_BLANK_POWERDOWN;

	/*
	 * Note: using the same name independent of the connector prevents
	 * registration of multiple backlight devices in the driver.
@ -1029,6 +1052,10 @@ static int intel_backlight_device_register(struct intel_connector *connector)
		panel->backlight.device = NULL;
		return -ENODEV;
	}

	DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
		      connector->base.name);

	return 0;
}

@ -1062,15 +1089,28 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_panel *panel = &connector->panel;
	int min;

	WARN_ON(panel->backlight.max == 0);

	/*
	 * XXX: If the vbt value is 255, it makes min equal to max, which leads
	 * to problems. There are such machines out there. Either our
	 * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
	 * against this by letting the minimum be at most (arbitrarily chosen)
	 * 25% of the max.
	 */
	min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
	if (min != dev_priv->vbt.backlight.min_brightness) {
		DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
			      dev_priv->vbt.backlight.min_brightness, min);
	}

	/* vbt value is a coefficient in range [0..255] */
	return scale(dev_priv->vbt.backlight.min_brightness, 0, 255,
		     0, panel->backlight.max);
	return scale(min, 0, 255, 0, panel->backlight.max);
}
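A worked example of what the clamp above changes; the PWM maximum used here is an assumed value for illustration only:

/*
 * Illustrative only -- not part of this commit. A bogus VBT
 * min_brightness of 255 would make min == max, so it is clamped first:
 *
 *	min = clamp(255, 0, 64) = 64
 *	scale(64, 0, 255, 0, 7812) = 64 * 7812 / 255 ~= 1961
 *
 * i.e. the effective minimum is capped at roughly 25% of the PWM range.
 */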
static int bdw_setup_backlight(struct intel_connector *connector)
static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@ -1096,7 +1136,7 @@ static int bdw_setup_backlight(struct intel_connector *connector)
	return 0;
}

static int pch_setup_backlight(struct intel_connector *connector)
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@ -1123,7 +1163,7 @@ static int pch_setup_backlight(struct intel_connector *connector)
	return 0;
}

static int i9xx_setup_backlight(struct intel_connector *connector)
static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@ -1155,7 +1195,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
	return 0;
}

static int i965_setup_backlight(struct intel_connector *connector)
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
@ -1185,37 +1225,40 @@ static int i965_setup_backlight(struct intel_connector *connector)
	return 0;
}

static int vlv_setup_backlight(struct intel_connector *connector)
static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
	struct drm_device *dev = connector->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_panel *panel = &connector->panel;
	enum pipe pipe;
	enum pipe p;
	u32 ctl, ctl2, val;

	for_each_pipe(pipe) {
		u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe));
	for_each_pipe(dev_priv, p) {
		u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));

		/* Skip if the modulation freq is already set */
		if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
			continue;

		cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
		I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) |
		I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
			   cur_val);
	}

	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A));
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return -ENODEV;

	ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
	panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;

	ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A));
	ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
	panel->backlight.max = ctl >> 16;
	if (!panel->backlight.max)
		return -ENODEV;

	panel->backlight.min = get_backlight_min_vbt(connector);

	val = _vlv_get_backlight(dev, PIPE_A);
	val = _vlv_get_backlight(dev, pipe);
	panel->backlight.level = intel_panel_compute_brightness(connector, val);

	panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
@ -1224,13 +1267,12 @@ static int vlv_setup_backlight(struct intel_connector *connector)
	return 0;
}

int intel_panel_setup_backlight(struct drm_connector *connector)
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_panel *panel = &intel_connector->panel;
	unsigned long flags;
	int ret;

	if (!dev_priv->vbt.backlight.present) {
@ -1243,9 +1285,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
	}

	/* set level and max in panel struct */
	spin_lock_irqsave(&dev_priv->backlight_lock, flags);
	ret = dev_priv->display.setup_backlight(intel_connector);
	spin_unlock_irqrestore(&dev_priv->backlight_lock, flags);
	mutex_lock(&dev_priv->backlight_lock);
	ret = dev_priv->display.setup_backlight(intel_connector, pipe);
	mutex_unlock(&dev_priv->backlight_lock);

	if (ret) {
		DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@ -1253,15 +1295,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
		return ret;
	}

	intel_backlight_device_register(intel_connector);

	panel->backlight.present = true;

	DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, "
		      "sysfs interface %sregistered\n",
	DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
		      connector->name,
		      panel->backlight.enabled ? "enabled" : "disabled",
		      panel->backlight.level, panel->backlight.max,
		      panel->backlight.device ? "" : "not ");
		      panel->backlight.level, panel->backlight.max);

	return 0;
}
@ -1272,7 +1311,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
	struct intel_panel *panel = &intel_connector->panel;

	panel->backlight.present = false;
	intel_backlight_device_unregister(intel_connector);
}

/* Set up chip specific backlight functions */
@ -1280,7 +1318,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_BROADWELL(dev)) {
	if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
		dev_priv->display.setup_backlight = bdw_setup_backlight;
		dev_priv->display.enable_backlight = bdw_enable_backlight;
		dev_priv->display.disable_backlight = pch_disable_backlight;
@ -1335,3 +1373,19 @@ void intel_panel_fini(struct intel_panel *panel)
		drm_mode_destroy(intel_connector->base.dev,
				 panel->downclock_mode);
}

void intel_backlight_register(struct drm_device *dev)
{
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
		intel_backlight_device_register(connector);
}

void intel_backlight_unregister(struct drm_device *dev)
{
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
		intel_backlight_device_unregister(connector);
}
File diff suppressed because it is too large
481	drivers/video/drm/i915/intel_psr.c	(new file)
@ -0,0 +1,481 @@
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the Display controller supports Panel Self-Refresh on
 * display panels which have a remote frame buffer (RFB) implemented
 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
 * lower standby states when the system is idle but the display is on, as
 * it eliminates display refresh requests to DDR memory completely as long
 * as the frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence the i915 PSR support uses software
 * frontbuffer tracking to make sure it doesn't miss a screen update. For
 * this integration intel_psr_invalidate() and intel_psr_flush() get called
 * by the frontbuffer tracking code. Note that because of locking issues the
 * self-refresh re-enable code is done from a work queue, which must be
 * correctly synchronized/cancelled when shutting down the pipe.
 */
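/*
 * Aside, not part of the commit: a minimal sketch of how a
 * frontbuffer-tracking caller is expected to bracket CPU rendering with
 * the two hooks named above. The function and its fb_bits parameter are
 * hypothetical stand-ins; the real masks come from i915's frontbuffer
 * tracking code.
 */
static void example_cpu_draw(struct drm_device *dev, unsigned fb_bits)
{
	/* rendering is about to dirty the frontbuffer: force PSR exit */
	intel_psr_invalidate(dev, fb_bits);

	/* ... CPU writes to the framebuffer would happen here ... */

	/* writes reached memory: PSR may be re-enabled (via the delayed
	 * work) once no relevant bits remain busy */
	intel_psr_flush(dev, fb_bits);
}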

#include <drm/drmP.h>

#include "intel_drv.h"
#include "i915_drv.h"

static bool is_edp_psr(struct intel_dp *intel_dp)
{
return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}

bool intel_psr_is_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (!HAS_PSR(dev))
return false;

return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}

static void intel_psr_write_vsc(struct intel_dp *intel_dp,
struct edp_vsc_psr *vsc_psr)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
uint32_t *data = (uint32_t *) vsc_psr;
unsigned int i;

/* As per BSpec (Pipe Video Data Island Packet), we need to disable
the video DIP before programming the video DIP data buffer
registers for the DIP being updated. */
I915_WRITE(ctl_reg, 0);
POSTING_READ(ctl_reg);

for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
if (i < sizeof(struct edp_vsc_psr))
I915_WRITE(data_reg + i, *data++);
else
I915_WRITE(data_reg + i, 0);
}

I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
POSTING_READ(ctl_reg);
}

static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;

/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
psr_vsc.sdp_header.HB2 = 0x2;
psr_vsc.sdp_header.HB3 = 0x8;
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
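/*
 * Aside, not part of the commit: a hedged decode of the four header bytes
 * above, going by the eDP table the comment cites. Treat the field
 * meanings as an assumption rather than a spec quotation.
 */
struct example_vsc_sdp_header {
	u8 id;        /* HB0: secondary data packet ID, 0 here */
	u8 type;      /* HB1: 0x7 selects the VSC packet type */
	u8 revision;  /* HB2: 0x2, revision per the cited eDP 1.3 table */
	u8 length;    /* HB3: 0x8 valid payload bytes */
};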

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t aux_clock_divider;
int precharge = 0x3;
bool only_standby = false;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
[2] = DP_SET_POWER & 0xff,
[3] = 1 - 1,
[4] = DP_SET_POWER_D0,
};
int i;

BUILD_BUG_ON(sizeof(aux_msg) > 20);

aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;

/* Enable PSR in sink */
if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
else
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);

/* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));

I915_WRITE(EDP_PSR_AUX_CTL(dev),
DP_AUX_CH_CTL_TIME_OUT_400us |
(sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}
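/*
 * Illustration, not part of the commit: a minimal sketch of the MSB-first
 * byte packing that intel_dp_pack_aux() is used for above, assuming it
 * folds up to four message bytes into one 32-bit AUX data register.
 */
static uint32_t example_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t)src[i]) << ((3 - i) * 8);  /* byte 0 in bits 31:24 */
	return v;
}
/* Note the [3] = 1 - 1 entry in aux_msg[] above: AUX request headers carry
 * the transfer length as length minus one, so this encodes one byte. */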

static void intel_psr_enable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t max_sleep_time = 0x1f;
uint32_t idle_frames = 1;
uint32_t val = 0x0;
const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
bool only_standby = false;

if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
only_standby = true;

if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
val |= EDP_PSR_LINK_STANDBY;
val |= EDP_PSR_TP2_TP3_TIME_0us;
val |= EDP_PSR_TP1_TIME_0us;
val |= EDP_PSR_SKIP_AUX_EXIT;
val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
} else
val |= EDP_PSR_LINK_DISABLE;

I915_WRITE(EDP_PSR_CTL(dev), val |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
}

static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dig_port->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

lockdep_assert_held(&dev_priv->psr.lock);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

dev_priv->psr.source_ok = false;

if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
return false;
}

if (!i915.enable_psr) {
DRM_DEBUG_KMS("PSR disable by flag\n");
return false;
}

/* Below limitations aren't valid for Broadwell */
if (IS_BROADWELL(dev))
goto out;

if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
return false;
}

if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
return false;
}

out:
dev_priv->psr.source_ok = true;
return true;
}

static void intel_psr_do_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;

WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);

/* Enable/Re-enable PSR on the host */
intel_psr_enable_source(intel_dp);

dev_priv->psr.active = true;
}

/**
 * intel_psr_enable - Enable PSR
 * @intel_dp: Intel DP
 *
 * This function can only be called after the pipe is fully trained and enabled.
 */
void intel_psr_enable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;

if (!HAS_PSR(dev)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
return;
}

if (!is_edp_psr(intel_dp)) {
DRM_DEBUG_KMS("PSR not supported by this panel\n");
return;
}

mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
goto unlock;
}

if (!intel_psr_match_conditions(intel_dp))
goto unlock;

dev_priv->psr.busy_frontbuffer_bits = 0;

intel_psr_setup_vsc(intel_dp);

/* Avoid continuous PSR exit by masking memup and hpd */
I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

/* Enable PSR on the panel */
intel_psr_enable_sink(intel_dp);

dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_disable - Disable PSR
 * @intel_dp: Intel DP
 *
 * This function needs to be called before disabling the pipe.
 */
void intel_psr_disable(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;

mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}

if (dev_priv->psr.active) {
I915_WRITE(EDP_PSR_CTL(dev),
I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);

/* Wait till PSR is idle */
if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
DRM_ERROR("Timed out waiting for PSR Idle State\n");

dev_priv->psr.active = false;
} else {
WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
}

dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);

cancel_delayed_work_sync(&dev_priv->psr.work);
}
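/*
 * Illustration, not part of the commit: the _wait_for() call above is a
 * bounded register poll. A minimal open-coded sketch of the same idea,
 * with a hypothetical helper name and a coarse 10us poll interval:
 */
static int example_wait_psr_idle(struct drm_i915_private *dev_priv,
				 struct drm_device *dev)
{
	unsigned int tries = 2000 * 100;	/* ~2s at 10us per try */

	while (tries--) {
		if ((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
		     EDP_PSR_STATUS_STATE_MASK) == 0)
			return 0;	/* idle */
		udelay(10);
	}
	return -ETIMEDOUT;
}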

static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), psr.work.work);
struct intel_dp *intel_dp = dev_priv->psr.enabled;

/* We have to make sure PSR is ready for re-enable,
 * otherwise it stays disabled until the next full enable/disable cycle.
 * PSR might take some time to get fully disabled
 * and be ready for re-enable.
 */
if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
return;
}

mutex_lock(&dev_priv->psr.lock);
intel_dp = dev_priv->psr.enabled;

if (!intel_dp)
goto unlock;

/*
 * The delayed work can race with an invalidate hence we need to
 * recheck. Since psr_flush first clears this and then reschedules we
 * won't ever miss a flush when bailing out here.
 */
if (dev_priv->psr.busy_frontbuffer_bits)
goto unlock;

intel_psr_do_enable(intel_dp);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}

static void intel_psr_exit(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

if (dev_priv->psr.active) {
u32 val = I915_READ(EDP_PSR_CTL(dev));

WARN_ON(!(val & EDP_PSR_ENABLE));

I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);

dev_priv->psr.active = false;
}
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;

mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}

crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;

intel_psr_exit(dev);

frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);

dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_flush - Flush PSR
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_device *dev,
unsigned frontbuffer_bits)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
enum pipe pipe;

mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.enabled) {
mutex_unlock(&dev_priv->psr.lock);
return;
}

crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;

/*
 * On Haswell sprite plane updates don't result in a psr invalidating
 * signal in the hardware. Which means we need to manually fake this in
 * software for all flushes, not just when we've seen a preceding
 * invalidation through frontbuffer rendering.
 */
if (IS_HASWELL(dev) &&
(frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
intel_psr_exit(dev);

if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_delayed_work(&dev_priv->psr.work,
msecs_to_jiffies(100));
mutex_unlock(&dev_priv->psr.lock);
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @dev: DRM device
 *
 * This function is called only once at driver load to initialize basic
 * PSR stuff.
 */
void intel_psr_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
mutex_init(&dev_priv->psr.lock);
}
@ -24,17 +24,12 @@
#ifndef _INTEL_RENDERSTATE_H
#define _INTEL_RENDERSTATE_H

#include <linux/types.h>

struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
const u32 batch_items;
};
#include "i915_drv.h"

extern const struct intel_renderstate_rodata gen6_null_state;
extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;

#define RO_RENDERSTATE(_g) \
const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
File diff suppressed because it is too large
974	drivers/video/drm/i915/intel_renderstate_gen9.c	(new file)
@ -0,0 +1,974 @@
#include "intel_renderstate.h"
|
||||
|
||||
static const u32 gen9_null_state_relocs[] = {
|
||||
0x000007a8,
|
||||
0x000007b4,
|
||||
0x000007bc,
|
||||
0x000007cc,
|
||||
-1,
|
||||
};
|
||||
|
||||
static const u32 gen9_null_state_batch[] = {
|
||||
0x7a000004,
|
||||
0x01000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x69040300,
|
||||
0x78140000,
|
||||
0x04000000,
|
||||
0x7820000a,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x80000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78130002,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x02001808,
|
||||
0x781f0004,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78510009,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78100007,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00010000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x781b0007,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000800,
|
||||
0x00000000,
|
||||
0x78110008,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x781e0003,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x781d0009,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78120002,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78500003,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x781c0002,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x780c0000,
|
||||
0x00000000,
|
||||
0x78520003,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78300000,
|
||||
0x08010040,
|
||||
0x78310000,
|
||||
0x1e000000,
|
||||
0x78320000,
|
||||
0x1e000000,
|
||||
0x78330000,
|
||||
0x1e000000,
|
||||
0x79190002,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x791a0002,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x791b0002,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79120000,
|
||||
0x00000000,
|
||||
0x79130000,
|
||||
0x00000000,
|
||||
0x79140000,
|
||||
0x00000000,
|
||||
0x79150000,
|
||||
0x00000000,
|
||||
0x79160000,
|
||||
0x00000000,
|
||||
0x78150009,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78190009,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x781a0009,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78160009,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78170009,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78490001,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x784a0000,
|
||||
0x00000000,
|
||||
0x784b0000,
|
||||
0x00000004,
|
||||
0x79170101,
|
||||
0x00000000,
|
||||
0x00000080,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79180006,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79180006,
|
||||
0x20000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79180006,
|
||||
0x40000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79180006,
|
||||
0x60000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x61010011,
|
||||
0x00000001, /* reloc */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000001, /* reloc */
|
||||
0x00000000,
|
||||
0x00000001, /* reloc */
|
||||
0x00000000,
|
||||
0x00000001,
|
||||
0x00000000,
|
||||
0x00000001, /* reloc */
|
||||
0x00000000,
|
||||
0x00001001,
|
||||
0x00001001,
|
||||
0x00000001,
|
||||
0x00001001,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x61020001,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79000002,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78050006,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79040002,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79040002,
|
||||
0x40000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79040002,
|
||||
0x80000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79040002,
|
||||
0xc0000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79080001,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x790a0001,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78060003,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78070003,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78040001,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x79110000,
|
||||
0x00000000,
|
||||
0x780d0000,
|
||||
0x00000000,
|
||||
0x79060000,
|
||||
0x00000000,
|
||||
0x7907001f,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x7902000f,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x790c000f,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x780a0003,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78080083,
|
||||
0x00004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x04004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x08004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x0c004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x10004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x14004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x18004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x1c004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x20004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x24004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x28004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x2c004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x30004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x34004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x38004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x3c004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x40004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x44004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x48004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x4c004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x50004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x54004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x58004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x5c004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x60004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x64004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x68004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x6c004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x70004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x74004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x7c004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x80004000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78090043,
|
||||
0x02000000,
|
||||
0x22220000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x78550003,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x680b0001,
|
||||
0x780e0000,
|
||||
0x00000e01,
|
||||
0x78240000,
|
||||
0x00000e41,
|
||||
0x784f0000,
|
||||
0x80000100,
|
||||
0x784d0000,
|
||||
0x40000000,
|
||||
0x782b0000,
|
||||
0x00000000,
|
||||
0x782c0000,
|
||||
0x00000000,
|
||||
0x782d0000,
|
||||
0x00000000,
|
||||
0x782e0000,
|
||||
0x00000000,
|
||||
0x782f0000,
|
||||
0x00000000,
|
||||
0x780f0000,
|
||||
0x00000000,
|
||||
0x78230000,
|
||||
0x00000ea0,
|
||||
0x78210000,
|
||||
0x00000ec0,
|
||||
0x78260000,
|
||||
0x00000000,
|
||||
0x78270000,
|
||||
0x00000000,
|
||||
0x78280000,
|
||||
0x00000000,
|
||||
0x78290000,
|
||||
0x00000000,
|
||||
0x782a0000,
|
||||
0x00000000,
|
||||
0x7b000005,
|
||||
0x00000004,
|
||||
0x00000001,
|
||||
0x00000000,
|
||||
0x00000001,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x05000000, /* cmds end */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000, /* state start */
|
||||
0x00000000,
|
||||
0x3f800000,
|
||||
0x3f800000,
|
||||
0x3f800000,
|
||||
0x3f800000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000, /* state end */
};

RO_RENDERSTATE(9);
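/*
 * Aside, not part of the commit: going by the extern declarations in
 * intel_renderstate.h above, RO_RENDERSTATE(9) token-pastes "9" into the
 * identifier, i.e. it instantiates something of the shape
 *
 *   const struct intel_renderstate_rodata gen9_null_state = {
 *           .reloc = gen9_null_state_relocs,
 *           .batch = gen9_null_state_batch,
 *           ...
 *   };
 *
 * (the tail of the macro body is cut off by the diff above, so the
 * remaining initializers are left elided here).
 */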

@ -33,14 +33,24 @@
#include "i915_trace.h"
#include "intel_drv.h"

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
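/*
 * Illustration, not part of the commit: a typical use of such a constant
 * is rounding a length up to a whole cacheline with the kernel's ALIGN()
 * helper, so hardware that fetches cacheline-sized chunks never reads
 * past the buffer. The helper name below is hypothetical:
 */
static inline size_t example_pad_to_cacheline(size_t len)
{
	return ALIGN(len, CACHELINE_BYTES);	/* 61 -> 64, 64 -> 64 */
}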
bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;

static inline int __ring_space(int head, int tail, int size)
if (!dev)
return false;

if (i915.enable_execlists) {
struct intel_context *dctx = ring->default_context;
struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;

return ringbuf->obj;
} else
return ring->buffer && ring->buffer->obj;
}

int __intel_ring_space(int head, int tail, int size)
{
int space = head - (tail + I915_RING_FREE_SPACE);
if (space < 0)
@ -48,12 +58,13 @@ static inline int __ring_space(int head, int tail, int size)
return space;
}

static inline int ring_space(struct intel_ringbuffer *ringbuf)
int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
return __intel_ring_space(ringbuf->head & HEAD_ADDR,
ringbuf->tail, ringbuf->size);
}
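/*
 * Worked example, not part of the commit: the ring is circular and
 * I915_RING_FREE_SPACE stays in reserve so tail never catches head. The
 * body of the negative branch is cut off by the hunk header above, but it
 * can only plausibly wrap by the ring size, making the arithmetic modular:
 */
static int example_ring_space(int head, int tail, int size, int reserve)
{
	int space = head - (tail + reserve);

	if (space < 0)
		space += size;	/* wrap: e.g. 512 - (1024 + 64) + 4096 = 3520 */
	return space;
}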

static bool intel_ring_stopped(struct intel_engine_cs *ring)
bool intel_ring_stopped(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
@ -351,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
 * TLB invalidate requires a post-sync write.
 */
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

/* Workaround: we must issue a pipe_control with CS-stall bit
 * set before a pipe_control command that has the state cache
 * invalidate bit set. */
@ -433,7 +447,14 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
return ret;
}

return gen8_emit_pipe_control(ring, flags, scratch_addr);
ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
if (ret)
return ret;

if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

return 0;
}

static void ring_write_tail(struct intel_engine_cs *ring,
@ -476,8 +497,13 @@ static bool stop_ring(struct intel_engine_cs *ring)

if (!IS_GEN2(ring->dev)) {
I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
/* Sometimes we observe that the idle flag is not
 * set even though the ring is empty. So double
 * check before giving up.
 */
if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
return false;
}
}
@ -540,6 +566,14 @@ static int init_ring_common(struct intel_engine_cs *ring)
 * also enforces ordering), otherwise the hw might lose the new ring
 * register values. */
I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));

/* WaClearRingBufHeadRegAtInit:ctg,elk */
if (I915_READ_HEAD(ring))
DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
ring->name, I915_READ_HEAD(ring));
I915_WRITE_HEAD(ring, 0);
(void)I915_READ_HEAD(ring);

I915_WRITE_CTL(ring,
((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
@ -558,10 +592,9 @@ static int init_ring_common(struct intel_engine_cs *ring)
goto out;
}

ringbuf->head = I915_READ_HEAD(ring);
ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ringbuf->space = ring_space(ringbuf);
ringbuf->space = intel_ring_space(ringbuf);
ringbuf->last_retired_head = -1;

memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@ -572,8 +605,25 @@ out:
return ret;
}

static int
init_pipe_control(struct intel_engine_cs *ring)
void
intel_fini_pipe_control(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;

if (ring->scratch.obj == NULL)
return;

if (INTEL_INFO(dev)->gen >= 5) {
kunmap(sg_page(ring->scratch.obj->pages->sgl));
i915_gem_object_ggtt_unpin(ring->scratch.obj);
}

drm_gem_object_unreference(&ring->scratch.obj->base);
ring->scratch.obj = NULL;
}

int
intel_init_pipe_control(struct intel_engine_cs *ring)
{
int ret;

@ -596,7 +646,7 @@ init_pipe_control(struct intel_engine_cs *ring)
goto err_unref;

ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
ring->scratch.cpu_page = (void*)MapIoMem((addr_t)sg_page(ring->scratch.obj->pages->sgl),4096, PG_SW|0x100);
ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
if (ring->scratch.cpu_page == NULL) {
ret = -ENOMEM;
goto err_unpin;
@ -614,6 +664,170 @@ err:
return ret;
}

static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
struct intel_context *ctx)
{
int ret, i;
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_workarounds *w = &dev_priv->workarounds;

if (WARN_ON(w->count == 0))
return 0;

ring->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(ring);
if (ret)
return ret;

ret = intel_ring_begin(ring, (w->count * 2 + 2));
if (ret)
return ret;

intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
for (i = 0; i < w->count; i++) {
intel_ring_emit(ring, w->reg[i].addr);
intel_ring_emit(ring, w->reg[i].value);
}
intel_ring_emit(ring, MI_NOOP);

intel_ring_advance(ring);

ring->gpu_caches_dirty = true;
ret = intel_ring_flush_all_caches(ring);
if (ret)
return ret;

DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

return 0;
}
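/*
 * Aside, not part of the commit: the intel_ring_begin() budget above is
 * exact. MI_LOAD_REGISTER_IMM takes one header dword plus an
 * (address, value) pair per register, and the trailing MI_NOOP keeps the
 * dword count even; e.g. for w->count = 3 that is 3 * 2 + 2 = 8 dwords.
 */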

static int wa_add(struct drm_i915_private *dev_priv,
const u32 addr, const u32 mask, const u32 val)
{
const u32 idx = dev_priv->workarounds.count;

if (WARN_ON(idx >= I915_MAX_WA_REGS))
return -ENOSPC;

dev_priv->workarounds.reg[idx].addr = addr;
dev_priv->workarounds.reg[idx].value = val;
dev_priv->workarounds.reg[idx].mask = mask;

dev_priv->workarounds.count++;

return 0;
}

#define WA_REG(addr, mask, val) { \
const int r = wa_add(dev_priv, (addr), (mask), (val)); \
if (r) \
return r; \
}

#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)

static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;

/* WaDisablePartialInstShootdown:bdw */
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
STALL_DOP_GATING_DISABLE);

/* WaDisableDopClockGating:bdw */
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
DOP_CLOCK_GATING_DISABLE);

WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);

/* Use Force Non-Coherent whenever executing a 3D context. This is a
 * workaround for a possible hang in the unlikely event a TLB
 * invalidation occurs during a PSD flush.
 */
/* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

/* Wa4x4STCOptimizationDisable:bdw */
WA_SET_BIT_MASKED(CACHE_MODE_1,
GEN8_4x4_STC_OPTIMIZATION_DISABLE);

/*
 * BSpec recommends 8x4 when MSAA is used,
 * however in practice 16x4 seems fastest.
 *
 * Note that PS/WM thread counts depend on the WIZ hashing
 * disable bit, which we don't touch here, but it's good
 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);

return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;

/* WaDisablePartialInstShootdown:chv */
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
STALL_DOP_GATING_DISABLE);

/* Use Force Non-Coherent whenever executing a 3D context. This is a
 * workaround for a possible hang in the unlikely event a TLB
 * invalidation occurs during a PSD flush.
 */
/* WaForceEnableNonCoherent:chv */
/* WaHdcDisableFetchWhenMasked:chv */
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FORCE_NON_COHERENT |
HDC_DONOT_FETCH_MEM_WHEN_MASKED);

return 0;
}

int init_workarounds_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;

WARN_ON(ring->id != RCS);

dev_priv->workarounds.count = 0;

if (IS_BROADWELL(dev))
return bdw_init_workarounds(ring);

if (IS_CHERRYVIEW(dev))
return chv_init_workarounds(ring);

return 0;
}

static int init_render_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
@ -632,7 +846,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
 *
 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
 */
if (INTEL_INFO(dev)->gen >= 6)
if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

/* Required for the hardware to program scanline values for waiting */
@ -648,7 +862,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
ret = intel_init_pipe_control(ring);
if (ret)
return ret;
}
@ -669,7 +883,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
if (HAS_L3_DPF(dev))
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

return ret;
return init_workarounds_ring(ring);
}

static void render_ring_cleanup(struct intel_engine_cs *ring)
@ -683,16 +897,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
dev_priv->semaphore_obj = NULL;
}

if (ring->scratch.obj == NULL)
return;

if (INTEL_INFO(dev)->gen >= 5) {
// kunmap(sg_page(ring->scratch.obj->pages->sgl));
i915_gem_object_ggtt_unpin(ring->scratch.obj);
}

drm_gem_object_unreference(&ring->scratch.obj->base);
ring->scratch.obj = NULL;
intel_fini_pipe_control(ring);
}

static int gen8_rcs_signal(struct intel_engine_cs *signaller,
@ -1015,7 +1220,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;

if (!dev->irq_enabled)
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;

spin_lock_irqsave(&dev_priv->irq_lock, flags);
@ -1046,7 +1251,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;

if (!dev->irq_enabled)
if (!intel_irqs_enabled(dev_priv))
return false;

spin_lock_irqsave(&dev_priv->irq_lock, flags);
@ -1083,7 +1288,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;

if (!dev->irq_enabled)
if (!intel_irqs_enabled(dev_priv))
return false;

spin_lock_irqsave(&dev_priv->irq_lock, flags);
@ -1217,7 +1422,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;

if (!dev->irq_enabled)
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;

spin_lock_irqsave(&dev_priv->irq_lock, flags);
@ -1260,7 +1465,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;

if (!dev->irq_enabled)
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;

spin_lock_irqsave(&dev_priv->irq_lock, flags);
@ -1280,9 +1485,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;

if (!dev->irq_enabled)
return;

spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
@ -1298,7 +1500,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;

if (!dev->irq_enabled)
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return false;

spin_lock_irqsave(&dev_priv->irq_lock, flags);
@ -1449,7 +1651,7 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
if (obj == NULL)
return;

// kunmap(sg_page(obj->pages->sgl));
kunmap(sg_page(obj->pages->sgl));
i915_gem_object_ggtt_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
@ -1497,7 +1699,7 @@ err_unref:
}

ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW|0x100);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
memset(ring->status_page.page_addr, 0, PAGE_SIZE);

DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
@ -1523,26 +1725,50 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
return 0;
}

static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
if (!ringbuf->obj)
return;

iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
}

int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct intel_ringbuffer *ringbuf)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = ringbuf->obj;
int ret;

ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret)
return ret;

ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret) {
i915_gem_object_ggtt_unpin(obj);
return ret;
}

ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);
return -EINVAL;
}

return 0;
}

void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
drm_gem_object_unreference(&ringbuf->obj->base);
ringbuf->obj = NULL;
}
|
||||
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
|
||||
struct intel_ringbuffer *ringbuf)
|
||||
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
|
||||
struct intel_ringbuffer *ringbuf)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_object *obj;
|
||||
int ret;
|
||||
|
||||
if (ringbuf->obj)
|
||||
return 0;
|
||||
|
||||
obj = NULL;
|
||||
if (!HAS_LLC(dev))
|
||||
@ -1555,30 +1781,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
|
||||
/* mark ring buffers as read-only from GPU side by default */
|
||||
obj->gt_ro = 1;
|
||||
|
||||
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
|
||||
if (ret)
|
||||
goto err_unref;
|
||||
|
||||
ret = i915_gem_object_set_to_gtt_domain(obj, true);
|
||||
if (ret)
|
||||
goto err_unpin;
|
||||
|
||||
ringbuf->virtual_start =
|
||||
ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
|
||||
ringbuf->size);
|
||||
if (ringbuf->virtual_start == NULL) {
|
||||
ret = -EINVAL;
|
||||
goto err_unpin;
|
||||
}
|
||||
|
||||
ringbuf->obj = obj;
|
||||
return 0;
|
||||
|
||||
err_unpin:
|
||||
i915_gem_object_ggtt_unpin(obj);
|
||||
err_unref:
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
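This hunk splits the old allocate-and-map path into separate steps (intel_alloc_ringbuffer_obj, intel_pin_and_map_ringbuffer_obj, intel_unpin_ringbuffer_obj, intel_destroy_ringbuffer_obj). A usage sketch of the new lifecycle, grounded only in the functions shown above (error paths elided):

	/* Sketch: allocation no longer implies a GGTT pin, so callers such as
	 * execlists can create a ringbuffer without mapping it through the
	 * aperture. Teardown mirrors setup: unpin first, then destroy. */
	static int example_ringbuffer_setup(struct drm_device *dev,
					    struct intel_ringbuffer *ringbuf)
	{
		int ret;

		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);	/* backing object */
		if (ret)
			return ret;

		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); /* GGTT pin + WC map */
		if (ret)
			intel_destroy_ringbuffer_obj(ringbuf);
		return ret;
	}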
static int intel_init_ring_buffer(struct drm_device *dev,
@ -1597,7 +1802,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->execlist_queue);
	ringbuf->size = 32 * PAGE_SIZE;
	ringbuf->ring = ring;
	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));

	init_waitqueue_head(&ring->irq_queue);
@ -1613,11 +1820,22 @@ static int intel_init_ring_buffer(struct drm_device *dev,
		goto error;
	}

	if (ringbuf->obj == NULL) {
	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret);
		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
			  ring->name, ret);
		goto error;
	}

	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
			  ring->name, ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}
	}

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
@ -1645,15 +1863,19 @@ error:

void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_private *dev_priv;
	struct intel_ringbuffer *ringbuf;

	if (!intel_ring_initialized(ring))
		return;

	dev_priv = to_i915(ring->dev);
	ringbuf = ring->buffer;

	intel_stop_ring_buffer(ring);
	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);

	intel_unpin_ringbuffer_obj(ringbuf);
	intel_destroy_ringbuffer_obj(ringbuf);
	ring->preallocated_lazy_request = NULL;
	ring->outstanding_lazy_seqno = 0;
@ -1680,13 +1902,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;

		ringbuf->space = ring_space(ringbuf);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
		if (__intel_ring_space(request->tail, ringbuf->tail,
				       ringbuf->size) >= n) {
			seqno = request->seqno;
			break;
		}
@ -1703,7 +1926,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
	ringbuf->head = ringbuf->last_retired_head;
	ringbuf->last_retired_head = -1;

	ringbuf->space = ring_space(ringbuf);
	ringbuf->space = intel_ring_space(ringbuf);
	return 0;
}

@ -1732,13 +1955,12 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
	trace_i915_ring_wait_begin(ring);
	do {
		ringbuf->head = I915_READ_HEAD(ring);
		ringbuf->space = ring_space(ringbuf);
		ringbuf->space = intel_ring_space(ringbuf);
		if (ringbuf->space >= n) {
			ret = 0;
			break;
		}

		msleep(1);

		ret = i915_gem_check_wedge(&dev_priv->gpu_error,
@ -1773,7 +1995,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	ringbuf->space = ring_space(ringbuf);
	ringbuf->space = intel_ring_space(ringbuf);

	return 0;
}
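The hunks above rename ring_space()/__ring_space() to the exported intel_ring_space()/__intel_ring_space(). The underlying arithmetic is ordinary circular-buffer accounting; a minimal sketch, assuming the driver's usual small reserve (I915_RING_FREE_SPACE) so that a completely full ring never looks identical to an empty one:

	/* Minimal sketch of the free-space computation behind these helpers:
	 * distance from tail to head on a circular buffer, minus a small
	 * reserve so head == tail stays unambiguous. */
	static int example_ring_space(int head, int tail, int size, int reserve)
	{
		int space = head - (tail + reserve);

		if (space < 0)
			space += size;
		return space;
	}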
@ -1978,9 +2200,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
			      u64 offset, u32 len,
			      unsigned flags)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
		!(flags & I915_DISPATCH_SECURE);
	bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(ring, 4);
@ -2009,8 +2229,9 @@ hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);
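The hsw_ring_dispatch_execbuffer change above is a behavior fix, not just reflowing: previously MI_BATCH_PPGTT_HSW was ORed in unconditionally, so even secure dispatches carried the PPGTT bit. Spelled out as a fragment:

	/* Before: secure batches still had MI_BATCH_PPGTT_HSW set. */
	u32 old_cmd = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
		      (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW);

	/* After: secure batches get neither the PPGTT nor the non-secure bit. */
	u32 new_cmd = MI_BATCH_BUFFER_START |
		      (flags & I915_DISPATCH_SECURE ?
		       0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);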
@ -2045,6 +2266,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t cmd;
	int ret;

@ -2075,8 +2297,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
	}
	intel_ring_advance(ring);

	if (IS_GEN7(dev) && !invalidate && flush)
	if (!invalidate && flush) {
		if (IS_GEN7(dev))
			return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
		else if (IS_BROADWELL(dev))
			dev_priv->fbc.need_sw_cache_clean = true;
	}

	return 0;
}
@ -2109,6 +2335,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
			dev_priv->semaphore_obj = obj;
		}
	}

	ring->init_context = intel_ring_workarounds_emit;
	ring->add_request = gen6_add_request;
	ring->flush = gen8_render_ring_flush;
	ring->irq_get = gen8_ring_get_irq;
@ -2218,93 +2446,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
	return intel_init_ring_buffer(dev, ring);
}

#if 0
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (ringbuf == NULL) {
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
		if (!ringbuf)
			return -ENOMEM;
		ring->buffer = ringbuf;
	}

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		ret = -ENODEV;
		goto err_ringbuf;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ringbuf->size = size;
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ringbuf->virtual_start = ioremap_wc(start, size);
	if (ringbuf->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		ret = -ENOMEM;
		goto err_ringbuf;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			goto err_vstart;
	}

	return 0;

err_vstart:
	iounmap(ringbuf->virtual_start);
err_ringbuf:
	kfree(ringbuf);
	ring->buffer = NULL;
	return ret;
}
#endif

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

@ -5,6 +5,13 @@

#define I915_CMD_HASH_ORDER 9

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
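One consumer of this macro appears earlier in this same commit, in the (disabled) DRI init path: the i830/845G TAIL-pointer erratum workaround shrinks the usable ring by two cachelines so the tail can never land in the hang-prone region:

	/* From the #if 0 block above: keep the tail out of the final two
	 * cachelines on i830/845G. */
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;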
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
@ -90,6 +97,15 @@ struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;
	void __iomem *virtual_start;

	struct intel_engine_cs *ring;

	/*
	 * FIXME: This backpointer is an artifact of the history of how the
	 * execlist patches came into being. It will get removed once the basic
	 * code has landed.
	 */
	struct intel_context *FIXME_lrc_ctx;

	u32 head;
	u32 tail;
	int space;
@ -132,6 +148,9 @@ struct intel_engine_cs {

	int (*init)(struct intel_engine_cs *ring);

	int (*init_context)(struct intel_engine_cs *ring,
			    struct intel_context *ctx);

	void (*write_tail)(struct intel_engine_cs *ring,
			   u32 value);
	int __must_check (*flush)(struct intel_engine_cs *ring,
@ -214,6 +233,19 @@ struct intel_engine_cs {
				  unsigned int num_dwords);
	} semaphore;

	/* Execlists */
	spinlock_t execlist_lock;
	struct list_head execlist_queue;
	struct list_head execlist_retired_req_list;
	u8 next_context_status_buffer;
	u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
	int (*emit_request)(struct intel_ringbuffer *ringbuf);
	int (*emit_flush)(struct intel_ringbuffer *ringbuf,
			  u32 invalidate_domains,
			  u32 flush_domains);
	int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
			     u64 offset, unsigned flags);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
@ -287,11 +319,7 @@ struct intel_engine_cs {
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};

static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
	return ring->buffer && ring->buffer->obj;
}
bool intel_ring_initialized(struct intel_engine_cs *ring);
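intel_ring_initialized() moves out of line here. A plausible reason, hedged since the new body is not part of this hunk: with execlists the ringbuffer hangs off the context rather than ring->buffer, so the simple inline test no longer covers both modes. A hypothetical shape of such a check:

	/* Hypothetical sketch only; the real body lives in intel_ringbuffer.c
	 * and is not shown in this diff. With execlists, the ringbuffer
	 * belongs to the default context, not to ring->buffer. */
	bool example_ring_initialized(struct intel_engine_cs *ring)
	{
		if (i915.enable_execlists) {
			struct intel_context *dctx = ring->default_context;

			return dctx && dctx->engine[ring->id].ringbuf->obj;
		}

		return ring->buffer && ring->buffer->obj;
	}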
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
@ -355,6 +383,13 @@ intel_write_status_page(struct intel_engine_cs *ring,
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
			       struct intel_ringbuffer *ringbuf);

void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);

@ -372,6 +407,9 @@ static inline void intel_ring_advance(struct intel_engine_cs *ring)
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);

int __must_check intel_ring_idle(struct intel_engine_cs *ring);
@ -379,6 +417,9 @@ void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);

void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
@ -388,6 +429,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);

int init_workarounds_ring(struct intel_engine_cs *ring);

static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
@ -405,7 +448,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
	ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */
drivers/video/drm/i915/intel_runtime_pm.c: new file, 1406 lines (diff suppressed because it is too large)
@ -1991,57 +1991,10 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
	return !list_empty(&connector->probed_modes);
}

static void
intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
{
	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
	struct drm_device *dev = connector->dev;

	if (intel_sdvo_connector->left)
		drm_property_destroy(dev, intel_sdvo_connector->left);
	if (intel_sdvo_connector->right)
		drm_property_destroy(dev, intel_sdvo_connector->right);
	if (intel_sdvo_connector->top)
		drm_property_destroy(dev, intel_sdvo_connector->top);
	if (intel_sdvo_connector->bottom)
		drm_property_destroy(dev, intel_sdvo_connector->bottom);
	if (intel_sdvo_connector->hpos)
		drm_property_destroy(dev, intel_sdvo_connector->hpos);
	if (intel_sdvo_connector->vpos)
		drm_property_destroy(dev, intel_sdvo_connector->vpos);
	if (intel_sdvo_connector->saturation)
		drm_property_destroy(dev, intel_sdvo_connector->saturation);
	if (intel_sdvo_connector->contrast)
		drm_property_destroy(dev, intel_sdvo_connector->contrast);
	if (intel_sdvo_connector->hue)
		drm_property_destroy(dev, intel_sdvo_connector->hue);
	if (intel_sdvo_connector->sharpness)
		drm_property_destroy(dev, intel_sdvo_connector->sharpness);
	if (intel_sdvo_connector->flicker_filter)
		drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
	if (intel_sdvo_connector->flicker_filter_2d)
		drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
	if (intel_sdvo_connector->flicker_filter_adaptive)
		drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
	if (intel_sdvo_connector->tv_luma_filter)
		drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
	if (intel_sdvo_connector->tv_chroma_filter)
		drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
	if (intel_sdvo_connector->dot_crawl)
		drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
	if (intel_sdvo_connector->brightness)
		drm_property_destroy(dev, intel_sdvo_connector->brightness);
}

static void intel_sdvo_destroy(struct drm_connector *connector)
{
	struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);

	if (intel_sdvo_connector->tv_format)
		drm_property_destroy(connector->dev,
				     intel_sdvo_connector->tv_format);

	intel_sdvo_destroy_enhance_property(connector);
	drm_connector_cleanup(connector);
	kfree(intel_sdvo_connector);
}
@ -37,6 +37,20 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"

static bool
format_is_yuv(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_YVYU:
		return true;
	default:
		return false;
	}
}

static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
{
	/* paranoia */
@ -46,17 +60,32 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
	return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
}

static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
/**
 * intel_pipe_update_start() - start update of a set of display registers
 * @crtc: the crtc of which the registers are going to be updated
 * @start_vbl_count: vblank counter return pointer used for error checking
 *
 * Mark the start of an update to pipe registers that should be updated
 * atomically regarding vblank. If the next vblank happens within the
 * next 100 us, this function waits until the vblank passes.
 *
 * After a successful call to this function, interrupts will be disabled
 * until a subsequent call to intel_pipe_update_end(). That is done to
 * avoid random delays. The value written to @start_vbl_count should be
 * supplied to intel_pipe_update_end() for error checking.
 *
 * Return: true if the call was successful
 */
bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
{
	struct drm_device *dev = crtc->base.dev;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	long timeout = msecs_to_jiffies_timeout(1);
	int scanline, min, max, vblank_start;
	wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
	DEFINE_WAIT(wait);

	WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));

	vblank_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vblank_start = DIV_ROUND_UP(vblank_start, 2);
@ -81,7 +110,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
	 * other CPUs can see the task state update by the time we
	 * read the scanline.
	 */
	prepare_to_wait(&crtc->vbl_wait, &wait, TASK_UNINTERRUPTIBLE);
	prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);

	scanline = intel_get_crtc_scanline(crtc);
	if (scanline < min || scanline > max)
@ -100,7 +129,7 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
//	local_irq_disable();
	}

	finish_wait(&crtc->vbl_wait, &wait);
	finish_wait(wq, &wait);

//	drm_vblank_put(dev, pipe);

@ -111,7 +140,16 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
	return true;
}
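To make the kerneldoc's 100 us figure concrete: usecs_to_scanlines() above takes crtc_clock in kHz, so for a 1080p60 mode (crtc_clock = 148500, crtc_htotal = 2200) it returns DIV_ROUND_UP(100 * 148500, 1000 * 2200) = 7 scanlines. A sketch of the evasion window this implies (variable names follow the function; the exact window math is an assumption):

	/* Sketch: register writes must not start within ~100 us, converted
	 * to scanlines, of vblank_start; inside that window, sleep until
	 * the vblank has passed. */
	static bool example_in_evasion_window(const struct drm_display_mode *mode,
					      int scanline, int vblank_start)
	{
		int min = vblank_start - usecs_to_scanlines(mode, 100);
		int max = vblank_start - 1;

		return scanline >= min && scanline <= max;
	}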
static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
/**
 * intel_pipe_update_end() - end update of a set of display registers
 * @crtc: the crtc of which the registers were updated
 * @start_vbl_count: start vblank counter (used for error checking)
 *
 * Mark the end of an update started with intel_pipe_update_start(). This
 * re-enables interrupts and verifies the update was actually completed
 * before a vblank using the value of @start_vbl_count.
 */
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
{
	struct drm_device *dev = crtc->base.dev;
	enum pipe pipe = crtc->pipe;
@ -137,6 +175,226 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
		I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
}

static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
		 struct drm_framebuffer *fb,
		 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
		 unsigned int crtc_w, unsigned int crtc_h,
		 uint32_t x, uint32_t y,
		 uint32_t src_w, uint32_t src_h)
{
	struct drm_device *dev = drm_plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane = to_intel_plane(drm_plane);
	const int pipe = intel_plane->pipe;
	const int plane = intel_plane->plane + 1;
	u32 plane_ctl, stride;
	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	plane_ctl = I915_READ(PLANE_CTL(pipe, plane));

	/* Mask out pixel format bits in case we change it */
	plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
	plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
	plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
	plane_ctl &= ~PLANE_CTL_TILED_MASK;
	plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
	plane_ctl &= ~PLANE_CTL_ROTATE_MASK;

	/* Trickle feed has to be enabled */
	plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;

	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
		plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
		break;
	case DRM_FORMAT_XBGR8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
		break;
	case DRM_FORMAT_XRGB8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		break;
	/*
	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_ARGB8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_YUYV:
		plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
		break;
	case DRM_FORMAT_YVYU:
		plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
		break;
	case DRM_FORMAT_UYVY:
		plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
		break;
	case DRM_FORMAT_VYUY:
		plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
		break;
	default:
		BUG();
	}

	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		stride = fb->pitches[0] >> 6;
		break;
	case I915_TILING_X:
		plane_ctl |= PLANE_CTL_TILED_X;
		stride = fb->pitches[0] >> 9;
		break;
	default:
		BUG();
	}
	if (intel_plane->rotation == BIT(DRM_ROTATE_180))
		plane_ctl |= PLANE_CTL_ROTATE_180;

	plane_ctl |= PLANE_CTL_ENABLE;
	plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;

	intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
				       pixel_size, true,
				       src_w != crtc_w || src_h != crtc_h);

	/* Sizes are 0 based */
	src_w--;
	src_h--;
	crtc_w--;
	crtc_h--;

	I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
	I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
	I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
	I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
	I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
	I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
	POSTING_READ(PLANE_SURF(pipe, plane));
}
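The tiling switch above implies the PLANE_STRIDE units: >> 6 divides the byte pitch by 64 for linear surfaces, and >> 9 divides by 512, one X-tile row, for X-tiled ones. A worked example with an illustrative 1920-pixel XRGB8888 pitch:

	/* Illustrative values only, matching the shifts used above. */
	unsigned int pitch = 1920 * 4;			/* 7680 bytes       */
	unsigned int linear_stride = pitch >> 6;	/* 7680 / 64  = 120 */
	unsigned int xtiled_stride = pitch >> 9;	/* 7680 / 512 = 15  */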
static void
skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
{
	struct drm_device *dev = drm_plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane = to_intel_plane(drm_plane);
	const int pipe = intel_plane->pipe;
	const int plane = intel_plane->plane + 1;

	I915_WRITE(PLANE_CTL(pipe, plane),
		   I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);

	/* Activate double buffered register update */
	I915_WRITE(PLANE_CTL(pipe, plane), 0);
	POSTING_READ(PLANE_CTL(pipe, plane));

	intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
}

static int
skl_update_colorkey(struct drm_plane *drm_plane,
		    struct drm_intel_sprite_colorkey *key)
{
	struct drm_device *dev = drm_plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane = to_intel_plane(drm_plane);
	const int pipe = intel_plane->pipe;
	const int plane = intel_plane->plane;
	u32 plane_ctl;

	I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
	I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
	I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);

	plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
	plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
	else if (key->flags & I915_SET_COLORKEY_SOURCE)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
	I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);

	POSTING_READ(PLANE_CTL(pipe, plane));

	return 0;
}

static void
skl_get_colorkey(struct drm_plane *drm_plane,
		 struct drm_intel_sprite_colorkey *key)
{
	struct drm_device *dev = drm_plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane *intel_plane = to_intel_plane(drm_plane);
	const int pipe = intel_plane->pipe;
	const int plane = intel_plane->plane;
	u32 plane_ctl;

	key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
	key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
	key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));

	plane_ctl = I915_READ(PLANE_CTL(pipe, plane));

	switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
	case PLANE_CTL_KEY_ENABLE_DESTINATION:
		key->flags = I915_SET_COLORKEY_DESTINATION;
		break;
	case PLANE_CTL_KEY_ENABLE_SOURCE:
		key->flags = I915_SET_COLORKEY_SOURCE;
		break;
	default:
		key->flags = I915_SET_COLORKEY_NONE;
	}
}

static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
	struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
	int plane = intel_plane->plane;

	/* Seems RGB data bypasses the CSC always */
	if (!format_is_yuv(format))
		return;

	/*
	 * BT.601 limited range YCbCr -> full range RGB
	 *
	 * |r|   | 6537 4769     0|   |cr  |
	 * |g| = |-3330 4769 -1605| x |y-64|
	 * |b|   |    0 4769  8263|   |cb  |
	 *
	 * Cb and Cr apparently come in as signed already, so no
	 * need for any offset. For Y we need to remove the offset.
	 */
	I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
	I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
	I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));

	I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
	I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
	I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
	I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
	I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));

	I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
	I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
	I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));

	I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
	I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
	I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
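The coefficients in the comment above appear to be the standard BT.601 limited-to-full-range factors in 4.12 fixed point; this is inferred from the numbers, not stated in the code. A quick standalone check that each factor times 4096 lands within about one LSB of the register constants:

	/* Assumed encoding: BT.601 factors scaled by 4096 (4.12 fixed point).
	 * Luma scale 255/219, chroma scale 255/224 times the usual terms. */
	#include <stdio.h>

	int main(void)
	{
		const double y    = 255.0 / 219.0;	/* luma scale   */
		const double c    = 255.0 / 224.0;	/* chroma scale */
		const double k[5] = { y, c * 1.402, c * 0.714136,
				      c * 0.344136, c * 1.772 };
		const int reg[5]  = { 4769, 6537, 3330, 1605, 8263 };

		for (int i = 0; i < 5; i++)
			printf("%8.1f ~ %5d\n", k[i] * 4096.0, reg[i]);
		return 0;
	}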
static void
vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
		 struct drm_framebuffer *fb,
@ -163,6 +421,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
	sprctl &= ~SP_PIXFORMAT_MASK;
	sprctl &= ~SP_YUV_BYTE_ORDER_MASK;
	sprctl &= ~SP_TILED;
	sprctl &= ~SP_ROTATE_180;

	switch (fb->pixel_format) {
	case DRM_FORMAT_YUYV:
@ -235,10 +494,21 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
						fb->pitches[0]);
	linear_offset -= sprsurf_offset;

	if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
		sprctl |= SP_ROTATE_180;

		x += src_w;
		y += src_h;
		linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
	}

	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);

	intel_update_primary_plane(intel_crtc);

	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
		chv_update_csc(intel_plane, fb->pixel_format);

	I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
	I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);

@ -247,6 +517,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
	else
		I915_WRITE(SPLINOFF(pipe, plane), linear_offset);

	I915_WRITE(SPCONSTALPHA(pipe, plane), 0);

	I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
	I915_WRITE(SPCNTR(pipe, plane), sprctl);
	I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
@ -364,6 +636,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
	sprctl &= ~SPRITE_RGB_ORDER_RGBX;
	sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
	sprctl &= ~SPRITE_TILED;
	sprctl &= ~SPRITE_ROTATE_180;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XBGR8888:
@ -426,6 +699,18 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
					       pixel_size, fb->pitches[0]);
	linear_offset -= sprsurf_offset;

	if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
		sprctl |= SPRITE_ROTATE_180;

		/* HSW and BDW do this automagically in hardware */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += src_w;
			y += src_h;
			linear_offset += src_h * fb->pitches[0] +
				src_w * pixel_size;
		}
	}

	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);

	intel_update_primary_plane(intel_crtc);
@ -571,6 +856,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
	dvscntr &= ~DVS_RGB_ORDER_XBGR;
	dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
	dvscntr &= ~DVS_TILED;
	dvscntr &= ~DVS_ROTATE_180;

	switch (fb->pixel_format) {
	case DRM_FORMAT_XBGR8888:
@ -628,6 +914,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
					       pixel_size, fb->pitches[0]);
	linear_offset -= dvssurf_offset;

	if (intel_plane->rotation == BIT(DRM_ROTATE_180)) {
		dvscntr |= DVS_ROTATE_180;

		x += src_w;
		y += src_h;
		linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
	}

	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);

	intel_update_primary_plane(intel_crtc);
@ -693,6 +987,14 @@ intel_post_enable_primary(struct drm_crtc *crtc)
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */
	if (IS_BROADWELL(dev))
		intel_wait_for_vblank(dev, intel_crtc->pipe);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
@ -781,20 +1083,6 @@ ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
		key->flags = I915_SET_COLORKEY_NONE;
}

static bool
format_is_yuv(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_YVYU:
		return true;
	default:
		return false;
	}
}

static bool colorkey_enabled(struct intel_plane *intel_plane)
{
	struct drm_intel_sprite_colorkey key;
@ -805,57 +1093,23 @@ static bool colorkey_enabled(struct intel_plane *intel_plane)
}

static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
		   unsigned int crtc_w, unsigned int crtc_h,
		   uint32_t src_x, uint32_t src_y,
		   uint32_t src_w, uint32_t src_h)
intel_check_sprite_plane(struct drm_plane *plane,
			 struct intel_plane_state *state)
{
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	enum pipe pipe = intel_crtc->pipe;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct drm_i915_gem_object *old_obj = intel_plane->obj;
	int ret;
	bool primary_enabled;
	bool visible;
	struct drm_framebuffer *fb = state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
	uint32_t src_x, src_y, src_w, src_h;
	struct drm_rect *src = &state->src;
	struct drm_rect *dst = &state->dst;
	struct drm_rect *orig_src = &state->orig_src;
	const struct drm_rect *clip = &state->clip;
	int hscale, vscale;
	int max_scale, min_scale;
	int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
	struct drm_rect src = {
		/* sample coordinates in 16.16 fixed point */
		.x1 = src_x,
		.x2 = src_x + src_w,
		.y1 = src_y,
		.y2 = src_y + src_h,
	};
	struct drm_rect dst = {
		/* integer pixels */
		.x1 = crtc_x,
		.x2 = crtc_x + crtc_w,
		.y1 = crtc_y,
		.y2 = crtc_y + crtc_h,
	};
	const struct drm_rect clip = {
		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
	};
	const struct {
		int crtc_x, crtc_y;
		unsigned int crtc_w, crtc_h;
		uint32_t src_x, src_y, src_w, src_h;
	} orig = {
		.crtc_x = crtc_x,
		.crtc_y = crtc_y,
		.crtc_w = crtc_w,
		.crtc_h = crtc_h,
		.src_x = src_x,
		.src_y = src_y,
		.src_w = src_w,
		.src_h = src_h,
	};

	/* Don't modify another pipe's plane */
	if (intel_plane->pipe != intel_crtc->pipe) {
@ -887,49 +1141,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
	max_scale = intel_plane->max_downscale << 16;
	min_scale = intel_plane->can_scale ? 1 : (1 << 16);

	hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale);
	drm_rect_rotate(src, fb->width << 16, fb->height << 16,
			intel_plane->rotation);

	hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
	BUG_ON(hscale < 0);

	vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale);
	vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
	BUG_ON(vscale < 0);

	visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale);
	state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);

	crtc_x = dst.x1;
	crtc_y = dst.y1;
	crtc_w = drm_rect_width(&dst);
	crtc_h = drm_rect_height(&dst);
	crtc_x = dst->x1;
	crtc_y = dst->y1;
	crtc_w = drm_rect_width(dst);
	crtc_h = drm_rect_height(dst);

	if (visible) {
	if (state->visible) {
		/* check again in case clipping clamped the results */
		hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale);
		hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
		if (hscale < 0) {
			DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
			drm_rect_debug_print(&src, true);
			drm_rect_debug_print(&dst, false);
			drm_rect_debug_print(src, true);
			drm_rect_debug_print(dst, false);

			return hscale;
		}

		vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale);
		vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
		if (vscale < 0) {
			DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
			drm_rect_debug_print(&src, true);
			drm_rect_debug_print(&dst, false);
			drm_rect_debug_print(src, true);
			drm_rect_debug_print(dst, false);

			return vscale;
		}

		/* Make the source viewport size an exact multiple of the scaling factors. */
		drm_rect_adjust_size(&src,
				     drm_rect_width(&dst) * hscale - drm_rect_width(&src),
				     drm_rect_height(&dst) * vscale - drm_rect_height(&src));
		drm_rect_adjust_size(src,
				     drm_rect_width(dst) * hscale - drm_rect_width(src),
				     drm_rect_height(dst) * vscale - drm_rect_height(src));

		drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
				    intel_plane->rotation);

		/* sanity check to make sure the src viewport wasn't enlarged */
		WARN_ON(src.x1 < (int) src_x ||
			src.y1 < (int) src_y ||
			src.x2 > (int) (src_x + src_w) ||
			src.y2 > (int) (src_y + src_h));
		WARN_ON(src->x1 < (int) orig_src->x1 ||
			src->y1 < (int) orig_src->y1 ||
			src->x2 > (int) orig_src->x2 ||
			src->y2 > (int) orig_src->y2);

		/*
		 * Hardware doesn't handle subpixel coordinates.
@ -937,10 +1197,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		 * increase the source viewport size, because that could
		 * push the downscaling factor out of bounds.
		 */
		src_x = src.x1 >> 16;
		src_w = drm_rect_width(&src) >> 16;
		src_y = src.y1 >> 16;
		src_h = drm_rect_height(&src) >> 16;
		src_x = src->x1 >> 16;
		src_w = drm_rect_width(src) >> 16;
		src_y = src->y1 >> 16;
		src_h = drm_rect_height(src) >> 16;

		if (format_is_yuv(fb->pixel_format)) {
			src_x &= ~1;
@ -954,12 +1214,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
				crtc_w &= ~1;

			if (crtc_w == 0)
				visible = false;
				state->visible = false;
		}
	}

	/* Check size restrictions when scaling */
	if (visible && (src_w != crtc_w || src_h != crtc_h)) {
	if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
		unsigned int width_bytes;

		WARN_ON(!intel_plane->can_scale);
@ -967,12 +1227,13 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		/* FIXME interlacing min height is 6 */

		if (crtc_w < 3 || crtc_h < 3)
			visible = false;
			state->visible = false;

		if (src_w < 3 || src_h < 3)
			visible = false;
			state->visible = false;

		width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size;
		width_bytes = ((src_x * pixel_size) & 63) +
			src_w * pixel_size;

		if (src_w > 2048 || src_h > 2048 ||
		    width_bytes > 4096 || fb->pitches[0] > 4096) {
@ -981,42 +1242,90 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		}
	}

	dst.x1 = crtc_x;
	dst.x2 = crtc_x + crtc_w;
	dst.y1 = crtc_y;
	dst.y2 = crtc_y + crtc_h;
	if (state->visible) {
		src->x1 = src_x;
		src->x2 = src_x + src_w;
		src->y1 = src_y;
		src->y2 = src_y + src_h;
	}

	/*
	 * If the sprite is completely covering the primary plane,
	 * we can disable the primary and save power.
	 */
	primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
	WARN_ON(!primary_enabled && !visible && intel_crtc->active);
	dst->x1 = crtc_x;
	dst->x2 = crtc_x + crtc_w;
	dst->y1 = crtc_y;
	dst->y2 = crtc_y + crtc_h;

	return 0;
}

static int
intel_prepare_sprite_plane(struct drm_plane *plane,
			   struct intel_plane_state *state)
{
	struct drm_device *dev = plane->dev;
	struct drm_crtc *crtc = state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_framebuffer *fb = state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_plane->obj;
	int ret;

	if (old_obj != obj) {
		mutex_lock(&dev->struct_mutex);

		/* Note that this will apply the VT-d workaround for scanouts,
		 * which is more restrictive than required for sprites. (The
		 * primary plane requires 256KiB alignment with 64 PTE padding,
		 * the sprite planes only require 128KiB alignment and 32 PTE padding.
		 * the sprite planes only require 128KiB alignment and 32 PTE
		 * padding.
		 */
		ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);

		ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
		if (ret == 0)
			i915_gem_track_fb(old_obj, obj,
					  INTEL_FRONTBUFFER_SPRITE(pipe));
		mutex_unlock(&dev->struct_mutex);

		if (ret)
			return ret;
	}

	intel_plane->crtc_x = orig.crtc_x;
	intel_plane->crtc_y = orig.crtc_y;
	intel_plane->crtc_w = orig.crtc_w;
	intel_plane->crtc_h = orig.crtc_h;
	intel_plane->src_x = orig.src_x;
	intel_plane->src_y = orig.src_y;
	intel_plane->src_w = orig.src_w;
	intel_plane->src_h = orig.src_h;
	return 0;
}

static void
intel_commit_sprite_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_device *dev = plane->dev;
	struct drm_crtc *crtc = state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_framebuffer *fb = state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_plane->obj;
	int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
	uint32_t src_x, src_y, src_w, src_h;
	struct drm_rect *dst = &state->dst;
	const struct drm_rect *clip = &state->clip;
	bool primary_enabled;

	/*
	 * If the sprite is completely covering the primary plane,
	 * we can disable the primary and save power.
	 */
	primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
	WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);

	intel_plane->crtc_x = state->orig_dst.x1;
	intel_plane->crtc_y = state->orig_dst.y1;
	intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
	intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
	intel_plane->src_x = state->orig_src.x1;
	intel_plane->src_y = state->orig_src.y1;
	intel_plane->src_w = drm_rect_width(&state->orig_src);
	intel_plane->src_h = drm_rect_height(&state->orig_src);
	intel_plane->obj = obj;

	if (intel_crtc->active) {
@ -1025,37 +1334,94 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		intel_crtc->primary_enabled = primary_enabled;

//		if (primary_was_enabled != primary_enabled)
//			intel_crtc_wait_for_pending_flips(crtc);

		if (primary_was_enabled && !primary_enabled)
			intel_pre_disable_primary(crtc);

		if (visible)
		if (state->visible) {
			crtc_x = state->dst.x1;
			crtc_y = state->dst.y1;
			crtc_w = drm_rect_width(&state->dst);
			crtc_h = drm_rect_height(&state->dst);
			src_x = state->src.x1;
			src_y = state->src.y1;
			src_w = drm_rect_width(&state->src);
			src_h = drm_rect_height(&state->src);
			intel_plane->update_plane(plane, crtc, fb, obj,
						  crtc_x, crtc_y, crtc_w, crtc_h,
						  src_x, src_y, src_w, src_h);
		else
		} else {
			intel_plane->disable_plane(plane, crtc);
		}


		intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));

		if (!primary_was_enabled && primary_enabled)
			intel_post_enable_primary(crtc);
	}

	/* Unpin old obj after new one is active to avoid ugliness */
	if (old_obj) {
	if (old_obj && old_obj != obj) {

		/*
		 * It's fairly common to simply update the position of
		 * an existing object.  In that case, we don't need to
		 * wait for vblank to avoid ugliness, we only need to
		 * do the pin & ref bookkeeping.
		 */
		if (old_obj != obj && intel_crtc->active)
		if (intel_crtc->active)
			intel_wait_for_vblank(dev, intel_crtc->pipe);

		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(old_obj);
		mutex_unlock(&dev->struct_mutex);
	}
}

static int
intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
		   struct drm_framebuffer *fb, int crtc_x, int crtc_y,
		   unsigned int crtc_w, unsigned int crtc_h,
		   uint32_t src_x, uint32_t src_y,
		   uint32_t src_w, uint32_t src_h)
{
	struct intel_plane_state state;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	state.crtc = crtc;
	state.fb = fb;

	/* sample coordinates in 16.16 fixed point */
	state.src.x1 = src_x;
	state.src.x2 = src_x + src_w;
	state.src.y1 = src_y;
	state.src.y2 = src_y + src_h;

	/* integer pixels */
	state.dst.x1 = crtc_x;
	state.dst.x2 = crtc_x + crtc_w;
	state.dst.y1 = crtc_y;
	state.dst.y2 = crtc_y + crtc_h;

	state.clip.x1 = 0;
	state.clip.y1 = 0;
	state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
	state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
	state.orig_src = state.src;
	state.orig_dst = state.dst;

	ret = intel_check_sprite_plane(plane, &state);
	if (ret)
		return ret;

	ret = intel_prepare_sprite_plane(plane, &state);
	if (ret)
		return ret;

	intel_commit_sprite_plane(plane, &state);
	return 0;
}

@ -1169,14 +1535,41 @@ out_unlock:
	return ret;
}

void intel_plane_restore(struct drm_plane *plane)
int intel_plane_set_property(struct drm_plane *plane,
			     struct drm_property *prop,
			     uint64_t val)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	uint64_t old_val;
	int ret = -ENOENT;

	if (prop == dev->mode_config.rotation_property) {
		/* exactly one rotation angle please */
		if (hweight32(val & 0xf) != 1)
			return -EINVAL;

		if (intel_plane->rotation == val)
			return 0;

		old_val = intel_plane->rotation;
		intel_plane->rotation = val;
		ret = intel_plane_restore(plane);
		if (ret)
			intel_plane->rotation = old_val;
	}

	return ret;
}
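The hweight32(val & 0xf) != 1 test above enforces exactly one rotation bit in the low nibble. For example (illustrative values):

	/* Exactly one rotation angle: a two-bit mask is rejected. */
	uint64_t ok  = BIT(DRM_ROTATE_180);			/* hweight 1: accepted */
	uint64_t bad = BIT(DRM_ROTATE_0) | BIT(DRM_ROTATE_180);	/* hweight 2: -EINVAL  */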
int intel_plane_restore(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);

	if (!plane->crtc || !plane->fb)
		return;
		return 0;

	intel_update_plane(plane, plane->crtc, plane->fb,
	return plane->funcs->update_plane(plane, plane->crtc, plane->fb,
				  intel_plane->crtc_x, intel_plane->crtc_y,
				  intel_plane->crtc_w, intel_plane->crtc_h,
				  intel_plane->src_x, intel_plane->src_y,
@ -1195,6 +1588,7 @@ static const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = intel_update_plane,
	.disable_plane = intel_disable_plane,
	.destroy = intel_destroy_plane,
	.set_property = intel_plane_set_property,
};

static uint32_t ilk_plane_formats[] = {
@ -1228,6 +1622,18 @@ static uint32_t vlv_plane_formats[] = {
	DRM_FORMAT_VYUY,
};

static uint32_t skl_plane_formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

int
intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
{
@ -1291,7 +1697,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
			num_plane_formats = ARRAY_SIZE(snb_plane_formats);
		}
		break;
	case 9:
		/*
		 * FIXME: Skylake planes can be scaled (with some restrictions),
		 * but this is for another time.
		 */
		intel_plane->can_scale = false;
		intel_plane->max_downscale = 1;
		intel_plane->update_plane = skl_update_plane;
		intel_plane->disable_plane = skl_disable_plane;
		intel_plane->update_colorkey = skl_update_colorkey;
		intel_plane->get_colorkey = skl_get_colorkey;

		plane_formats = skl_plane_formats;
		num_plane_formats = ARRAY_SIZE(skl_plane_formats);
		break;
	default:
		kfree(intel_plane);
		return -ENODEV;
@ -1299,13 +1719,28 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)

	intel_plane->pipe = pipe;
	intel_plane->plane = plane;
	intel_plane->rotation = BIT(DRM_ROTATE_0);
	possible_crtcs = (1 << pipe);
	ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
	ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
				       &intel_plane_funcs,
				       plane_formats, num_plane_formats,
				       false);
	if (ret)
				       DRM_PLANE_TYPE_OVERLAY);
	if (ret) {
		kfree(intel_plane);
		goto out;
	}

	if (!dev->mode_config.rotation_property)
		dev->mode_config.rotation_property =
			drm_mode_create_rotation_property(dev,
							  BIT(DRM_ROTATE_0) |
							  BIT(DRM_ROTATE_180));

	if (dev->mode_config.rotation_property)
		drm_object_attach_property(&intel_plane->base.base,
					   dev->mode_config.rotation_property,
					   intel_plane->rotation);

out:
	return ret;
}

@ -43,23 +43,17 @@
static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

@ -101,7 +95,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
	if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;
@ -120,7 +114,6 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

@ -229,10 +222,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	if (!IS_CHERRYVIEW(dev_priv->dev))
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@ -299,6 +288,154 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));

	__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
			   _MASKED_BIT_DISABLE(0xffff));
}
static void
|
||||
__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
|
||||
{
|
||||
/* Check for Render Engine */
|
||||
if (FORCEWAKE_RENDER & fw_engine) {
|
||||
if (wait_for_atomic((__raw_i915_read32(dev_priv,
|
||||
FORCEWAKE_ACK_RENDER_GEN9) &
|
||||
FORCEWAKE_KERNEL) == 0,
|
||||
FORCEWAKE_ACK_TIMEOUT_MS))
|
||||
DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
|
||||
|
||||
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
|
||||
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
|
||||
|
||||
if (wait_for_atomic((__raw_i915_read32(dev_priv,
|
||||
FORCEWAKE_ACK_RENDER_GEN9) &
|
||||
FORCEWAKE_KERNEL),
|
||||
FORCEWAKE_ACK_TIMEOUT_MS))
|
||||
DRM_ERROR("Timed out: waiting for Render to ack.\n");
|
||||
}
|
||||
|
||||
/* Check for Media Engine */
|
||||
if (FORCEWAKE_MEDIA & fw_engine) {
|
||||
if (wait_for_atomic((__raw_i915_read32(dev_priv,
|
||||
FORCEWAKE_ACK_MEDIA_GEN9) &
|
||||
FORCEWAKE_KERNEL) == 0,
|
||||
FORCEWAKE_ACK_TIMEOUT_MS))
|
||||
DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
|
||||
|
||||
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
|
||||
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
|
||||
|
||||
if (wait_for_atomic((__raw_i915_read32(dev_priv,
|
||||
FORCEWAKE_ACK_MEDIA_GEN9) &
|
||||
FORCEWAKE_KERNEL),
|
||||
FORCEWAKE_ACK_TIMEOUT_MS))
|
||||
DRM_ERROR("Timed out: waiting for Media to ack.\n");
|
||||
}
|
||||
|
||||
/* Check for Blitter Engine */
|
||||
if (FORCEWAKE_BLITTER & fw_engine) {
|
||||
if (wait_for_atomic((__raw_i915_read32(dev_priv,
|
||||
FORCEWAKE_ACK_BLITTER_GEN9) &
|
||||
FORCEWAKE_KERNEL) == 0,
|
||||
FORCEWAKE_ACK_TIMEOUT_MS))
|
||||
DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
|
||||
|
||||
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
|
||||
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
|
||||
|
||||
if (wait_for_atomic((__raw_i915_read32(dev_priv,
|
||||
FORCEWAKE_ACK_BLITTER_GEN9) &
|
||||
FORCEWAKE_KERNEL),
|
||||
FORCEWAKE_ACK_TIMEOUT_MS))
|
||||
DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
|
||||
{
|
||||
/* Check for Render Engine */
|
||||
if (FORCEWAKE_RENDER & fw_engine)
|
||||
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
|
||||
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
|
||||
|
||||
/* Check for Media Engine */
|
||||
if (FORCEWAKE_MEDIA & fw_engine)
|
||||
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
|
||||
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
|
||||
|
||||
/* Check for Blitter Engine */
|
||||
if (FORCEWAKE_BLITTER & fw_engine)
|
||||
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
|
||||
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
|
||||
}
|
||||
|
||||
static void
|
||||
gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
if (FORCEWAKE_RENDER & fw_engine) {
|
||||
if (dev_priv->uncore.fw_rendercount++ == 0)
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv,
|
||||
FORCEWAKE_RENDER);
|
||||
}
|
||||
|
||||
if (FORCEWAKE_MEDIA & fw_engine) {
|
||||
if (dev_priv->uncore.fw_mediacount++ == 0)
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv,
|
||||
FORCEWAKE_MEDIA);
|
||||
}
|
||||
|
||||
if (FORCEWAKE_BLITTER & fw_engine) {
|
||||
if (dev_priv->uncore.fw_blittercount++ == 0)
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv,
|
||||
FORCEWAKE_BLITTER);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
}
|
||||
|
||||
static void
|
||||
gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
if (FORCEWAKE_RENDER & fw_engine) {
|
||||
WARN_ON(dev_priv->uncore.fw_rendercount == 0);
|
||||
if (--dev_priv->uncore.fw_rendercount == 0)
|
||||
dev_priv->uncore.funcs.force_wake_put(dev_priv,
|
||||
FORCEWAKE_RENDER);
|
||||
}
|
||||
|
||||
if (FORCEWAKE_MEDIA & fw_engine) {
|
||||
WARN_ON(dev_priv->uncore.fw_mediacount == 0);
|
||||
if (--dev_priv->uncore.fw_mediacount == 0)
|
||||
dev_priv->uncore.funcs.force_wake_put(dev_priv,
|
||||
FORCEWAKE_MEDIA);
|
||||
}
|
||||
|
||||
if (FORCEWAKE_BLITTER & fw_engine) {
|
||||
WARN_ON(dev_priv->uncore.fw_blittercount == 0);
|
||||
if (--dev_priv->uncore.fw_blittercount == 0)
|
||||
dev_priv->uncore.funcs.force_wake_put(dev_priv,
|
||||
FORCEWAKE_BLITTER);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
}
|
||||
|
||||
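The gen9 get/put pair above keeps one reference count per power well and only touches the hardware on the 0->1 and 1->0 transitions, all under uncore.lock. The same pattern in isolation, as a sketch with hypothetical names (hw_get/hw_put stand in for the low-level __gen9 routines):

	extern void hw_get(void), hw_put(void); /* hypothetical hardware hooks */
	static unsigned int refcount;

	void wakeref_get(void)
	{
		if (refcount++ == 0)
			hw_get();   /* first user powers the well up */
	}

	void wakeref_put(void)
	{
		WARN_ON(refcount == 0);
		if (--refcount == 0)
			hw_put();   /* last user lets it sleep again */
	}
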
static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;
@@ -334,9 +471,12 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (IS_GEN9(dev))
		__gen9_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

@@ -346,6 +486,15 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else if (IS_GEN9(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;

			if (dev_priv->uncore.fw_blittercount)
				fw |= FORCEWAKE_BLITTER;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
@@ -363,7 +512,8 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

@@ -389,6 +539,12 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
@@ -410,6 +566,10 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)

	intel_runtime_pm_get(dev_priv);

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev))
		return gen9_force_wake_get(dev_priv, fw_engine);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);
@@ -431,6 +591,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to Gen9 specific routine */
	if (IS_GEN9(dev_priv->dev)) {
		gen9_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
@@ -504,6 +670,38 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
	 REG_RANGE((reg), 0x14000, 0x14400) || \
	 REG_RANGE((reg), 0x22000, 0x24000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))

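The range macros above partition the MMIO space: anything below 0x40000 that is not in the always-on "uncore" window needs some forcewake domain, and the blitter range is simply the remainder. A small self-contained illustration of that classification, assuming the same half-open [start, end) convention as REG_RANGE() and using only a subset of the real ranges:

	#include <stdio.h>

	/* Same convention as the driver's REG_RANGE(): start inclusive, end exclusive. */
	#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

	static const char *classify(unsigned int reg)
	{
		if (REG_RANGE(reg, 0xB00, 0x2000))
			return "uncore (no forcewake needed)";
		if (REG_RANGE(reg, 0x2000, 0x2700))
			return "render";
		if (REG_RANGE(reg, 0x9400, 0x9800))
			return "common (render + media)";
		return "blitter (the remainder below 0x40000)";
	}

	int main(void)
	{
		printf("0x2100 -> %s\n", classify(0x2100)); /* render */
		printf("0x0C00 -> %s\n", classify(0x0C00)); /* uncore */
		return 0;
	}
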
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@@ -634,6 +832,45 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} else { \
		unsigned fwengine = 0; \
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} else { \
			if (dev_priv->uncore.fw_blittercount == 0) \
				fwengine = FORCEWAKE_BLITTER; \
		} \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	} \
	REG_READ_FOOTER; \
}

__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
@@ -655,6 +892,7 @@ __gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
@@ -792,6 +1030,69 @@ chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
	REG_WRITE_FOOTER; \
}

static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
		bool trace) { \
	REG_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} else { \
		unsigned fwengine = 0; \
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine = FORCEWAKE_RENDER; \
		} else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine = FORCEWAKE_MEDIA; \
		} else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
			if (dev_priv->uncore.fw_rendercount == 0) \
				fwengine |= FORCEWAKE_RENDER; \
			if (dev_priv->uncore.fw_mediacount == 0) \
				fwengine |= FORCEWAKE_MEDIA; \
		} else { \
			if (dev_priv->uncore.fw_blittercount == 0) \
				fwengine = FORCEWAKE_BLITTER; \
		} \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
					fwengine); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (fwengine) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
					fwengine); \
	} \
	REG_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
@@ -817,6 +1118,7 @@ __gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
@@ -826,6 +1128,22 @@ __gen4_write(64)
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)

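These helpers replace the eight hand-written assignments per platform with one token-pasting macro each. For instance, ASSIGN_READ_MMIO_VFUNCS(gen6) expands to:

	dev_priv->uncore.funcs.mmio_readb = gen6_read8;
	dev_priv->uncore.funcs.mmio_readw = gen6_read16;
	dev_priv->uncore.funcs.mmio_readl = gen6_read32;
	dev_priv->uncore.funcs.mmio_readq = gen6_read64;
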
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -833,12 +1151,15 @@ void intel_uncore_init(struct drm_device *dev)
	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev, false);
	__intel_uncore_early_sanitize(dev, false);

	if (IS_VALLEYVIEW(dev)) {
	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
@@ -881,77 +1202,52 @@ void intel_uncore_init(struct drm_device *dev)

	switch (INTEL_INFO(dev)->gen) {
	default:
		WARN_ON(1);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = chv_write8;
			dev_priv->uncore.funcs.mmio_writew = chv_write16;
			dev_priv->uncore.funcs.mmio_writel = chv_write32;
			dev_priv->uncore.funcs.mmio_writeq = chv_write64;
			dev_priv->uncore.funcs.mmio_readb = chv_read8;
			dev_priv->uncore.funcs.mmio_readw = chv_read16;
			dev_priv->uncore.funcs.mmio_readl = chv_read32;
			dev_priv->uncore.funcs.mmio_readq = chv_read64;
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);

		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
			dev_priv->uncore.funcs.mmio_writew = gen8_write16;
			dev_priv->uncore.funcs.mmio_writel = gen8_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq = vlv_read64;
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
		ASSIGN_WRITE_MMIO_VFUNCS(gen4);
		ASSIGN_READ_MMIO_VFUNCS(gen4);
		break;
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
@@ -968,7 +1264,7 @@ static const struct register_whitelist {
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
@@ -1044,41 +1340,34 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

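i915_do_reset() above pulses GRDOM_RESET_ENABLE through the GDRST config byte and then polls for completion; wait_for() in the driver is a jiffies-based poll-with-timeout macro. A compact sketch of the same idiom in isolation, with a hypothetical condition callback (illustrative only, not the driver's implementation):

	/* Poll cond() roughly once per millisecond, up to timeout_ms. */
	static int poll_until(int (*cond)(void), unsigned int timeout_ms)
	{
		unsigned int waited = 0;

		while (!cond()) {
			if (waited++ >= timeout_ms)
				return -ETIMEDOUT; /* caller treats nonzero as failure */
			mdelay(1);
		}
		return 0;
	}
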
static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
static int g33_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
	return -ENODEV;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}

static int g4x_do_reset(struct drm_device *dev)
@@ -1086,9 +1375,9 @@ static int g4x_do_reset(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

@@ -1096,9 +1385,9 @@ static int g4x_do_reset(struct drm_device *dev)
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

@@ -1106,7 +1395,7 @@ static int g4x_do_reset(struct drm_device *dev)
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}
@@ -1164,8 +1453,10 @@ int intel_gpu_reset(struct drm_device *dev)
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_GEN4(dev))
		return i965_do_reset(dev);
	else if (IS_G33(dev))
		return g33_do_reset(dev);
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset(dev);
	else
		return -ENODEV;
}

@@ -5,73 +5,25 @@
#include <uapi/drm/drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>

#include <syscall.h>

#include "bitmap.h"

typedef struct
{
    kobj_t     header;

    uint32_t  *data;
    uint32_t   hot_x;
    uint32_t   hot_y;

    struct list_head list;
    struct drm_i915_gem_object *cobj;
}cursor_t;

#define KMS_CURSOR_WIDTH 64
#define KMS_CURSOR_HEIGHT 64
#include <display.h>

struct tag_display
{
    int  x;
    int  y;
    int  width;
    int  height;
    int  bpp;
    int  vrefresh;
    int  pitch;
    int  lfb;

    int  supported_modes;
    struct drm_device *ddev;
    struct drm_connector *connector;
    struct drm_crtc *crtc;

    struct list_head cursors;

    cursor_t  *cursor;
    int (*init_cursor)(cursor_t*);
    cursor_t* (__stdcall *select_cursor)(cursor_t*);
    void (*show_cursor)(int show);
    void (__stdcall *move_cursor)(cursor_t *cursor, int x, int y);
    void (__stdcall *restore_cursor)(int x, int y);
    void (*disable_mouse)(void);
    u32  mask_seqno;
    u32  check_mouse;
    u32  check_m_pixel;
};

static display_t *os_display;
display_t *os_display;
struct drm_i915_gem_object *main_fb_obj;

u32_t cmd_buffer;
u32_t cmd_offset;
u32 cmd_buffer;
u32 cmd_offset;

void init_render();
int sna_init();

int init_cursor(cursor_t *cursor);
static cursor_t* __stdcall select_cursor_kms(cursor_t *cursor);
static void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y);

@@ -330,7 +282,7 @@ int init_display_kms(struct drm_device *dev, videomode_t *usermode)
    struct drm_framebuffer *fb;

    cursor_t  *cursor;
    u32_t      ifl;
    u32        ifl;
    int        ret;

    mutex_lock(&dev->mode_config.mutex);
@@ -484,12 +436,13 @@ void i915_dpms(struct drm_device *dev, int mode)

void __attribute__((regparm(1))) destroy_cursor(cursor_t *cursor)
{
    struct drm_i915_gem_object *obj = cursor->cobj;
    list_del(&cursor->list);

    i915_gem_object_ggtt_unpin(cursor->cobj);

    mutex_lock(&main_device->struct_mutex);
    drm_gem_object_unreference(&cursor->cobj->base);
    drm_gem_object_unreference(&obj->base);
    mutex_unlock(&main_device->struct_mutex);

    __DestroyObject(cursor);
@@ -645,15 +598,6 @@ typedef struct

#define CURRENT_TASK             (0x80003000)

static u32_t get_display_map()
{
    u32_t   addr;

    addr = (u32_t)os_display;
    addr+= sizeof(display_t);            /*  shoot me  */
    return *(u32_t*)addr;
}

void FASTCALL GetWindowRect(rect_t *rc)__asm__("GetWindowRect");

int i915_mask_update(struct drm_device *dev, void *data,
@@ -719,12 +663,12 @@ int i915_mask_update(struct drm_device *dev, void *data,

//    slot = 0x01;

    src_offset = os_display->win_map;
    src_offset+= winrc.top*os_display->width + winrc.left;

    src_offset = (u8*)( winrc.top*os_display->width + winrc.left);
    src_offset+= get_display_map();
    dst_offset = (u8*)mask->bo_map;

    u32_t tmp_h = mask->height;
    u32 tmp_h = mask->height;

    ifl = safe_cli();
    {
@@ -926,11 +870,11 @@ int i915_mask_update_ex(struct drm_device *dev, void *data,

    i915_gem_object_set_to_cpu_domain(to_intel_bo(obj), true);

    src_offset = (u8*)( mt*os_display->width + ml);
    src_offset+= get_display_map();
    src_offset = os_display->win_map;
    src_offset+= mt*os_display->width + ml;
    dst_offset = (u8*)mask->bo_map;

    u32_t tmp_h = mask->height;
    u32 tmp_h = mask->height;

    ifl = safe_cli();
    {
@@ -1145,21 +1089,5 @@ int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *
    return 1;
}

unsigned int hweight16(unsigned int w)
{
    unsigned int res = w - ((w >> 1) & 0x5555);
    res = (res & 0x3333) + ((res >> 2) & 0x3333);
    res = (res + (res >> 4)) & 0x0F0F;
    return (res + (res >> 8)) & 0x00FF;
}

unsigned long round_jiffies_up_relative(unsigned long j)
{
    unsigned long j0 = GetTimerTicks();

    /* Use j0 because jiffies might change while we run */
    return round_jiffies_common(j + j0, true) - j0;
}

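Note the destroy_cursor() change above: the object pointer is saved in a local before the cursor is torn down, so the final unreference no longer reads through the structure being destroyed. The same ordering in isolation, as a sketch:

	/* Keep a local reference before dismantling the holder. */
	struct drm_i915_gem_object *obj = cursor->cobj;

	list_del(&cursor->list);                  /* unlink first */
	i915_gem_object_ggtt_unpin(cursor->cobj); /* drop the GGTT pin */

	mutex_lock(&main_device->struct_mutex);
	drm_gem_object_unreference(&obj->base);   /* use the saved pointer */
	mutex_unlock(&main_device->struct_mutex);
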
@@ -5,7 +5,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>
#include <syscall.h>

@@ -33,7 +32,8 @@ struct drm_device *main_device;
struct drm_file *drm_file_handlers[256];
videomode_t usermode;

void cpu_detect();
void cpu_detect1();
int kmap_init();

int _stdcall display_handler(ioctl_t *io);
int init_agp(void);
@@ -170,7 +170,7 @@ void i915_driver_thread()
    asm volatile ("int $0x40"::"a"(-1));
}

u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
u32 __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
    static pci_dev_t device;
    const struct pci_device_id *ent;
@@ -186,7 +186,7 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
    if( GetService("DISPLAY") != 0 )
        return 0;

    printf("\ni915 v3.17-rc5 build %s %s\nusage: i915 [options]\n"
    printf("\ni915 v3.19-rc2 build %s %s\nusage: i915 [options]\n"
           "-pm=<0,1>     Enable powersavings, fbc, downclocking, etc. (default: 1 - true)\n",
           __DATE__, __TIME__);
    printf("-rc6=<-1,0-7> Enable power-saving render C-state 6.\n"
@@ -210,7 +210,7 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
        return 0;
    }

    cpu_detect();
    cpu_detect1();
//    dbgprintf("\ncache line size %d\n", x86_clflush_size);

    err = enum_pci_devices();
@@ -220,6 +220,13 @@ u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
        return 0;
    }

    err = kmap_init();
    if( unlikely(err != 0) )
    {
        dbgprintf("kmap initialization failed\n");
        return 0;
    }

    dmi_scan_machine();

    driver_wq_state = I915_DEV_INIT;
@@ -310,8 +317,8 @@ int _stdcall display_handler(ioctl_t *io)
    struct drm_file *file;

    int    retval = -1;
    u32_t  *inp;
    u32_t  *outp;
    u32 *inp;
    u32 *outp;

    inp = io->input;
    outp = io->output;
@@ -465,10 +472,10 @@ int _stdcall display_handler(ioctl_t *io)
#define PCI_CLASS_BRIDGE_HOST     0x0600
#define PCI_CLASS_BRIDGE_ISA      0x0601

int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
int pci_scan_filter(u32 id, u32 busnr, u32 devfn)
{
    u16_t vendor, device;
    u32_t class;
    u16 vendor, device;
    u32 class;
    int   ret = 0;

    vendor = id & 0xffff;
@@ -488,43 +495,17 @@ int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
};

static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
    /* ecx is often an input as well as an output. */
    asm volatile("cpuid"
        : "=a" (*eax),
          "=b" (*ebx),
          "=c" (*ecx),
          "=d" (*edx)
        : "0" (*eax), "2" (*ecx)
        : "memory");
}

static inline void cpuid(unsigned int op,
                         unsigned int *eax, unsigned int *ebx,
                         unsigned int *ecx, unsigned int *edx)
{
    *eax = op;
    *ecx = 0;
    __cpuid(eax, ebx, ecx, edx);
}

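The cpuid() wrapper above loads the leaf into EAX and zeroes ECX (the sub-leaf selector) before executing the instruction. A self-contained x86 example of the same pattern, printing the CLFLUSH line size exactly as cpu_detect1() derives it (leaf 1: EDX bit 19 is the CLFSH feature flag, EBX bits 15:8 give the line size in 8-byte units):

	#include <stdio.h>

	static inline void cpuid(unsigned op, unsigned *a, unsigned *b,
	                         unsigned *c, unsigned *d)
	{
	    *a = op;
	    *c = 0;
	    asm volatile("cpuid"
	                 : "=a" (*a), "=b" (*b), "=c" (*c), "=d" (*d)
	                 : "0" (*a), "2" (*c)
	                 : "memory");
	}

	int main(void)
	{
	    unsigned a, b, c, d;

	    cpuid(1, &a, &b, &c, &d);
	    if (d & (1u << 19)) /* CLFSH supported */
	        printf("clflush line size: %u bytes\n", ((b >> 8) & 0xff) * 8);
	    return 0;
	}
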
struct mtrr
{
    u64_t  base;
    u64_t  mask;
    u64  base;
    u64  mask;
};

struct cpuinfo
{
    u64_t  caps;
    u64_t  def_mtrr;
    u64_t  mtrr_cap;
    u64  caps;
    u64  def_mtrr;
    u64  mtrr_cap;
    int    var_mtrr_count;
    int    fix_mtrr_count;
    struct mtrr var_mtrr[9];
@@ -549,13 +530,13 @@ struct cpuinfo
#define MTRR_WC  1
#define MTRR_WB  6

static inline u64_t read_msr(u32_t msr)
static inline u64 read_msr(u32 msr)
{
    union {
        u64_t  val;
        u64  val;
        struct {
            u32_t low;
            u32_t high;
            u32 low;
            u32 high;
        };
    }tmp;

@@ -566,13 +547,13 @@ static inline u64_t read_msr(u32_t msr)
    return tmp.val;
}

static inline void write_msr(u32_t msr, u64_t val)
static inline void write_msr(u32 msr, u64 val)
{
    union {
        u64_t  val;
        u64  val;
        struct {
            u32_t low;
            u32_t high;
            u32 low;
            u32 high;
        };
    }tmp;

@@ -583,24 +564,6 @@ static inline void write_msr(u32_t msr, u64_t val)
        :: "a" (tmp.low), "d" (tmp.high), "c" (msr));
}

#define rdmsr(msr, low, high) \
do { \
    u64 __val = read_msr((msr)); \
    (void)((low) = (u32)__val); \
    (void)((high) = (u32)(__val >> 32)); \
} while (0)

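The rdmsr() macro splits the 64-bit result of read_msr() into the low/high halves that the wrmsr-style interfaces expect. Illustrative use against the MTRR default-type MSR (0x2FF on x86):

	u32 lo, hi;

	rdmsr(0x2FF, lo, hi);            /* lo = bits 31:0, hi = bits 63:32 */
	/* ... modify lo/hi as needed ... */
	native_write_msr(0x2FF, lo, hi); /* write both halves back */
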
static inline void native_write_msr(unsigned int msr,
                                    unsigned low, unsigned high)
{
    asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
}

static inline void wbinvd(void)
{
    asm volatile("wbinvd": : :"memory");
}

#define SIZE_OR_MASK_BITS(n)  (~((1ULL << ((n) - PAGE_SHIFT)) - 1))

static void set_mtrr(unsigned int reg, unsigned long base,
@@ -630,53 +593,16 @@ static void set_mtrr(unsigned int reg, unsigned long base,
    };
}

static unsigned long __force_order;

static inline unsigned long read_cr0(void)
{
    unsigned long val;
    asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
    return val;
}

static inline void write_cr0(unsigned long val)
{
    asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr4(void)
{
    unsigned long val;
    asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
    return val;
}

static inline void write_cr4(unsigned long val)
{
    asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

static inline unsigned long read_cr3(void)
{
    unsigned long val;
    asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
    return val;
}

static inline void write_cr3(unsigned long val)
{
    asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static u32 deftype_lo, deftype_hi;

void cpu_detect()
void cpu_detect1()
{
    struct cpuinfo cpuinfo;

    u32 junk, tfms, cap0, misc;
    int i;
#if 0

    cpuid(0x00000001, &tfms, &misc, &junk, &cap0);

    if (cap0 & (1<<19))
@@ -684,6 +610,7 @@ void cpu_detect()
        x86_clflush_size = ((misc >> 8) & 0xff) * 8;
    }

#if 0
    cpuid(0x80000002, (unsigned int*)&cpuinfo.model_name[0], (unsigned int*)&cpuinfo.model_name[4],
          (unsigned int*)&cpuinfo.model_name[8], (unsigned int*)&cpuinfo.model_name[12]);
    cpuid(0x80000003, (unsigned int*)&cpuinfo.model_name[16], (unsigned int*)&cpuinfo.model_name[20],
@@ -955,4 +882,3 @@ __asm__ __volatile__(
    return __res;
}

@@ -2,11 +2,11 @@
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <pci.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <syscall.h>

extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);
extern int pci_scan_filter(u32 id, u32 busnr, u32 devfn);

static LIST_HEAD(devices);

@@ -31,9 +31,9 @@ static inline unsigned int pci_calc_resource_flags(unsigned int flags)
}

static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask)
static u32 pci_size(u32 base, u32 maxbase, u32 mask)
{
    u32_t size = mask & maxbase;      /* Find the significant bits */
    u32 size = mask & maxbase;      /* Find the significant bits */

    if (!size)
        return 0;
@@ -50,9 +50,9 @@ static u32_t pci_size(u32_t base, u32_t maxbase, u32_t mask)
    return size;
}

static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask)
static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
{
    u64_t size = mask & maxbase;      /* Find the significant bits */
    u64 size = mask & maxbase;      /* Find the significant bits */

    if (!size)
        return 0;
@@ -69,7 +69,7 @@ static u64_t pci_size64(u64_t base, u64_t maxbase, u64_t mask)
    return size;
}

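pci_size() implements the standard BAR sizing probe: after software writes all-ones to a BAR, the device returns zeros in the address bits it decodes, so isolating the lowest set bit of the masked read-back yields the region size. A runnable arithmetic check of that rule (simplified relative to the driver's version):

	#include <stdio.h>
	#include <stdint.h>

	/* maxbase: value read back after writing ~0; mask selects address bits. */
	static uint32_t bar_size(uint32_t maxbase, uint32_t mask)
	{
	    uint32_t size = mask & maxbase;      /* keep the significant bits */

	    if (!size)
	        return 0;
	    size = (size & ~(size - 1)) - 1;     /* lowest set bit, minus one */
	    return size + 1;
	}

	int main(void)
	{
	    /* A 1 MiB memory BAR reads back 0xFFF00000 after the ~0 write. */
	    printf("size = 0x%x\n", bar_size(0xFFF00000u, 0xFFFFFFF0u));
	    return 0;
	}
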
static inline int is_64bit_memory(u32_t mask)
static inline int is_64bit_memory(u32 mask)
{
    if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
        (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
@@ -79,15 +79,15 @@ static inline int is_64bit_memory(u32_t mask)

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
    u32_t  pos, reg, next;
    u32_t  l, sz;
    u32 pos, reg, next;
    u32 l, sz;
    struct resource *res;

    for(pos=0; pos < howmany; pos = next)
    {
        u64_t  l64;
        u64_t  sz64;
        u32_t  raw_sz;
        u64 l64;
        u64 sz64;
        u32 raw_sz;

        next = pos + 1;

@@ -109,7 +109,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
        if ((l & PCI_BASE_ADDRESS_SPACE) ==
            PCI_BASE_ADDRESS_SPACE_MEMORY)
        {
            sz = pci_size(l, sz, (u32_t)PCI_BASE_ADDRESS_MEM_MASK);
            sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
            /*
             * For 64bit prefetchable memory sz could be 0, if the
             * real size is bigger than 4G, so we need to check
@@ -131,14 +131,14 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
        res->flags |= pci_calc_resource_flags(l);
        if (is_64bit_memory(l))
        {
            u32_t szhi, lhi;
            u32 szhi, lhi;

            lhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, ~0);
            szhi = PciRead32(dev->busnr, dev->devfn, reg+4);
            PciWrite32(dev->busnr, dev->devfn, reg+4, lhi);
            sz64 = ((u64_t)szhi << 32) | raw_sz;
            l64 = ((u64_t)lhi << 32) | l;
            sz64 = ((u64)szhi << 32) | raw_sz;
            l64 = ((u64)lhi << 32) | l;
            sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
            next++;

@@ -162,7 +162,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
            {
                /* 64-bit wide address, treat as disabled */
                PciWrite32(dev->busnr, dev->devfn, reg,
                           l & ~(u32_t)PCI_BASE_ADDRESS_MEM_MASK);
                           l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
                PciWrite32(dev->busnr, dev->devfn, reg+4, 0);
                res->start = 0;
                res->end = sz;
@@ -186,7 +186,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)

        if (sz && sz != 0xffffffff)
        {
            sz = pci_size(l, sz, (u32_t)PCI_ROM_ADDRESS_MASK);
            sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);

            if (sz)
            {
@@ -202,7 +202,7 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)

static void pci_read_irq(struct pci_dev *dev)
{
    u8_t irq;
    u8 irq;

    irq = PciRead8(dev->busnr, dev->devfn, PCI_INTERRUPT_PIN);
    dev->pin = irq;
@@ -214,7 +214,7 @@ static void pci_read_irq(struct pci_dev *dev)

int pci_setup_device(struct pci_dev *dev)
{
    u32_t class;
    u32 class;

    class = PciRead32(dev->busnr, dev->devfn, PCI_CLASS_REVISION);
    dev->revision = class & 0xff;
@@ -246,7 +246,7 @@ int pci_setup_device(struct pci_dev *dev)
     */
    if (class == PCI_CLASS_STORAGE_IDE)
    {
        u8_t progif;
        u8 progif;

        progif = PciRead8(dev->busnr, dev->devfn,PCI_CLASS_PROG);
        if ((progif & 1) == 0)
@@ -311,12 +311,12 @@ int pci_setup_device(struct pci_dev *dev)
    return 0;
};

static pci_dev_t* pci_scan_device(u32_t busnr, int devfn)
static pci_dev_t* pci_scan_device(u32 busnr, int devfn)
{
    pci_dev_t  *dev;

    u32_t  id;
    u8_t   hdr;
    u32 id;
    u8 hdr;

    int timeout = 10;

@@ -372,7 +372,7 @@ static pci_dev_t* pci_scan_device(u32_t busnr, int devfn)

int pci_scan_slot(u32_t bus, int devfn)
int pci_scan_slot(u32 bus, int devfn)
{
    int  func, nr = 0;

@@ -480,8 +480,8 @@ int pci_find_capability(struct pci_dev *dev, int cap)
int enum_pci_devices()
{
    pci_dev_t  *dev;
    u32_t      last_bus;
    u32_t      bus = 0 , devfn = 0;
    u32 last_bus;
    u32 bus = 0 , devfn = 0;

    last_bus = PciApi(1);
@@ -664,11 +664,6 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
}

struct pci_bus_region {
    resource_size_t start;
    resource_size_t end;
};

static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
                        struct resource *res)

@@ -12,7 +12,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
    struct file *filep;
    int count;

    filep = malloc(sizeof(*filep));
    filep = __builtin_malloc(sizeof(*filep));

    if(unlikely(filep == NULL))
        return ERR_PTR(-ENOMEM);
@@ -248,7 +248,6 @@ static inline char _tolower(const char c)
}

//const char hex_asc[] = "0123456789abcdef";

/**
@@ -478,35 +477,302 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
}

#define KMAP_MAX    256

static struct mutex kmap_mutex;
static struct page* kmap_table[KMAP_MAX];
static int kmap_av;
static int kmap_first;
static void* kmap_base;

int kmap_init()
{
    kmap_base = AllocKernelSpace(KMAP_MAX*4096);
    if(kmap_base == NULL)
        return -1;

    kmap_av = KMAP_MAX;
    MutexInit(&kmap_mutex);
    return 0;
};

void *kmap(struct page *page)
{
    void *vaddr;
    void *vaddr = NULL;
    int i;

    vaddr = (void*)MapIoMem(page_to_phys(page), 4096, PG_SW);
    do
    {
        MutexLock(&kmap_mutex);
        if(kmap_av != 0)
        {
            for(i = kmap_first; i < KMAP_MAX; i++)
            {
                if(kmap_table[i] == NULL)
                {
                    kmap_av--;
                    kmap_first = i;
                    kmap_table[i] = page;
                    vaddr = kmap_base + (i<<12);
                    MapPage(vaddr,(addr_t)page,3);
                    break;
                };
            };
        };
        MutexUnlock(&kmap_mutex);
    }while(vaddr == NULL);

    return vaddr;
}
};

unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
void *kmap_atomic(struct page *page) __attribute__ ((alias ("kmap")));

void kunmap(struct page *page)
{
    const unsigned long *p = addr;
    unsigned long result = 0;
    unsigned long tmp;
    void *vaddr;
    int   i;

    while (size & ~(BITS_PER_LONG-1)) {
        if (~(tmp = *(p++)))
            goto found;
        result += BITS_PER_LONG;
        size -= BITS_PER_LONG;
    }
    if (!size)
        return result;
    MutexLock(&kmap_mutex);

    tmp = (*p) | (~0UL << size);
    if (tmp == ~0UL)            /* Are any bits zero? */
        return result + size;   /* Nope. */
found:
    return result + ffz(tmp);
    for(i = 0; i < KMAP_MAX; i++)
    {
        if(kmap_table[i] == page)
        {
            kmap_av++;
            if(i < kmap_first)
                kmap_first = i;
            kmap_table[i] = NULL;
            vaddr = kmap_base + (i<<12);
            MapPage(vaddr,0,0);
            break;
        };
    };

    MutexUnlock(&kmap_mutex);
};

void kunmap_atomic(void *vaddr)
{
    int i;

    MapPage(vaddr,0,0);

    i = (vaddr - kmap_base) >> 12;

    MutexLock(&kmap_mutex);

    kmap_av++;
    if(i < kmap_first)
        kmap_first = i;
    kmap_table[i] = NULL;

    MutexUnlock(&kmap_mutex);
}

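The new kmap() above hands out one of KMAP_MAX fixed kernel-space slots and retries (releasing the mutex between attempts) until a slot frees up; kunmap() returns the slot by page, kunmap_atomic() by address. Typical usage, sketched against these routines:

	/* Sketch: copy one page of data out of a struct page via a temporary slot. */
	void *src = kmap(page);   /* may spin until a slot is free */
	memcpy(buffer, src, 4096);
	kunmap(page);             /* slot is recycled for the next caller */
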
size_t strlcat(char *dest, const char *src, size_t count)
{
    size_t dsize = strlen(dest);
    size_t len = strlen(src);
    size_t res = dsize + len;

    /* This would be a bug */
    BUG_ON(dsize >= count);

    dest += dsize;
    count -= dsize;
    if (len >= count)
        len = count-1;
    memcpy(dest, src, len);
    dest[len] = 0;
    return res;
}
EXPORT_SYMBOL(strlcat);

void msleep(unsigned int msecs)
{
    msecs /= 10;
    if(!msecs) msecs = 1;

    __asm__ __volatile__ (
        "call *__imp__Delay"
        ::"b" (msecs));
    __asm__ __volatile__ (
        "":::"ebx");
};

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
    asm volatile(
        "   test %0,%0  \n"
        "   jz 3f       \n"
        "   jmp 1f      \n"

        ".align 16      \n"
        "1: jmp 2f      \n"

        ".align 16      \n"
        "2: dec %0      \n"
        "   jnz 2b      \n"
        "3: dec %0      \n"

        : /* we don't need output */
        : "a" (loops)
    );
}

static void (*delay_fn)(unsigned long) = delay_loop;

void __delay(unsigned long loops)
{
    delay_fn(loops);
}

inline void __const_udelay(unsigned long xloops)
{
    int d0;

    xloops *= 4;
    asm("mull %%edx"
        : "=d" (xloops), "=&a" (d0)
        : "1" (xloops), ""
        (loops_per_jiffy * (HZ/4)));

    __delay(++xloops);
}

void __udelay(unsigned long usecs)
{
    __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}

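The 0x000010c7 constant is 2^32/1000000 rounded up, so the mull in __const_udelay() converts microseconds into a fraction of loops_per_jiffy without any division: the high 32 bits of the product are the loop count. A runnable check of the scaling identity, with assumed calibration values (lpj and HZ are placeholders here):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
	    uint64_t usecs  = 100;
	    uint64_t xloops = usecs * 0x000010c7;   /* usecs * 2^32 / 10^6 */
	    uint64_t lpj = 5000000, hz = 100;       /* assumed calibration  */

	    /* High 32 bits of (xloops*4 * lpj*hz/4) == usecs * lpj * hz / 10^6 */
	    uint64_t loops = ((xloops * 4) * (lpj * (hz / 4))) >> 32;

	    printf("~%llu delay loops for %llu us\n",
	           (unsigned long long)loops, (unsigned long long)usecs);
	    return 0;   /* prints roughly 50000 for these values */
	}
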
unsigned int _sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
    w -= (w >> 1) & 0x55555555;
    w =  (w & 0x33333333) + ((w >> 2) & 0x33333333);
    w =  (w + (w >> 4)) & 0x0f0f0f0f;
    return (w * 0x01010101) >> 24;
#else
    unsigned int res = w - ((w >> 1) & 0x55555555);
    res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
    res = (res + (res >> 4)) & 0x0F0F0F0F;
    res = res + (res >> 8);
    return (res + (res >> 16)) & 0x000000FF;
#endif
}
EXPORT_SYMBOL(_sw_hweight32);

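Both branches of _sw_hweight32() compute a population count by summing pairs of bits, then nibbles, then bytes. A quick self-check of the non-multiplier branch:

	#include <stdio.h>

	static unsigned int hweight32(unsigned int w)
	{
	    unsigned int res = w - ((w >> 1) & 0x55555555);
	    res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	    res = (res + (res >> 4)) & 0x0F0F0F0F;
	    res = res + (res >> 8);
	    return (res + (res >> 16)) & 0x000000FF;
	}

	int main(void)
	{
	    printf("%u\n", hweight32(0xF0F0F0F0)); /* prints 16 */
	    printf("%u\n", hweight32(0x00000001)); /* prints 1  */
	    return 0;
	}
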
void usleep_range(unsigned long min, unsigned long max)
{
    udelay(max);
}
EXPORT_SYMBOL(usleep_range);

static unsigned long round_jiffies_common(unsigned long j, int cpu,
                                          bool force_up)
{
    int rem;
    unsigned long original = j;

    /*
     * We don't want all cpus firing their timers at once hitting the
     * same lock or cachelines, so we skew each extra cpu with an extra
     * 3 jiffies. This 3 jiffies came originally from the mm/ code which
     * already did this.
     * The skew is done by adding 3*cpunr, then round, then subtract this
     * extra offset again.
     */
    j += cpu * 3;

    rem = j % HZ;

    /*
     * If the target jiffie is just after a whole second (which can happen
     * due to delays of the timer irq, long irq off times etc etc) then
     * we should round down to the whole second, not up. Use 1/4th second
     * as cutoff for this rounding as an extreme upper bound for this.
     * But never round down if @force_up is set.
     */
    if (rem < HZ/4 && !force_up) /* round down */
        j = j - rem;
    else /* round up */
        j = j - rem + HZ;

    /* now that we have rounded, subtract the extra skew again */
    j -= cpu * 3;

    /*
     * Make sure j is still in the future. Otherwise return the
     * unmodified value.
     */
    return time_is_after_jiffies(j) ? j : original;
}

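Worked example of the rounding rule with HZ = 100 (so HZ/4 = 25), cpu = 0 and force_up false:

	/* j = 1230: rem = 30 >= 25 -> j = 1230 - 30 + 100 = 1300 (round up)   */
	/* j = 1220: rem = 20 <  25 -> j = 1220 - 20       = 1200 (round down) */
	/* With force_up set, 1220 would also round up to 1300.                */
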
unsigned long round_jiffies_up_relative(unsigned long j, int cpu)
{
    unsigned long j0 = jiffies;

    /* Use j0 because jiffies might change while we run */
    return round_jiffies_common(j + j0, 0, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

#include <linux/rcupdate.h>

struct rcu_ctrlblk {
    struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
    struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
    struct rcu_head **curtail;      /* ->next pointer of last CB. */
//    RCU_TRACE(long qlen);           /* Number of pending CBs. */
//    RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
//    RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
//    RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
//    RCU_TRACE(const char *name);    /* Name of RCU type. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
    .donetail = &rcu_sched_ctrlblk.rcucblist,
    .curtail  = &rcu_sched_ctrlblk.rcucblist,
//    RCU_TRACE(.name = "rcu_sched")
};

static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
    unsigned long flags;

//    debug_rcu_head_queue(head);
    head->func = func;
    head->next = NULL;

    local_irq_save(flags);
    *rcp->curtail = head;
    rcp->curtail = &head->next;
//    RCU_TRACE(rcp->qlen++);
    local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period. But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
    __call_rcu(head, func, &rcu_sched_ctrlblk);
}

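This single-CPU (tiny-RCU style) implementation just appends the callback to a singly linked list with interrupts off; the callback runs once a quiescent point later drains the list. A hedged usage sketch with a hypothetical object type:

	struct foo {
	    struct rcu_head rcu;
	    /* ... payload ... */
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
	    struct foo *f = container_of(head, struct foo, rcu);
	    kfree(f);
	}

	/* Defer freeing 'f' (a struct foo *) until after a grace period: */
	call_rcu_sched(&f->rcu, foo_free_rcu);
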
@@ -36,6 +36,7 @@ NAME:= atikms
HFILES:= $(DRV_INCLUDES)/linux/types.h \
         $(DRV_INCLUDES)/linux/list.h \
         $(DRV_INCLUDES)/linux/pci.h \
         $(DRV_INCLUDES)/drm/drm.h \
         $(DRV_INCLUDES)/drm/drmP.h \
         $(DRV_INCLUDES)/drm/drm_edid.h \
         $(DRV_INCLUDES)/drm/drm_crtc.h \
@@ -59,7 +60,6 @@ NAME_SRC= \
         $(DRM_TOPDIR)/drm_crtc_helper.c \
         $(DRM_TOPDIR)/drm_dp_helper.c \
         $(DRM_TOPDIR)/drm_drv.c \
         $(DRM_TOPDIR)/drm_atomic.c \
         $(DRM_TOPDIR)/drm_edid.c \
         $(DRM_TOPDIR)/drm_fb_helper.c \
         $(DRM_TOPDIR)/drm_gem.c \
@@ -129,7 +129,6 @@ NAME_SRC= \
         radeon_ring.c \
         radeon_sa.c \
         radeon_semaphore.c \
         radeon_sync.c \
         radeon_test.c \
         radeon_ttm.c \
         radeon_ucode.c \
@@ -146,6 +145,7 @@ NAME_SRC= \
         rv740_dpm.c \
         r520.c \
         r600.c \
         r600_audio.c \
         r600_blit_shaders.c \
         r600_cs.c \
         r600_dma.c \

@@ -134,7 +134,7 @@ u32 __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
    if( GetService("DISPLAY") != 0 )
        return 0;

    printf("Radeon v3.19-rc1 cmdline %s\n", cmdline);
    printf("Radeon v3.19-rc2 cmdline %s\n", cmdline);

    if( cmdline && *cmdline )
        parse_cmdline(cmdline, &usermode, log, &radeon_modeset);

@@ -4,7 +4,7 @@
#include "radeon.h"
#include "radeon_object.h"
#include "bitmap.h"
#include "display.h"
#include <display.h>

#include "r100d.h"

@@ -32,20 +32,20 @@ int init_cursor(cursor_t *cursor)
    rdev = (struct radeon_device *)os_display->ddev->dev_private;

    r = radeon_bo_create(rdev, CURSOR_WIDTH*CURSOR_HEIGHT*4,
                         4096, false, RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &cursor->robj);
                         4096, false, RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, (struct radeon_bo**)&cursor->cobj);

    if (unlikely(r != 0))
        return r;

    r = radeon_bo_reserve(cursor->robj, false);
    r = radeon_bo_reserve(cursor->cobj, false);
    if (unlikely(r != 0))
        return r;

    r = radeon_bo_pin(cursor->robj, RADEON_GEM_DOMAIN_VRAM, NULL);
    r = radeon_bo_pin(cursor->cobj, RADEON_GEM_DOMAIN_VRAM, NULL);
    if (unlikely(r != 0))
        return r;

    r = radeon_bo_kmap(cursor->robj, (void**)&bits);
    r = radeon_bo_kmap(cursor->cobj, (void**)&bits);
    if (r) {
        DRM_ERROR("radeon: failed to map cursor (%d).\n", r);
        return r;
@@ -63,7 +63,7 @@ int init_cursor(cursor_t *cursor)
    for(i = 0; i < CURSOR_WIDTH*(CURSOR_HEIGHT-32); i++)
        *bits++ = 0;

    radeon_bo_kunmap(cursor->robj);
    radeon_bo_kunmap(cursor->cobj);

//   cursor->header.destroy = destroy_cursor;

@@ -73,7 +73,7 @@ int init_cursor(cursor_t *cursor)
void __attribute__((regparm(1))) destroy_cursor(cursor_t *cursor)
{
    list_del(&cursor->list);
    radeon_bo_unpin(cursor->robj);
    radeon_bo_unpin(cursor->cobj);
    KernelFree(cursor->data);
    __DestroyObject(cursor);
};
@@ -110,7 +110,7 @@ cursor_t* __stdcall select_cursor(cursor_t *cursor)
    old = os_display->cursor;

    os_display->cursor = cursor;
    gpu_addr = radeon_bo_gpu_offset(cursor->robj);
    gpu_addr = radeon_bo_gpu_offset(cursor->cobj);

    if (ASIC_IS_DCE4(rdev))
    {
@@ -207,7 +207,7 @@ void __stdcall move_cursor(cursor_t *cursor, int x, int y)
    WREG32(RADEON_CUR_HORZ_VERT_POSN,
           (RADEON_CUR_LOCK | (x << 16) | y));

    gpu_addr = radeon_bo_gpu_offset(cursor->robj);
    gpu_addr = radeon_bo_gpu_offset(cursor->cobj);

    /* offset is from DISP(2)_BASE_ADDRESS */
    WREG32(RADEON_CUR_OFFSET,

@@ -6,7 +6,7 @@
#include "drm_fb_helper.h"
#include "hmm.h"
#include "bitmap.h"
#include "display.h"
#include <display.h>

extern struct drm_framebuffer *main_fb;
extern struct drm_gem_object *main_fb_obj;
@@ -94,7 +94,7 @@ cursor_t* __stdcall select_cursor_kms(cursor_t *cursor)
    old = os_display->cursor;

    os_display->cursor = cursor;
    gpu_addr = radeon_bo_gpu_offset(cursor->robj);
    gpu_addr = radeon_bo_gpu_offset(cursor->cobj);

    if (ASIC_IS_DCE4(rdev)) {
        WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
@@ -169,7 +169,7 @@ void __stdcall move_cursor_kms(cursor_t *cursor, int x, int y)
    WREG32(RADEON_CUR_HORZ_VERT_POSN,
           (RADEON_CUR_LOCK | (x << 16) | y));

    gpu_addr = radeon_bo_gpu_offset(cursor->robj);
    gpu_addr = radeon_bo_gpu_offset(cursor->cobj);

    /* offset is from DISP(2)_BASE_ADDRESS */
    WREG32(RADEON_CUR_OFFSET,