Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 15
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 43
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 47
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 70
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 57
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 108
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 12
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 174
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 1150
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 218
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 925
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 525
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 297
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 277
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 172
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c | 100
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 617
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c | 49
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 113
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 146
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 347
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_timeline.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 116
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_reg.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 959
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 169
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.c | 752
-rw-r--r--  drivers/gpu/drm/i915/i915_oa_hsw.h | 38
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 183
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 2096
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 488
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 141
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 266
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.h | 59
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 51
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 39
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 70
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 827
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 366
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 130
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 351
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 178
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 111
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_panel_vbt.c | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_pll.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 79
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 255
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 658
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 256
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 198
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 338
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 259
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 99
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 289
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c | 1011
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 843
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 209
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 155
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 66
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 163
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 131
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 116
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h (renamed from drivers/gpu/drm/i915/intel_guc.h) | 108
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 12
118 files changed, 12987 insertions, 6100 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 51ba630a134b..597648c7a645 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -19,9 +19,12 @@ config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
select PREEMPT_COUNT
+ select I2C_CHARDEV
+ select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
+ select DRM_I915_SW_FENCE_DEBUG_OBJECTS
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -43,3 +46,15 @@ config DRM_I915_DEBUG_GEM
If in doubt, say "N".
+config DRM_I915_SW_FENCE_DEBUG_OBJECTS
+ bool "Enable additional driver debugging for fence objects"
+ depends on DRM_I915
+ select DEBUG_OBJECTS
+ default n
+ help
+ Choose this option to turn on extra driver debugging that may affect
+ performance but will catch some internal issues.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 78711dddd937..c62ab45683c0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -24,7 +24,7 @@ i915-y := i915_drv.o \
intel_runtime_pm.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
+i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
# GEM code
i915-y += i915_cmd_parser.o \
@@ -55,7 +55,10 @@ i915-y += i915_cmd_parser.o \
intel_uncore.o
# general-purpose microcontroller (GuC) support
-i915-y += intel_guc_loader.o \
+i915-y += intel_uc.o \
+ intel_guc_log.o \
+ intel_guc_loader.o \
+ intel_huc.o \
i915_guc_submission.o
# autogenerated null render state
@@ -117,6 +120,10 @@ i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
# virtual gpu code
i915-y += i915_vgpu.o
+# perf code
+i915-y += i915_perf.o \
+ i915_oa_hsw.o
+
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
include $(src)/gvt/Makefile
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index f7bce8603958..3b6caaca9751 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -41,44 +41,35 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
- u32 alloc_flag, search_flag;
+ unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
- int retried = 0;
int ret;
if (high_gm) {
- search_flag = DRM_MM_SEARCH_BELOW;
- alloc_flag = DRM_MM_CREATE_TOP;
node = &vgpu->gm.high_gm_node;
size = vgpu_hidden_sz(vgpu);
- start = gvt_hidden_gmadr_base(gvt);
- end = gvt_hidden_gmadr_end(gvt);
+ start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+ end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
+ flags = PIN_HIGH;
} else {
- search_flag = DRM_MM_SEARCH_DEFAULT;
- alloc_flag = DRM_MM_CREATE_DEFAULT;
node = &vgpu->gm.low_gm_node;
size = vgpu_aperture_sz(vgpu);
- start = gvt_aperture_gmadr_base(gvt);
- end = gvt_aperture_gmadr_end(gvt);
+ start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+ end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
+ flags = PIN_MAPPABLE;
}
mutex_lock(&dev_priv->drm.struct_mutex);
-search_again:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
- node, size, 4096, 0,
- start, end, search_flag,
- alloc_flag);
- if (ret) {
- ret = i915_gem_evict_something(&dev_priv->ggtt.base,
- size, 4096, 0, start, end, 0);
- if (ret == 0 && ++retried < 3)
- goto search_again;
-
- gvt_err("fail to alloc %s gm space from host, retried %d\n",
- high_gm ? "high" : "low", retried);
- }
+ ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+ size, I915_GTT_PAGE_SIZE,
+ I915_COLOR_UNEVICTABLE,
+ start, end, flags);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (ret)
+ gvt_err("fail to alloc %s gm space from host\n",
+ high_gm ? "high" : "low");
+
return ret;
}
@@ -264,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
if (request > avail)
goto no_enough_resource;
- vgpu_aperture_sz(vgpu) = request;
+ vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
item = "high GM space";
max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
@@ -275,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
if (request > avail)
goto no_enough_resource;
- vgpu_hidden_sz(vgpu) = request;
+ vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
item = "fence";
max = gvt_fence_sz(gvt) - HOST_FENCE;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index e4563984cb1e..b9c8e2407682 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1134,6 +1134,8 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
u32 dword2 = cmd_val(s, 2);
u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
+ info->plane = PRIMARY_PLANE;
+
switch (plane) {
case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
info->pipe = PIPE_A;
@@ -1147,12 +1149,28 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
info->pipe = PIPE_C;
info->event = PRIMARY_C_FLIP_DONE;
break;
+
+ case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
+ info->pipe = PIPE_A;
+ info->event = SPRITE_A_FLIP_DONE;
+ info->plane = SPRITE_PLANE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
+ info->pipe = PIPE_B;
+ info->event = SPRITE_B_FLIP_DONE;
+ info->plane = SPRITE_PLANE;
+ break;
+ case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
+ info->pipe = PIPE_C;
+ info->event = SPRITE_C_FLIP_DONE;
+ info->plane = SPRITE_PLANE;
+ break;
+
default:
gvt_err("unknown plane code %d\n", plane);
return -EINVAL;
}
- info->pipe = PRIMARY_PLANE;
info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
info->tile_val = (dword1 & GENMASK(2, 0));
info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
@@ -1598,7 +1616,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
return -ENOMEM;
entry_obj->obj =
- i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
+ i915_gem_object_create(s->vgpu->gvt->dev_priv,
roundup(bb_size, PAGE_SIZE));
if (IS_ERR(entry_obj->obj)) {
ret = PTR_ERR(entry_obj->obj);
@@ -2661,14 +2679,13 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
- obj = i915_gem_object_create(dev,
+ obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index c0c884aeb30e..6d8fde880c39 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -83,7 +83,7 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
return 0;
}
-/* EDID with 1024x768 as its resolution */
+/* EDID with 1920x1200 as its resolution */
static unsigned char virtual_dp_monitor_edid[] = {
/*Header*/
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
@@ -97,11 +97,16 @@ static unsigned char virtual_dp_monitor_edid[] = {
0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
/* Established Timings: maximum resolution is 1024x768 */
0x21, 0x08, 0x00,
- /* Standard Timings. All invalid */
- 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
- 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
- /* 18 Byte Data Blocks 1: invalid */
- 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+ /*
+ * Standard Timings.
+ * The following new resolutions are supported:
+ * 1920x1080, 1280x720, 1280x960, 1280x1024,
+ * 1440x900, 1600x1200, 1680x1050
+ */
+ 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+ 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+ /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+ 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
/* 18 Byte Data Blocks 2: invalid */
0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
@@ -115,7 +120,7 @@ static unsigned char virtual_dp_monitor_edid[] = {
/* Extension Block Count */
0x00,
/* Checksum */
- 0xef,
+ 0x45,
};
#define DPCD_HEADER_SIZE 0xb
@@ -328,3 +333,15 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu)
else
return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
}
+
+/**
+ * intel_vgpu_reset_display- reset vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset the vGPU virtual display emulation state
+ *
+ */
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu)
+{
+ emulate_monitor_status_change(vgpu);
+}
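
Note on the new Standard Timing bytes above: each pair follows the usual EDID encoding, where horizontal active = (byte0 + 31) * 8 pixels, byte1 bits 7:6 select the aspect ratio (00 = 16:10 on EDID 1.3+, 01 = 4:3, 10 = 5:4, 11 = 16:9) and bits 5:0 give the refresh rate minus 60. A minimal userspace sketch of that decoding (hypothetical helper, not part of this patch):

#include <stdio.h>

/* Decode one EDID Standard Timing descriptor (two bytes). */
static void decode_std_timing(unsigned char b0, unsigned char b1)
{
	static const int num[] = { 16, 4, 5, 16 };	/* aspect width part  */
	static const int den[] = { 10, 3, 4, 9 };	/* aspect height part */
	unsigned int hactive, vactive, refresh, ar;

	hactive = (b0 + 31) * 8;	/* 0xd1 -> (209 + 31) * 8 = 1920 */
	ar = b1 >> 6;			/* 0xc0 -> 3 -> 16:9             */
	vactive = hactive * den[ar] / num[ar];
	refresh = (b1 & 0x3f) + 60;

	printf("%ux%u@%u\n", hactive, vactive, refresh);
}

int main(void)
{
	decode_std_timing(0xd1, 0xc0);	/* 1920x1080@60 */
	decode_std_timing(0x95, 0x00);	/* 1440x900@60  */
	return 0;
}

Run against the bytes added in the hunk above, this reproduces the resolutions listed in the comment (1920x1080, 1280x720, 1280x960, 1280x1024, 1440x900, 1600x1200, 1680x1050).
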
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 7a60cb848268..8b234ea961f6 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -158,6 +158,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 34083731669d..46eb9fd3c03f 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -487,7 +487,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- if (wa_ctx->indirect_ctx.size == 0)
+ if (!wa_ctx->indirect_ctx.obj)
return;
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 2fae2a2ca96f..1cb29b2d7dc6 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -48,31 +48,6 @@ struct gvt_firmware_header {
unsigned char data[1];
};
-#define RD(offset) (readl(mmio + offset.reg))
-#define WR(v, offset) (writel(v, mmio + offset.reg))
-
-static void bdw_forcewake_get(void __iomem *mmio)
-{
- WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT);
-
- RD(ECOBUS);
-
- if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50))
- gvt_err("fail to wait forcewake idle\n");
-
- WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT);
-
- if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50))
- gvt_err("fail to wait forcewake ack\n");
-
- if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) &
- GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50))
- gvt_err("fail to wait c0 wake up\n");
-}
-
-#undef RD
-#undef WR
-
#define dev_to_drm_minor(d) dev_get_drvdata((d))
static ssize_t
@@ -91,9 +66,9 @@ static struct bin_attribute firmware_attr = {
.mmap = NULL,
};
-static int expose_firmware_sysfs(struct intel_gvt *gvt,
- void __iomem *mmio)
+static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
struct intel_gvt_mmio_info *e;
@@ -132,7 +107,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt,
for (j = 0; j < e->length; j += 4)
*(u32 *)(p + e->offset + j) =
- readl(mmio + e->offset + j);
+ I915_READ_NOTRACE(_MMIO(e->offset + j));
}
memcpy(gvt->firmware.mmio, p, info->mmio_size);
@@ -235,7 +210,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
struct gvt_firmware_header *h;
const struct firmware *fw;
char *path;
- void __iomem *mmio;
void *mem;
int ret;
@@ -260,17 +234,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->mmio = mem;
- mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size);
- if (!mmio) {
- kfree(path);
- kfree(firmware->cfg_space);
- kfree(firmware->mmio);
- return -EINVAL;
- }
-
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv))
- bdw_forcewake_get(mmio);
-
sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
pdev->revision);
@@ -300,13 +263,11 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
release_firmware(fw);
firmware->firmware_loaded = true;
- pci_iounmap(pdev, mmio);
return 0;
out_free_fw:
release_firmware(fw);
expose_firmware:
- expose_firmware_sysfs(gvt, mmio);
- pci_iounmap(pdev, mmio);
+ expose_firmware_sysfs(gvt);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 47dec4acf7ff..28c92346db0e 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
static inline int init_shadow_page(struct intel_vgpu *vgpu,
struct intel_vgpu_shadow_page *p, int type)
{
+ struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
+
+ daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(kdev, daddr)) {
+ gvt_err("fail to map dma addr\n");
+ return -EINVAL;
+ }
+
p->vaddr = page_address(p->page);
p->type = type;
INIT_HLIST_NODE(&p->node);
- p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
- if (p->mfn == INTEL_GVT_INVALID_ADDR)
- return -EFAULT;
-
+ p->mfn = daddr >> GTT_PAGE_SHIFT;
hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
return 0;
}
-static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+static inline void clean_shadow_page(struct intel_vgpu *vgpu,
+ struct intel_vgpu_shadow_page *p)
{
+ struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+
+ dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+ PCI_DMA_BIDIRECTIONAL);
+
if (!hlist_unhashed(&p->node))
hash_del(&p->node);
}
@@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
- clean_shadow_page(&spt->shadow_page);
+ clean_shadow_page(spt->vgpu, &spt->shadow_page);
intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
list_del_init(&spt->post_shadow_list);
@@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
int page_entry_num = GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
- unsigned long mfn;
int i;
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;
@@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
return -ENOMEM;
}
- mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
- if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
- free_page((unsigned long)scratch_pt);
- return -EFAULT;
+ daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, daddr)) {
+ gvt_err("fail to dmamap scratch_pt\n");
+ __free_page(virt_to_page(scratch_pt));
+ return -ENOMEM;
}
- gtt->scratch_pt[type].page_mfn = mfn;
+ gtt->scratch_pt[type].page_mfn =
+ (unsigned long)(daddr >> GTT_PAGE_SHIFT);
gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
- vgpu->id, type, mfn);
+ vgpu->id, type, gtt->scratch_pt[type].page_mfn);
/* Build the tree by fully filling the scratch pt with entries which
* point to the next level scratch pt or scratch page. The
@@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
int i;
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL) {
+ daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
+ GTT_PAGE_SHIFT);
+ dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
__free_page(vgpu->gtt.scratch_pt[i].page);
vgpu->gtt.scratch_pt[i].page = NULL;
vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page;
+ struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
gvt_dbg_core("init gtt\n");
@@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
gvt_err("fail to allocate scratch ggtt page\n");
return -ENOMEM;
}
- gvt->gtt.scratch_ggtt_page = virt_to_page(page);
- gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
- if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate scratch ggtt page\n");
- __free_page(gvt->gtt.scratch_ggtt_page);
- return -EFAULT;
+ daddr = dma_map_page(dev, virt_to_page(page), 0,
+ 4096, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, daddr)) {
+ gvt_err("fail to dmamap scratch ggtt page\n");
+ __free_page(virt_to_page(page));
+ return -ENOMEM;
}
+ gvt->gtt.scratch_ggtt_page = virt_to_page(page);
+ gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
if (enable_out_of_sync) {
ret = setup_spt_oos(gvt);
@@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
+ struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
+ GTT_PAGE_SHIFT);
+
+ dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+
__free_page(gvt->gtt.scratch_ggtt_page);
if (enable_out_of_sync)
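
The scratch-page and shadow-page changes above all follow the same pattern: replace the hypervisor virt-to-mfn lookup with the generic DMA API, i.e. map the page with dma_map_page(), check the result with dma_mapping_error(), store the bus address shifted down by the page shift as the "mfn", and undo the shift before dma_unmap_page() on teardown. A minimal sketch of that pairing, using hypothetical helper names and assuming the usual 4K page shift:

/* Sketch only: mirrors the map/unmap pairing used in the hunks above. */
static int gvt_map_scratch(struct device *dev, struct page *page,
			   unsigned long *out_mfn)
{
	dma_addr_t daddr;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr))
		return -ENOMEM;

	*out_mfn = (unsigned long)(daddr >> PAGE_SHIFT);
	return 0;
}

static void gvt_unmap_scratch(struct device *dev, unsigned long mfn)
{
	/* Recover the bus address by undoing the shift before unmapping. */
	dma_unmap_page(dev, (dma_addr_t)mfn << PAGE_SHIFT, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
}
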
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index e6bf5c533fbe..3b9d59e457ba 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -68,8 +68,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
*/
int intel_gvt_init_host(void)
{
- int ret;
-
if (intel_gvt_host.initialized)
return 0;
@@ -96,11 +94,6 @@ int intel_gvt_init_host(void)
if (!intel_gvt_host.mpt)
return -EINVAL;
- /* Try to detect if we're running in host instead of VM. */
- ret = intel_gvt_hypervisor_detect_host();
- if (ret)
- return -ENODEV;
-
gvt_dbg_core("Running with hypervisor %s in host mode\n",
supported_hypervisors[intel_gvt_host.hypervisor_type]);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index ab2ea157da4c..1d450627ff65 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2167,7 +2167,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
- MMIO_D(OACONTROL, D_HSW);
+ MMIO_D(GEN7_OACONTROL, D_HSW);
MMIO_D(0x2b00, D_BDW_PLUS);
MMIO_D(0x2360, D_BDW_PLUS);
MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 30e543f5a703..df7f33abd393 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -38,7 +38,6 @@
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
*/
struct intel_gvt_mpt {
- int (*detect_host)(void);
int (*host_init)(struct device *dev, void *gvt, const void *ops);
void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index f7be02ac4be1..92bb247e3478 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -176,26 +176,15 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
- u32 changed, masked, unmasked;
u32 imr = *(u32 *)p_data;
- gvt_dbg_irq("write IMR %x with val %x\n",
- reg, imr);
-
- gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
-
- /* figure out newly masked/unmasked bits */
- changed = vgpu_vreg(vgpu, reg) ^ imr;
- masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
- unmasked = masked ^ changed;
-
- gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
- changed, masked, unmasked);
+ gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
+ reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);
vgpu_vreg(vgpu, reg) = imr;
ops->check_pending_irq(vgpu);
- gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+
return 0;
}
@@ -217,14 +206,11 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
- u32 changed, enabled, disabled;
u32 ier = *(u32 *)p_data;
u32 virtual_ier = vgpu_vreg(vgpu, reg);
- gvt_dbg_irq("write master irq reg %x with val %x\n",
- reg, ier);
-
- gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+ gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
+ reg, ier, virtual_ier, virtual_ier ^ ier);
/*
* GEN8_MASTER_IRQ is a special irq register,
@@ -236,16 +222,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
vgpu_vreg(vgpu, reg) |= ier;
- /* figure out newly enabled/disable bits */
- changed = virtual_ier ^ ier;
- enabled = (virtual_ier & changed) ^ changed;
- disabled = enabled ^ changed;
-
- gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
- changed, enabled, disabled);
-
ops->check_pending_irq(vgpu);
- gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+
return 0;
}
@@ -268,21 +246,11 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info;
- u32 changed, enabled, disabled;
u32 ier = *(u32 *)p_data;
- gvt_dbg_irq("write IER %x with val %x\n",
- reg, ier);
-
- gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+ gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
+ reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);
- /* figure out newly enabled/disable bits */
- changed = vgpu_vreg(vgpu, reg) ^ ier;
- enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
- disabled = enabled ^ changed;
-
- gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
- changed, enabled, disabled);
vgpu_vreg(vgpu, reg) = ier;
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
@@ -293,7 +261,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
update_upstream_irq(vgpu, info);
ops->check_pending_irq(vgpu);
- gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+
return 0;
}
@@ -317,7 +285,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
- gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+ gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
+ reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);
if (WARN_ON(!info))
return -EINVAL;
@@ -619,6 +588,10 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+ SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+ SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+ SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
}
/* GEN8 interrupt PCU events */
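
The trimmed IMR/IER handlers above fold the old three-value debug output into a single "changed" mask printed in the log line; the per-direction breakdown the removed code computed follows directly from that XOR. A small worked example (illustrative values, not from the patch):

	u32 old = 0x0000ffff, new = 0x00ff00ff;
	u32 changed  = old ^ new;	/* 0x00ffff00                      */
	u32 set_bits = changed & new;	/* 0x00ff0000: newly set (masked)  */
	u32 clr_bits = changed & old;	/* 0x0000ff00: newly cleared       */

which is equivalent to the removed masked = (old & changed) ^ changed and unmasked = masked ^ changed arithmetic.
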
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 3f656e3a6e5a..0f7f5d97f582 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -77,7 +77,7 @@ struct kvmgt_guest_info {
struct gvt_dma {
struct rb_node node;
gfn_t gfn;
- kvm_pfn_t pfn;
+ unsigned long iova;
};
static inline bool handle_valid(unsigned long handle)
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
+static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
+ unsigned long *iova)
+{
+ struct page *page;
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
+
+ page = pfn_to_page(pfn);
+ if (is_error_page(page))
+ return -EFAULT;
+
+ daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, daddr))
+ return -ENOMEM;
+
+ *iova = (unsigned long)(daddr >> PAGE_SHIFT);
+ return 0;
+}
+
+static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+{
+ struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ dma_addr_t daddr;
+
+ daddr = (dma_addr_t)(iova << PAGE_SHIFT);
+ dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -111,21 +140,22 @@ out:
return ret;
}
-static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
{
struct gvt_dma *entry;
- kvm_pfn_t pfn;
+ unsigned long iova;
mutex_lock(&vgpu->vdev.cache_lock);
entry = __gvt_cache_find(vgpu, gfn);
- pfn = (entry == NULL) ? 0 : entry->pfn;
+ iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
mutex_unlock(&vgpu->vdev.cache_lock);
- return pfn;
+ return iova;
}
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+ unsigned long iova)
{
struct gvt_dma *new, *itr;
struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
return;
new->gfn = gfn;
- new->pfn = pfn;
+ new->iova = iova;
mutex_lock(&vgpu->vdev.cache_lock);
while (*link) {
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
}
g1 = gfn;
+ gvt_dma_unmap_iova(vgpu, this->iova);
rc = vfio_unpin_pages(dev, &g1, 1);
WARN_ON(rc != 1);
__gvt_cache_remove_entry(vgpu, this);
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
mutex_lock(&vgpu->vdev.cache_lock);
while ((node = rb_first(&vgpu->vdev.cache))) {
dma = rb_entry(node, struct gvt_dma, node);
+ gvt_dma_unmap_iova(vgpu, dma->iova);
gfn = dma->gfn;
vfio_unpin_pages(dev, &gfn, 1);
@@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
sparse->areas[0].offset =
PAGE_ALIGN(vgpu_aperture_offset(vgpu));
sparse->areas[0].size = vgpu_aperture_sz(vgpu);
- if (!caps.buf) {
- kfree(caps.buf);
- caps.buf = NULL;
- caps.size = 0;
- }
break;
case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
@@ -1248,43 +1275,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
spin_unlock(&kvm->mmu_lock);
}
-static bool kvmgt_check_guest(void)
-{
- unsigned int eax, ebx, ecx, edx;
- char s[12];
- unsigned int *i;
-
- eax = KVM_CPUID_SIGNATURE;
- ebx = ecx = edx = 0;
-
- asm volatile ("cpuid"
- : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
- :
- : "cc", "memory");
- i = (unsigned int *)s;
- i[0] = ebx;
- i[1] = ecx;
- i[2] = edx;
-
- return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM"));
-}
-
-/**
- * NOTE:
- * It's actually impossible to check if we are running in KVM host,
- * since the "KVM host" is simply native. So we only dectect guest here.
- */
-static int kvmgt_detect_host(void)
-{
-#ifdef CONFIG_INTEL_IOMMU
- if (intel_iommu_gfx_mapped) {
- gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
- return -ENODEV;
- }
-#endif
- return kvmgt_check_guest() ? -ENODEV : 0;
-}
-
static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
{
struct intel_vgpu *itr;
@@ -1390,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
{
- unsigned long pfn;
+ unsigned long iova, pfn;
struct kvmgt_guest_info *info;
struct device *dev;
int rc;
@@ -1399,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
return INTEL_GVT_INVALID_ADDR;
info = (struct kvmgt_guest_info *)handle;
- pfn = gvt_cache_find(info->vgpu, gfn);
- if (pfn != 0)
- return pfn;
+ iova = gvt_cache_find(info->vgpu, gfn);
+ if (iova != INTEL_GVT_INVALID_ADDR)
+ return iova;
pfn = INTEL_GVT_INVALID_ADDR;
dev = mdev_dev(info->vgpu->vdev.mdev);
@@ -1410,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
return INTEL_GVT_INVALID_ADDR;
}
+ /* transfer to host iova for GFX to use DMA */
+ rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
+ if (rc) {
+ gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+ vfio_unpin_pages(dev, &gfn, 1);
+ return INTEL_GVT_INVALID_ADDR;
+ }
- gvt_cache_add(info->vgpu, gfn, pfn);
- return pfn;
+ gvt_cache_add(info->vgpu, gfn, iova);
+ return iova;
}
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
@@ -1459,7 +1456,6 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
}
struct intel_gvt_mpt kvmgt_mpt = {
- .detect_host = kvmgt_detect_host,
.host_init = kvmgt_host_init,
.host_exit = kvmgt_host_exit,
.attach_vgpu = kvmgt_attach_vgpu,
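
The gfn-to-iova cache introduced above is an rb-tree keyed by guest frame number: on a miss the page is pinned through vfio_pin_pages(), mapped for DMA to obtain an iova, and the (gfn, iova) pair is inserted so later lookups skip both steps; gvt_dma_unmap_iova() reverses the mapping when an entry is removed. A minimal sketch of the lookup step, assuming the struct gvt_dma layout shown in the hunk above (not the driver's exact code):

/* Sketch: rb-tree lookup keyed by gfn, in the style of __gvt_cache_find. */
static struct gvt_dma *gvt_cache_lookup(struct rb_root *root, gfn_t gfn)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct gvt_dma *entry = rb_entry(node, struct gvt_dma, node);

		if (gfn < entry->gfn)
			node = node->rb_left;
		else if (gfn > entry->gfn)
			node = node->rb_right;
		else
			return entry;	/* hit: entry->iova is ready to use */
	}
	return NULL;			/* miss: pin, dma_map, then insert */
}
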
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 1af5830c0a56..419353624c5a 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -44,18 +44,6 @@
*/
/**
- * intel_gvt_hypervisor_detect_host - check if GVT-g is running within
- * hypervisor host/privilged domain
- *
- * Returns:
- * Zero on success, -ENODEV if current kernel is running inside a VM
- */
-static inline int intel_gvt_hypervisor_detect_host(void)
-{
- return intel_gvt_host.mpt->detect_host();
-}
-
-/**
* intel_gvt_hypervisor_host_init - init GVT-g host side
*
* Returns:
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 44136b1f3aab..2b3a642284b6 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -236,12 +236,18 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
}
}
+#define CTX_CONTEXT_CONTROL_VAL 0x03
+
void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct render_mmio *mmio;
u32 v;
int i, array_size;
+ u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+ u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+ u32 inhibit_mask =
+ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
mmio = gen9_render_mmio_list;
@@ -257,6 +263,17 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
continue;
mmio->value = I915_READ(mmio->reg);
+
+ /*
+ * if it is an inhibit context, load in_context mmio
+ * into HW by mmio write. If it is not, skip this mmio
+ * write.
+ */
+ if (mmio->in_context &&
+ ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
+ i915.enable_execlists)
+ continue;
+
if (mmio->mask)
v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
else
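
The in-context check added above relies on the i915 "masked" register convention: the saved CTX_CONTEXT_CONTROL value carries the write-enable mask in its upper 16 bits and the value in the lower 16 bits, so testing for the restore-inhibit flag means checking both halves, which is why the code compares against _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) rather than the raw bit. A short sketch of that idiom, using a local stand-in macro rather than the driver's own:

/* Sketch of the masked-register idiom assumed by the hunk above. */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))

static bool ctx_restore_inhibited(u32 ctx_ctrl, u32 flag)
{
	u32 inhibit_mask = MASKED_BIT_ENABLE(flag);

	/* Both the mask bit and the value bit must be set. */
	return (ctx_ctrl & inhibit_mask) == inhibit_mask;
}
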
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 678b0be85376..06c9584ac5f0 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -125,7 +125,6 @@ static void tbs_sched_func(struct work_struct *work)
vgpu_data = scheduler->current_vgpu->sched_data;
head = &vgpu_data->list;
} else {
- gvt_dbg_sched("no current vgpu search from q head\n");
head = &sched_data->runq_head;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index e91885dffeff..d6b6d0efdd1a 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -169,7 +169,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
ring_id, workload);
- shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+ shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+ shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -456,7 +457,7 @@ static int workload_thread(void *priv)
}
complete:
- gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+ gvt_dbg_sched("will complete workload %p, status: %d\n",
workload, workload->status);
if (workload->req)
@@ -549,18 +550,10 @@ err:
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-
atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
- mutex_lock(&dev_priv->drm.struct_mutex);
-
- /* a little hacky to mark as ctx closed */
- vgpu->shadow_ctx->closed = true;
- i915_gem_context_put(vgpu->shadow_ctx);
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}
int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 7295bc8e12fb..95a97aa0051e 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -74,7 +74,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
unsigned int num_types;
- unsigned int i, low_avail;
+ unsigned int i, low_avail, high_avail;
unsigned int min_low;
/* vGPU type name is defined as GVTg_Vx_y which contains
@@ -89,9 +89,9 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
* to indicate how many vGPU instance can be created for this
* type.
*
- * Currently use static size here as we init type earlier..
*/
- low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
+ low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+ high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
num_types = 4;
gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
@@ -106,7 +106,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].low_gm_size = min_low;
gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
gvt->types[i].fence = 4;
- gvt->types[i].max_instance = low_avail / min_low;
+ gvt->types[i].max_instance = min(low_avail / min_low,
+ high_avail / gvt->types[i].high_gm_size);
gvt->types[i].avail_instance = gvt->types[i].max_instance;
if (IS_GEN8(gvt->dev_priv))
@@ -142,9 +143,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
/* Need to depend on maximum hw resource size but keep on
* static config for now.
*/
- low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
+ low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
gvt->gm.vgpu_allocated_low_gm_size;
- high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
+ high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
gvt->gm.vgpu_allocated_high_gm_size;
fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
gvt->fence.vgpu_allocated_fence_num;
@@ -384,6 +385,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
intel_vgpu_reset_resource(vgpu);
intel_vgpu_reset_mmio(vgpu);
populate_pvinfo_page(vgpu);
+ intel_vgpu_reset_display(vgpu);
if (dmlr)
intel_vgpu_reset_cfg_space(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f5039f4f988f..21b1cd917d81 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -86,6 +86,102 @@
* general bitmasking mechanism.
*/
+/*
+ * A command that requires special handling by the command parser.
+ */
+struct drm_i915_cmd_descriptor {
+ /*
+ * Flags describing how the command parser processes the command.
+ *
+ * CMD_DESC_FIXED: The command has a fixed length if this is set,
+ * a length mask if not set
+ * CMD_DESC_SKIP: The command is allowed but does not follow the
+ * standard length encoding for the opcode range in
+ * which it falls
+ * CMD_DESC_REJECT: The command is never allowed
+ * CMD_DESC_REGISTER: The command should be checked against the
+ * register whitelist for the appropriate ring
+ * CMD_DESC_MASTER: The command is allowed if the submitting process
+ * is the DRM master
+ */
+ u32 flags;
+#define CMD_DESC_FIXED (1<<0)
+#define CMD_DESC_SKIP (1<<1)
+#define CMD_DESC_REJECT (1<<2)
+#define CMD_DESC_REGISTER (1<<3)
+#define CMD_DESC_BITMASK (1<<4)
+#define CMD_DESC_MASTER (1<<5)
+
+ /*
+ * The command's unique identification bits and the bitmask to get them.
+ * This isn't strictly the opcode field as defined in the spec and may
+ * also include type, subtype, and/or subop fields.
+ */
+ struct {
+ u32 value;
+ u32 mask;
+ } cmd;
+
+ /*
+ * The command's length. The command is either fixed length (i.e. does
+ * not include a length field) or has a length field mask. The flag
+ * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
+ * a length mask. All command entries in a command table must include
+ * length information.
+ */
+ union {
+ u32 fixed;
+ u32 mask;
+ } length;
+
+ /*
+ * Describes where to find a register address in the command to check
+ * against the ring's register whitelist. Only valid if flags has the
+ * CMD_DESC_REGISTER bit set.
+ *
+ * A non-zero step value implies that the command may access multiple
+ * registers in sequence (e.g. LRI), in that case step gives the
+ * distance in dwords between individual offset fields.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 step;
+ } reg;
+
+#define MAX_CMD_DESC_BITMASKS 3
+ /*
+ * Describes command checks where a particular dword is masked and
+ * compared against an expected value. If the command does not match
+ * the expected value, the parser rejects it. Only valid if flags has
+ * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
+ * are valid.
+ *
+ * If the check specifies a non-zero condition_mask then the parser
+ * only performs the check when the bits specified by condition_mask
+ * are non-zero.
+ */
+ struct {
+ u32 offset;
+ u32 mask;
+ u32 expected;
+ u32 condition_offset;
+ u32 condition_mask;
+ } bits[MAX_CMD_DESC_BITMASKS];
+};
+
+/*
+ * A table of commands requiring special handling by the command parser.
+ *
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
+ */
+struct drm_i915_cmd_table {
+ const struct drm_i915_cmd_descriptor *table;
+ int count;
+};
+
#define STD_MI_OPCODE_SHIFT (32 - 9)
#define STD_3D_OPCODE_SHIFT (32 - 16)
#define STD_2D_OPCODE_SHIFT (32 - 10)
@@ -450,7 +546,6 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG64(PS_INVOCATION_COUNT),
REG64(PS_DEPTH_COUNT),
REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
- REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
REG64(MI_PREDICATE_SRC0),
REG64(MI_PREDICATE_SRC1),
REG32(GEN7_3DPRIM_END_OFFSET),
@@ -559,7 +654,7 @@ static const struct drm_i915_reg_table hsw_blt_reg_tables[] = {
static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
{
- u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
@@ -578,7 +673,7 @@ static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
{
- u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
u32 subclient =
(cmd_header & INSTR_SUBCLIENT_MASK) >> INSTR_SUBCLIENT_SHIFT;
u32 op = (cmd_header & INSTR_26_TO_24_MASK) >> INSTR_26_TO_24_SHIFT;
@@ -601,7 +696,7 @@ static u32 gen7_bsd_get_cmd_length_mask(u32 cmd_header)
static u32 gen7_blt_get_cmd_length_mask(u32 cmd_header)
{
- u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
+ u32 client = cmd_header >> INSTR_CLIENT_SHIFT;
if (client == INSTR_MI_CLIENT)
return 0x3F;
@@ -984,7 +1079,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
src = ERR_PTR(-ENODEV);
if (src_needs_clflush &&
- i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, NULL, 0)) {
+ i915_can_memcpy_from_wc(NULL, batch_start_offset, 0)) {
src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
if (!IS_ERR(src)) {
i915_memcpy_from_wc(dst,
@@ -1036,32 +1131,10 @@ unpin_src:
return dst;
}
-/**
- * intel_engine_needs_cmd_parser() - should a given engine use software
- * command parsing?
- * @engine: the engine in question
- *
- * Only certain platforms require software batch buffer command parsing, and
- * only when enabled via module parameter.
- *
- * Return: true if the engine requires software command parsing
- */
-bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine)
-{
- if (!engine->needs_cmd_parser)
- return false;
-
- if (!USES_PPGTT(engine->i915))
- return false;
-
- return (i915.enable_cmd_parser == 1);
-}
-
static bool check_cmd(const struct intel_engine_cs *engine,
const struct drm_i915_cmd_descriptor *desc,
const u32 *cmd, u32 length,
- const bool is_master,
- bool *oacontrol_set)
+ const bool is_master)
{
if (desc->flags & CMD_DESC_SKIP)
return true;
@@ -1099,31 +1172,6 @@ static bool check_cmd(const struct intel_engine_cs *engine,
}
/*
- * OACONTROL requires some special handling for
- * writes. We want to make sure that any batch which
- * enables OA also disables it before the end of the
- * batch. The goal is to prevent one process from
- * snooping on the perf data from another process. To do
- * that, we need to check the value that will be written
- * to the register. Hence, limit OACONTROL writes to
- * only MI_LOAD_REGISTER_IMM commands.
- */
- if (reg_addr == i915_mmio_reg_offset(OACONTROL)) {
- if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
- return false;
- }
-
- if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
- DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
- return false;
- }
-
- if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
- *oacontrol_set = (cmd[offset + 1] != 0);
- }
-
- /*
* Check the value written to the register against the
* allowed mask/value pair given in the whitelist entry.
*/
@@ -1214,7 +1262,6 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
u32 *cmd, *batch_end;
struct drm_i915_cmd_descriptor default_desc = noop_desc;
const struct drm_i915_cmd_descriptor *desc = &default_desc;
- bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
bool needs_clflush_after = false;
int ret = 0;
@@ -1270,20 +1317,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
break;
}
- if (!check_cmd(engine, desc, cmd, length, is_master,
- &oacontrol_set)) {
- ret = -EINVAL;
+ if (!check_cmd(engine, desc, cmd, length, is_master)) {
+ ret = -EACCES;
break;
}
cmd += length;
}
- if (oacontrol_set) {
- DRM_DEBUG_DRIVER("CMD: batch set OACONTROL but did not clear it\n");
- ret = -EINVAL;
- }
-
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
@@ -1313,7 +1354,7 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
/* If the command parser is not enabled, report 0 - unsupported */
for_each_engine(engine, dev_priv, id) {
- if (intel_engine_needs_cmd_parser(engine)) {
+ if (engine->needs_cmd_parser) {
active = true;
break;
}
@@ -1333,6 +1374,11 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
* 5. GPGPU dispatch compute indirect registers.
* 6. TIMESTAMP register and Haswell CS GPR registers
* 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
+ * 8. Don't report cmd_check() failures as EINVAL errors to userspace;
+ * rely on the HW to NOOP disallowed commands as it would without
+ * the parser enabled.
+ * 9. Don't whitelist or handle oacontrol specially, as ownership
+ * for oacontrol state is moving to i915-perf.
*/
- return 7;
+ return 9;
}
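
The descriptor structure moved into this file above drives three kinds of checks described in its comments: length validation (fixed length vs. a length-field mask), register-offset whitelisting (with an optional step for multi-register commands such as LRI), and per-dword bitmask comparisons gated by an optional condition mask. A minimal sketch of how one bits[] entry might be evaluated against a decoded command, assuming only the struct layout shown above (this is not the driver's check_cmd() implementation):

/* Sketch: evaluate one bits[] entry of a drm_i915_cmd_descriptor. */
static bool check_bitmask_entry(const struct drm_i915_cmd_descriptor *desc,
				const u32 *cmd, int idx)
{
	const typeof(desc->bits[0]) *b = &desc->bits[idx];

	if (b->mask == 0)
		return true;	/* unused entry */

	/* Only perform the check when the gating condition bits are set. */
	if (b->condition_mask &&
	    (cmd[b->condition_offset] & b->condition_mask) == 0)
		return true;

	return (cmd[b->offset] & b->mask) == b->expected;
}
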
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 791bfc760075..fa69d72fdcb9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,19 +26,9 @@
*
*/
-#include <linux/seq_file.h>
-#include <linux/circ_buf.h>
-#include <linux/ctype.h>
#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/export.h>
#include <linux/list_sort.h>
-#include <asm/msr-index.h>
-#include <drm/drmP.h>
#include "intel_drv.h"
-#include "intel_ringbuffer.h"
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -77,6 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev_priv);
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
+ seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
@@ -168,8 +159,35 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
i915_vma_is_ggtt(vma) ? "g" : "pp",
vma->node.start, vma->node.size);
- if (i915_vma_is_ggtt(vma))
- seq_printf(m, ", type: %u", vma->ggtt_view.type);
+ if (i915_vma_is_ggtt(vma)) {
+ switch (vma->ggtt_view.type) {
+ case I915_GGTT_VIEW_NORMAL:
+ seq_puts(m, ", normal");
+ break;
+
+ case I915_GGTT_VIEW_PARTIAL:
+ seq_printf(m, ", partial [%08llx+%x]",
+ vma->ggtt_view.partial.offset << PAGE_SHIFT,
+ vma->ggtt_view.partial.size << PAGE_SHIFT);
+ break;
+
+ case I915_GGTT_VIEW_ROTATED:
+ seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
+ vma->ggtt_view.rotated.plane[0].width,
+ vma->ggtt_view.rotated.plane[0].height,
+ vma->ggtt_view.rotated.plane[0].stride,
+ vma->ggtt_view.rotated.plane[0].offset,
+ vma->ggtt_view.rotated.plane[1].width,
+ vma->ggtt_view.rotated.plane[1].height,
+ vma->ggtt_view.rotated.plane[1].stride,
+ vma->ggtt_view.rotated.plane[1].offset);
+ break;
+
+ default:
+ MISSING_CASE(vma->ggtt_view.type);
+ break;
+ }
+ }
if (vma->fence)
seq_printf(m, " , fence: %d%s",
vma->fence->id,
@@ -549,10 +567,10 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
if (work->flip_queued_req) {
struct intel_engine_cs *engine = work->flip_queued_req->engine;
- seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? %d\n",
+ seq_printf(m, "Flip queued on %s at seqno %x, last submitted seqno %x [current breadcrumb %x], completed? %d\n",
engine->name,
work->flip_queued_req->global_seqno,
- atomic_read(&dev_priv->gt.global_timeline.next_seqno),
+ intel_engine_last_submit(engine),
intel_engine_get_seqno(engine),
i915_gem_request_completed(work->flip_queued_req));
} else
@@ -686,7 +704,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = container_of(rb, typeof(*w), node);
+ struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
@@ -946,7 +964,7 @@ i915_error_state_write(struct file *filp,
struct i915_error_state_file_priv *error_priv = filp->private_data;
DRM_DEBUG_DRIVER("Resetting error state\n");
- i915_destroy_error_state(error_priv->dev);
+ i915_destroy_error_state(error_priv->i915);
return cnt;
}
@@ -960,7 +978,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
if (!error_priv)
return -ENOMEM;
- error_priv->dev = &dev_priv->drm;
+ error_priv->i915 = dev_priv;
i915_error_state_get(&dev_priv->drm, error_priv);
@@ -988,8 +1006,8 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
ssize_t ret_count = 0;
int ret;
- ret = i915_error_state_buf_init(&error_str,
- to_i915(error_priv->dev), count, *pos);
+ ret = i915_error_state_buf_init(&error_str, error_priv->i915,
+ count, *pos);
if (ret)
return ret;
@@ -1026,7 +1044,7 @@ i915_next_seqno_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- *val = 1 + atomic_read(&dev_priv->gt.global_timeline.next_seqno);
+ *val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
return 0;
}
@@ -1108,7 +1126,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
int max_freq;
rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
} else {
@@ -1204,7 +1222,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
- max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 0 :
+ max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
@@ -1217,7 +1235,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
- max_freq = (IS_BROXTON(dev_priv) ? rp_state_cap >> 16 :
+ max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
GEN9_FREQ_SCALER : 1);
@@ -1330,13 +1348,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
engine->hangcheck.seqno, seqno[id],
intel_engine_last_submit(engine));
- seq_printf(m, "\twaiters? %s, fake irq active? %s\n",
+ seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
- &dev_priv->gpu_error.missed_irq_rings)));
+ &dev_priv->gpu_error.missed_irq_rings)),
+ yesno(engine->hangcheck.stalled));
+
spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = container_of(rb, typeof(*w), node);
+ struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
@@ -1346,8 +1366,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
- seq_printf(m, "\tscore = %d\n", engine->hangcheck.score);
- seq_printf(m, "\taction = %d\n", engine->hangcheck.action);
+ seq_printf(m, "\taction = %s(%d) %d ms ago\n",
+ hangcheck_action_to_str(engine->hangcheck.action),
+ engine->hangcheck.action,
+ jiffies_to_msecs(jiffies -
+ engine->hangcheck.action_timestamp));
if (engine->id == RCS) {
seq_puts(m, "\tinstdone read =\n");
@@ -1728,7 +1751,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
- else if (IS_CRESTLINE(dev_priv) || IS_G4X(dev_priv) ||
+ else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
else if (IS_I915GM(dev_priv))
@@ -1873,8 +1896,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fbdev_fb->base.width,
fbdev_fb->base.height,
- fbdev_fb->base.depth,
- fbdev_fb->base.bits_per_pixel,
+ fbdev_fb->base.format->depth,
+ fbdev_fb->base.format->cpp[0] * 8,
fbdev_fb->base.modifier,
drm_framebuffer_read_refcount(&fbdev_fb->base));
describe_obj(m, fbdev_fb->obj);
@@ -1891,8 +1914,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
fb->base.width,
fb->base.height,
- fb->base.depth,
- fb->base.bits_per_pixel,
+ fb->base.format->depth,
+ fb->base.format->cpp[0] * 8,
fb->base.modifier,
drm_framebuffer_read_refcount(&fb->base));
describe_obj(m, fb->obj);
@@ -2329,10 +2352,40 @@ static int i915_llc(struct seq_file *m, void *data)
return 0;
}
+static int i915_huc_load_status_info(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+
+ if (!HAS_HUC_UCODE(dev_priv))
+ return 0;
+
+ seq_puts(m, "HuC firmware status:\n");
+ seq_printf(m, "\tpath: %s\n", huc_fw->path);
+ seq_printf(m, "\tfetch: %s\n",
+ intel_uc_fw_status_repr(huc_fw->fetch_status));
+ seq_printf(m, "\tload: %s\n",
+ intel_uc_fw_status_repr(huc_fw->load_status));
+ seq_printf(m, "\tversion wanted: %d.%d\n",
+ huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
+ seq_printf(m, "\tversion found: %d.%d\n",
+ huc_fw->major_ver_found, huc_fw->minor_ver_found);
+ seq_printf(m, "\theader: offset is %d; size = %d\n",
+ huc_fw->header_offset, huc_fw->header_size);
+ seq_printf(m, "\tuCode: offset is %d; size = %d\n",
+ huc_fw->ucode_offset, huc_fw->ucode_size);
+ seq_printf(m, "\tRSA: offset is %d; size = %d\n",
+ huc_fw->rsa_offset, huc_fw->rsa_size);
+
+ seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
+
+ return 0;
+}
+
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
u32 tmp, i;
if (!HAS_GUC_UCODE(dev_priv))
@@ -2340,15 +2393,15 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
seq_printf(m, "GuC firmware status:\n");
seq_printf(m, "\tpath: %s\n",
- guc_fw->guc_fw_path);
+ guc_fw->path);
seq_printf(m, "\tfetch: %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+ intel_uc_fw_status_repr(guc_fw->fetch_status));
seq_printf(m, "\tload: %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+ intel_uc_fw_status_repr(guc_fw->load_status));
seq_printf(m, "\tversion wanted: %d.%d\n",
- guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+ guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
seq_printf(m, "\tversion found: %d.%d\n",
- guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
+ guc_fw->major_ver_found, guc_fw->minor_ver_found);
seq_printf(m, "\theader: offset is %d; size = %d\n",
guc_fw->header_offset, guc_fw->header_size);
seq_printf(m, "\tuCode: offset is %d; size = %d\n",
@@ -2409,7 +2462,7 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
client->priority, client->ctx_index, client->proc_desc_offset);
seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
- client->doorbell_id, client->doorbell_offset, client->cookie);
+ client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
client->wq_size, client->wq_offset, client->wq_tail);
@@ -2429,47 +2482,41 @@ static void i915_guc_client_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_guc guc;
- struct i915_guc_client client = {};
+ const struct intel_guc *guc = &dev_priv->guc;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u64 total = 0;
+ u64 total;
- if (!HAS_GUC_SCHED(dev_priv))
+ if (!guc->execbuf_client) {
+ seq_printf(m, "GuC submission %s\n",
+ HAS_GUC_SCHED(dev_priv) ?
+ "disabled" :
+ "not supported");
return 0;
-
- if (mutex_lock_interruptible(&dev->struct_mutex))
- return 0;
-
- /* Take a local copy of the GuC data, so we can dump it at leisure */
- guc = dev_priv->guc;
- if (guc.execbuf_client)
- client = *guc.execbuf_client;
-
- mutex_unlock(&dev->struct_mutex);
+ }
seq_printf(m, "Doorbell map:\n");
- seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
- seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
+ seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
+ seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
- seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
- seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
- seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
- seq_printf(m, "GuC last action status: 0x%x\n", guc.action_status);
- seq_printf(m, "GuC last action error code: %d\n", guc.action_err);
+ seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
+ seq_printf(m, "GuC action failure count: %u\n", guc->action_fail);
+ seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd);
+ seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status);
+ seq_printf(m, "GuC last action error code: %d\n", guc->action_err);
+ total = 0;
seq_printf(m, "\nGuC submissions:\n");
for_each_engine(engine, dev_priv, id) {
- u64 submissions = guc.submissions[id];
+ u64 submissions = guc->submissions[id];
total += submissions;
seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
- engine->name, submissions, guc.last_seqno[id]);
+ engine->name, submissions, guc->last_seqno[id]);
}
seq_printf(m, "\t%s: %llu\n", "Total", total);
- seq_printf(m, "\nGuC execbuf client @ %p:\n", guc.execbuf_client);
- i915_guc_client_info(m, dev_priv, &client);
+ seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
+ i915_guc_client_info(m, dev_priv, guc->execbuf_client);
i915_guc_log_info(m, dev_priv);
@@ -2542,6 +2589,29 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
i915_guc_log_control_get, i915_guc_log_control_set,
"%lld\n");
+static const char *psr2_live_status(u32 val)
+{
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+
+ val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status))
+ return live_status[val];
+
+ return "unknown";
+}
+
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2567,9 +2637,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Re-enable work scheduled: %s\n",
yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_DDI(dev_priv))
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
- else {
+ if (HAS_DDI(dev_priv)) {
+ if (dev_priv->psr.psr2_support)
+ enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+ else
+ enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+ } else {
for_each_pipe(dev_priv, pipe) {
enum transcoder cpu_transcoder =
intel_pipe_to_cpu_transcoder(dev_priv, pipe);
@@ -2613,6 +2686,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
+ if (dev_priv->psr.psr2_support) {
+ u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
+
+ seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
+ psr2, psr2_live_status(psr2));
+ }
mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
@@ -2872,6 +2951,20 @@ static void intel_dp_info(struct seq_file *m,
&intel_dp->aux);
}
+static void intel_dp_mst_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct intel_dp_mst_encoder *intel_mst =
+ enc_to_mst(&intel_encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+ intel_connector->port);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+}
+
static void intel_hdmi_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
@@ -2914,7 +3007,10 @@ static void intel_connector_info(struct seq_file *m,
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- intel_dp_info(m, intel_connector);
+ if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ intel_dp_mst_info(m, intel_connector);
+ else
+ intel_dp_info(m, intel_connector);
break;
case DRM_MODE_CONNECTOR_LVDS:
if (intel_encoder->type == INTEL_OUTPUT_LVDS)
@@ -2938,7 +3034,7 @@ static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
{
u32 state;
- if (IS_845G(dev_priv) || IS_I865G(dev_priv))
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -3021,7 +3117,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
state = plane->state;
if (state->fb) {
- drm_get_format_name(state->fb->pixel_format, &format_name);
+ drm_get_format_name(state->fb->format->format,
+ &format_name);
} else {
sprintf(format_name.str, "N/A");
}
@@ -3059,7 +3156,7 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
pipe_config->scaler_state.scaler_users,
pipe_config->scaler_state.scaler_id);
- for (i = 0; i < SKL_NUM_SCALERS; i++) {
+ for (i = 0; i < num_scalers; i++) {
struct intel_scaler *sc =
&pipe_config->scaler_state.scalers[i];
@@ -3141,11 +3238,11 @@ static int i915_engine_info(struct seq_file *m, void *unused)
u64 addr;
seq_printf(m, "%s\n", engine->name);
- seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [score %d]\n",
+ seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
- engine->hangcheck.score);
+ jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
rcu_read_lock();
@@ -3251,7 +3348,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
spin_lock_irq(&b->lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = container_of(rb, typeof(*w), node);
+ struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
@@ -3341,14 +3438,14 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
- pll->config.crtc_mask, pll->active_mask, yesno(pll->on));
+ pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
+ seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
seq_printf(m, " dpll_md: 0x%08x\n",
- pll->config.hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll);
+ pll->state.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
}
drm_modeset_unlock_all(dev);
@@ -3526,12 +3623,6 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
-struct pipe_crc_info {
- const char *name;
- struct drm_i915_private *dev_priv;
- enum pipe pipe;
-};
-
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3561,844 +3652,6 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
- return -ENODEV;
-
- spin_lock_irq(&pipe_crc->lock);
-
- if (pipe_crc->opened) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EBUSY; /* already open */
- }
-
- pipe_crc->opened = true;
- filep->private_data = inode->i_private;
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
-{
- struct pipe_crc_info *info = inode->i_private;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
-
- spin_lock_irq(&pipe_crc->lock);
- pipe_crc->opened = false;
- spin_unlock_irq(&pipe_crc->lock);
-
- return 0;
-}
-
-/* (6 fields, 8 chars each, space separated (5) + '\n') */
-#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
-/* account for '\0' */
-#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
-
-static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
-{
- assert_spin_locked(&pipe_crc->lock);
- return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR);
-}
-
-static ssize_t
-i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
- loff_t *pos)
-{
- struct pipe_crc_info *info = filep->private_data;
- struct drm_i915_private *dev_priv = info->dev_priv;
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
- char buf[PIPE_CRC_BUFFER_LEN];
- int n_entries;
- ssize_t bytes_read;
-
- /*
- * Don't allow user space to provide buffers not big enough to hold
- * a line of data.
- */
- if (count < PIPE_CRC_LINE_LEN)
- return -EINVAL;
-
- if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
- return 0;
-
- /* nothing to read */
- spin_lock_irq(&pipe_crc->lock);
- while (pipe_crc_data_count(pipe_crc) == 0) {
- int ret;
-
- if (filep->f_flags & O_NONBLOCK) {
- spin_unlock_irq(&pipe_crc->lock);
- return -EAGAIN;
- }
-
- ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
- pipe_crc_data_count(pipe_crc), pipe_crc->lock);
- if (ret) {
- spin_unlock_irq(&pipe_crc->lock);
- return ret;
- }
- }
-
- /* We now have one or more entries to read */
- n_entries = count / PIPE_CRC_LINE_LEN;
-
- bytes_read = 0;
- while (n_entries > 0) {
- struct intel_pipe_crc_entry *entry =
- &pipe_crc->entries[pipe_crc->tail];
-
- if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
- INTEL_PIPE_CRC_ENTRIES_NR) < 1)
- break;
-
- BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
- pipe_crc->tail = (pipe_crc->tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-
- bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
- "%8u %8x %8x %8x %8x %8x\n",
- entry->frame, entry->crc[0],
- entry->crc[1], entry->crc[2],
- entry->crc[3], entry->crc[4]);
-
- spin_unlock_irq(&pipe_crc->lock);
-
- if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
- return -EFAULT;
-
- user_buf += PIPE_CRC_LINE_LEN;
- n_entries--;
-
- spin_lock_irq(&pipe_crc->lock);
- }
-
- spin_unlock_irq(&pipe_crc->lock);
-
- return bytes_read;
-}
-
-static const struct file_operations i915_pipe_crc_fops = {
- .owner = THIS_MODULE,
- .open = i915_pipe_crc_open,
- .read = i915_pipe_crc_read,
- .release = i915_pipe_crc_release,
-};
-
-static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
- {
- .name = "i915_pipe_A_crc",
- .pipe = PIPE_A,
- },
- {
- .name = "i915_pipe_B_crc",
- .pipe = PIPE_B,
- },
- {
- .name = "i915_pipe_C_crc",
- .pipe = PIPE_C,
- },
-};
-
-static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
- enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(minor->dev);
- struct dentry *ent;
- struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
-
- info->dev_priv = dev_priv;
- ent = debugfs_create_file(info->name, S_IRUGO, root, info,
- &i915_pipe_crc_fops);
- if (!ent)
- return -ENOMEM;
-
- return drm_add_fake_info_node(minor, ent, info);
-}
-
-static const char * const pipe_crc_sources[] = {
- "none",
- "plane1",
- "plane2",
- "pf",
- "pipe",
- "TV",
- "DP-B",
- "DP-C",
- "DP-D",
- "auto",
-};
-
-static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
-{
- BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
- return pipe_crc_sources[source];
-}
-
-static int display_crc_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- int i;
-
- for (i = 0; i < I915_MAX_PIPES; i++)
- seq_printf(m, "%c %s\n", pipe_name(i),
- pipe_crc_source_name(dev_priv->pipe_crc[i].source));
-
- return 0;
-}
-
-static int display_crc_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, display_crc_ctl_show, inode->i_private);
-}
-
-static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PIPE:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_encoder *encoder;
- struct intel_crtc *crtc;
- struct intel_digital_port *dig_port;
- int ret = 0;
-
- *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
- drm_modeset_lock_all(dev);
- for_each_intel_encoder(dev, encoder) {
- if (!encoder->base.crtc)
- continue;
-
- crtc = to_intel_crtc(encoder->base.crtc);
-
- if (crtc->pipe != pipe)
- continue;
-
- switch (encoder->type) {
- case INTEL_OUTPUT_TVOUT:
- *source = INTEL_PIPE_CRC_SOURCE_TV;
- break;
- case INTEL_OUTPUT_DP:
- case INTEL_OUTPUT_EDP:
- dig_port = enc_to_dig_port(&encoder->base);
- switch (dig_port->port) {
- case PORT_B:
- *source = INTEL_PIPE_CRC_SOURCE_DP_B;
- break;
- case PORT_C:
- *source = INTEL_PIPE_CRC_SOURCE_DP_C;
- break;
- case PORT_D:
- *source = INTEL_PIPE_CRC_SOURCE_DP_D;
- break;
- default:
- WARN(1, "nonexisting DP port %c\n",
- port_name(dig_port->port));
- break;
- }
- break;
- default:
- break;
- }
- }
- drm_modeset_unlock_all(dev);
-
- return ret;
-}
-
-static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- bool need_stable_symbols = false;
-
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PIPE:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_CHERRYVIEW(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * When the pipe CRC tap point is after the transcoders we need
- * to tweak symbol-level features to produce a deterministic series of
- * symbols for a given frame. We need to reset those features only once
- * a frame (instead of every nth symbol):
- * - DC-balance: used to ensure a better clock recovery from the data
- * link (SDVO)
- * - DisplayPort scrambling: used for EMI reduction
- */
- if (need_stable_symbols) {
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
-
- tmp |= DC_BALANCE_RESET_VLV;
- switch (pipe) {
- case PIPE_A:
- tmp |= PIPE_A_SCRAMBLE_RESET;
- break;
- case PIPE_B:
- tmp |= PIPE_B_SCRAMBLE_RESET;
- break;
- case PIPE_C:
- tmp |= PIPE_C_SCRAMBLE_RESET;
- break;
- default:
- return -EINVAL;
- }
- I915_WRITE(PORT_DFT2_G4X, tmp);
- }
-
- return 0;
-}
-
-static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- bool need_stable_symbols = false;
-
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PIPE:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
- break;
- case INTEL_PIPE_CRC_SOURCE_TV:
- if (!SUPPORTS_TV(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- /*
- * When the pipe CRC tap point is after the transcoders we need
- * to tweak symbol-level features to produce a deterministic series of
- * symbols for a given frame. We need to reset those features only once
- * a frame (instead of every nth symbol):
- * - DC-balance: used to ensure a better clock recovery from the data
- * link (SDVO)
- * - DisplayPort scrambling: used for EMI reduction
- */
- if (need_stable_symbols) {
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
-
- WARN_ON(!IS_G4X(dev_priv));
-
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
-
- if (pipe == PIPE_A)
- tmp |= PIPE_A_SCRAMBLE_RESET;
- else
- tmp |= PIPE_B_SCRAMBLE_RESET;
-
- I915_WRITE(PORT_DFT2_G4X, tmp);
- }
-
- return 0;
-}
-
-static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
-
- switch (pipe) {
- case PIPE_A:
- tmp &= ~PIPE_A_SCRAMBLE_RESET;
- break;
- case PIPE_B:
- tmp &= ~PIPE_B_SCRAMBLE_RESET;
- break;
- case PIPE_C:
- tmp &= ~PIPE_C_SCRAMBLE_RESET;
- break;
- default:
- return;
- }
- if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
- tmp &= ~DC_BALANCE_RESET_VLV;
- I915_WRITE(PORT_DFT2_G4X, tmp);
-
-}
-
-static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
-
- if (pipe == PIPE_A)
- tmp &= ~PIPE_A_SCRAMBLE_RESET;
- else
- tmp &= ~PIPE_B_SCRAMBLE_RESET;
- I915_WRITE(PORT_DFT2_G4X, tmp);
-
- if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
- }
-}
-
-static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- *source = INTEL_PIPE_CRC_SOURCE_PIPE;
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PLANE1:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
- break;
- case INTEL_PIPE_CRC_SOURCE_PLANE2:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
- break;
- case INTEL_PIPE_CRC_SOURCE_PIPE:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
- bool enable)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
- struct intel_crtc_state *pipe_config;
- struct drm_atomic_state *state;
- int ret = 0;
-
- drm_modeset_lock_all(dev);
- state = drm_atomic_state_alloc(dev);
- if (!state) {
- ret = -ENOMEM;
- goto out;
- }
-
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
- pipe_config = intel_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(pipe_config)) {
- ret = PTR_ERR(pipe_config);
- goto out;
- }
-
- pipe_config->pch_pfit.force_thru = enable;
- if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
- pipe_config->pch_pfit.enabled != enable)
- pipe_config->base.connectors_changed = true;
-
- ret = drm_atomic_commit(state);
-out:
- WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
- drm_modeset_unlock_all(dev);
- drm_atomic_state_put(state);
-}
-
-static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source,
- uint32_t *val)
-{
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- *source = INTEL_PIPE_CRC_SOURCE_PF;
-
- switch (*source) {
- case INTEL_PIPE_CRC_SOURCE_PLANE1:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
- break;
- case INTEL_PIPE_CRC_SOURCE_PLANE2:
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
- break;
- case INTEL_PIPE_CRC_SOURCE_PF:
- if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
-
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
- break;
- case INTEL_PIPE_CRC_SOURCE_NONE:
- *val = 0;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source source)
-{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- enum intel_display_power_domain power_domain;
- u32 val = 0; /* shut up gcc */
- int ret;
-
- if (pipe_crc->source == source)
- return 0;
-
- /* forbid changing the source without going back to 'none' */
- if (pipe_crc->source && source)
- return -EINVAL;
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
- DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
- return -EIO;
- }
-
- if (IS_GEN2(dev_priv))
- ret = i8xx_pipe_crc_ctl_reg(&source, &val);
- else if (INTEL_GEN(dev_priv) < 5)
- ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
- else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
- ret = ilk_pipe_crc_ctl_reg(&source, &val);
- else
- ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
-
- if (ret != 0)
- goto out;
-
- /* none -> real source transition */
- if (source) {
- struct intel_pipe_crc_entry *entries;
-
- DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
- pipe_name(pipe), pipe_crc_source_name(source));
-
- entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
- sizeof(pipe_crc->entries[0]),
- GFP_KERNEL);
- if (!entries) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- hsw_disable_ips(crtc);
-
- spin_lock_irq(&pipe_crc->lock);
- kfree(pipe_crc->entries);
- pipe_crc->entries = entries;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
- }
-
- pipe_crc->source = source;
-
- I915_WRITE(PIPE_CRC_CTL(pipe), val);
- POSTING_READ(PIPE_CRC_CTL(pipe));
-
- /* real source -> none transition */
- if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
- struct intel_pipe_crc_entry *entries;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
-
- DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
- pipe_name(pipe));
-
- drm_modeset_lock(&crtc->base.mutex, NULL);
- if (crtc->base.state->active)
- intel_wait_for_vblank(dev_priv, pipe);
- drm_modeset_unlock(&crtc->base.mutex);
-
- spin_lock_irq(&pipe_crc->lock);
- entries = pipe_crc->entries;
- pipe_crc->entries = NULL;
- pipe_crc->head = 0;
- pipe_crc->tail = 0;
- spin_unlock_irq(&pipe_crc->lock);
-
- kfree(entries);
-
- if (IS_G4X(dev_priv))
- g4x_undo_pipe_scramble_reset(dev_priv, pipe);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_undo_pipe_scramble_reset(dev_priv, pipe);
- else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
- hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
-
- hsw_enable_ips(crtc);
- }
-
- ret = 0;
-
-out:
- intel_display_power_put(dev_priv, power_domain);
-
- return ret;
-}
-
-/*
- * Parse pipe CRC command strings:
- * command: wsp* object wsp+ name wsp+ source wsp*
- * object: 'pipe'
- * name: (A | B | C)
- * source: (none | plane1 | plane2 | pf)
- * wsp: (#0x20 | #0x9 | #0xA)+
- *
- * eg.:
- * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
- * "pipe A none" -> Stop CRC
- */
-static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
-{
- int n_words = 0;
-
- while (*buf) {
- char *end;
-
- /* skip leading white space */
- buf = skip_spaces(buf);
- if (!*buf)
- break; /* end of buffer */
-
- /* find end of word */
- for (end = buf; *end && !isspace(*end); end++)
- ;
-
- if (n_words == max_words) {
- DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
- max_words);
- return -EINVAL; /* ran out of words[] before bytes */
- }
-
- if (*end)
- *end++ = '\0';
- words[n_words++] = buf;
- buf = end;
- }
-
- return n_words;
-}
-
-enum intel_pipe_crc_object {
- PIPE_CRC_OBJECT_PIPE,
-};
-
-static const char * const pipe_crc_objects[] = {
- "pipe",
-};
-
-static int
-display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
- if (!strcmp(buf, pipe_crc_objects[i])) {
- *o = i;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
-{
- const char name = buf[0];
-
- if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
- return -EINVAL;
-
- *pipe = name - 'A';
-
- return 0;
-}
-
-static int
-display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
- if (!strcmp(buf, pipe_crc_sources[i])) {
- *s = i;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
- char *buf, size_t len)
-{
-#define N_WORDS 3
- int n_words;
- char *words[N_WORDS];
- enum pipe pipe;
- enum intel_pipe_crc_object object;
- enum intel_pipe_crc_source source;
-
- n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
- if (n_words != N_WORDS) {
- DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
- N_WORDS);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_object(words[0], &object) < 0) {
- DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
- DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
- return -EINVAL;
- }
-
- if (display_crc_ctl_parse_source(words[2], &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
- return -EINVAL;
- }
-
- return pipe_crc_set_source(dev_priv, pipe, source);
-}
-
-static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- char *tmpbuf;
- int ret;
-
- if (len == 0)
- return 0;
-
- if (len > PAGE_SIZE - 1) {
- DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
- PAGE_SIZE);
- return -E2BIG;
- }
-
- tmpbuf = kmalloc(len + 1, GFP_KERNEL);
- if (!tmpbuf)
- return -ENOMEM;
-
- if (copy_from_user(tmpbuf, ubuf, len)) {
- ret = -EFAULT;
- goto out;
- }
- tmpbuf[len] = '\0';
-
- ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
-
-out:
- kfree(tmpbuf);
- if (ret < 0)
- return ret;
-
- *offp += len;
- return len;
-}
-
-static const struct file_operations i915_display_crc_ctl_fops = {
- .owner = THIS_MODULE,
- .open = display_crc_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = display_crc_ctl_write
-};
-
static ssize_t i915_displayport_test_active_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
@@ -4446,9 +3699,9 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
* testing code, only accept an actual value of 1 here
*/
if (val == 1)
- intel_dp->compliance_test_active = 1;
+ intel_dp->compliance.test_active = 1;
else
- intel_dp->compliance_test_active = 0;
+ intel_dp->compliance.test_active = 0;
}
}
out:
@@ -4475,7 +3728,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
- if (intel_dp->compliance_test_active)
+ if (intel_dp->compliance.test_active)
seq_puts(m, "1");
else
seq_puts(m, "0");
@@ -4519,7 +3772,7 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
- seq_printf(m, "%lx", intel_dp->compliance_test_data);
+ seq_printf(m, "%lx", intel_dp->compliance.test_data.edid);
} else
seq_puts(m, "0");
}
@@ -4558,7 +3811,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
- seq_printf(m, "%02lx", intel_dp->compliance_test_type);
+ seq_printf(m, "%02lx", intel_dp->compliance.test_type);
} else
seq_puts(m, "0");
}
@@ -4957,7 +4210,7 @@ unlock:
if (val & DROP_FREED) {
synchronize_rcu();
- flush_work(&dev_priv->mm.free_work);
+ i915_gem_drain_freed_objects(dev_priv);
}
return ret;
@@ -5164,7 +4417,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
/* BXT has a single slice and at most 3 subslices. */
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
s_max = 1;
ss_max = 3;
}
@@ -5198,7 +4451,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
for (ss = 0; ss < ss_max; ss++) {
unsigned int eu_cnt;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
/* skip disabled subslice */
continue;
@@ -5386,6 +4639,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_info", i915_guc_info, 0},
{"i915_guc_load_status", i915_guc_load_status_info, 0},
{"i915_guc_log_dump", i915_guc_log_dump, 0},
+ {"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
@@ -5449,19 +4703,6 @@ static const struct i915_debugfs_files {
{"i915_guc_log_control", &i915_guc_log_control_fops}
};
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
-{
- enum pipe pipe;
-
- for_each_pipe(dev_priv, pipe) {
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
- pipe_crc->opened = false;
- spin_lock_init(&pipe_crc->lock);
- init_waitqueue_head(&pipe_crc->wq);
- }
-}
-
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
@@ -5471,11 +4712,9 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
- ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
- if (ret)
- return ret;
- }
+ ret = intel_pipe_crc_create(minor);
+ if (ret)
+ return ret;
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
ret = i915_debugfs_create(minor->debugfs_root, minor,
@@ -5501,12 +4740,7 @@ void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
drm_debugfs_remove_files((struct drm_info_list *)&i915_forcewake_fops,
1, minor);
- for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
- struct drm_info_list *info_list =
- (struct drm_info_list *)&i915_pipe_crc_data[i];
-
- drm_debugfs_remove_files(info_list, 1, minor);
- }
+ intel_pipe_crc_cleanup(minor);
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
struct drm_info_list *info_list =
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 188eb7f1192d..e703556eba99 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
+#include "intel_uc.h"
static struct drm_driver driver;
@@ -142,9 +143,8 @@ static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv)
return ret;
}
-static void intel_detect_pch(struct drm_device *dev)
+static void intel_detect_pch(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pch = NULL;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
@@ -317,6 +317,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_MIN_EU_IN_POOL:
value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
+ case I915_PARAM_HUC_STATUS:
+ /* The register is already force-woken. We don't need
+ * any rpm here
+ */
+ value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+ break;
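
The I915_PARAM_HUC_STATUS case added above exposes the HUC_FW_VERIFIED bit through the existing getparam ioctl. A minimal userspace sketch of such a query, assuming a uapi header recent enough to define I915_PARAM_HUC_STATUS and using /dev/dri/card0 purely as an illustrative node (none of this is part of the patch itself):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* illustrative device node */
	int huc_verified = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HUC_STATUS,
		.value = &huc_verified,
	};

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		perror("DRM_IOCTL_I915_GETPARAM");
	else
		printf("HuC firmware verified: %s\n", huc_verified ? "yes" : "no");

	close(fd);
	return 0;
}

A nonzero value means the HUC_STATUS2 register reported the firmware as verified; the register is already force-woken, hence no runtime-pm reference is taken in the kernel handler.
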
case I915_PARAM_MMAP_GTT_VERSION:
/* Though we've started our numbering from 1, and so class all
* earlier versions as 0, in effect their value is undefined as
@@ -362,10 +368,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
return 0;
}
-static int i915_get_bridge_dev(struct drm_device *dev)
+static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
DRM_ERROR("bridge device not found\n");
@@ -376,9 +380,8 @@ static int i915_get_bridge_dev(struct drm_device *dev)
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
+intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
@@ -422,9 +425,8 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
-intel_setup_mchbar(struct drm_device *dev)
+intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -446,7 +448,7 @@ intel_setup_mchbar(struct drm_device *dev)
if (enabled)
return;
- if (intel_alloc_mchbar_resource(dev))
+ if (intel_alloc_mchbar_resource(dev_priv))
return;
dev_priv->mchbar_need_disable = true;
@@ -462,9 +464,8 @@ intel_setup_mchbar(struct drm_device *dev)
}
static void
-intel_teardown_mchbar(struct drm_device *dev)
+intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
if (dev_priv->mchbar_need_disable) {
@@ -494,9 +495,9 @@ intel_teardown_mchbar(struct drm_device *dev)
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
- struct drm_device *dev = cookie;
+ struct drm_i915_private *dev_priv = cookie;
- intel_modeset_vga_set_state(to_i915(dev), state);
+ intel_modeset_vga_set_state(dev_priv, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
@@ -504,6 +505,9 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+static int i915_resume_switcheroo(struct drm_device *dev);
+static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
+
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -545,12 +549,11 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_cleanup_engines(&dev_priv->drm);
- i915_gem_context_fini(&dev_priv->drm);
+ i915_gem_cleanup_engines(dev_priv);
+ i915_gem_context_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
- rcu_barrier();
- flush_work(&dev_priv->mm.free_work);
+ i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!list_empty(&dev_priv->context_list));
}
@@ -575,7 +578,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
- ret = vga_client_register(pdev, dev, NULL, i915_vga_set_decode);
+ ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode);
if (ret && ret != -ENODEV)
goto out;
@@ -596,7 +599,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_csr;
- intel_setup_gmbus(dev);
+ intel_setup_gmbus(dev_priv);
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
@@ -604,9 +607,10 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_irq;
- intel_guc_init(dev);
+ intel_huc_init(dev_priv);
+ intel_guc_init(dev_priv);
- ret = i915_gem_init(dev);
+ ret = i915_gem_init(dev_priv);
if (ret)
goto cleanup_irq;
@@ -627,13 +631,14 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
- if (i915_gem_suspend(dev))
+ if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
cleanup_irq:
- intel_guc_fini(dev);
+ intel_guc_fini(dev_priv);
+ intel_huc_fini(dev_priv);
drm_irq_uninstall(dev);
- intel_teardown_gmbus(dev);
+ intel_teardown_gmbus(dev_priv);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
intel_power_domains_fini(dev_priv);
@@ -644,7 +649,6 @@ out:
return ret;
}
-#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
struct apertures_struct *ap;
@@ -669,12 +673,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
return ret;
}
-#else
-static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
-{
- return 0;
-}
-#endif
#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
@@ -812,26 +810,25 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
+ spin_lock_init(&dev_priv->wm.dsparb_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
+ intel_uc_init_early(dev_priv);
+
i915_memcpy_init_early(dev_priv);
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
return ret;
- ret = intel_gvt_init(dev_priv);
- if (ret < 0)
- goto err_workqueues;
-
/* This must be called before any calls to HAS_PCH_* */
- intel_detect_pch(&dev_priv->drm);
+ intel_detect_pch(dev_priv);
- intel_pm_setup(&dev_priv->drm);
+ intel_pm_setup(dev_priv);
intel_init_dpio(dev_priv);
intel_power_domains_init(dev_priv);
intel_irq_init(dev_priv);
@@ -839,9 +836,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- ret = i915_gem_load_init(&dev_priv->drm);
+ ret = i915_gem_load_init(dev_priv);
if (ret < 0)
- goto err_gvt;
+ goto err_workqueues;
intel_display_crc_init(dev_priv);
@@ -849,10 +846,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
intel_detect_preproduction_hw(dev_priv);
+ i915_perf_init(dev_priv);
+
return 0;
-err_gvt:
- intel_gvt_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -864,13 +861,13 @@ err_workqueues:
*/
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
{
- i915_gem_load_cleanup(&dev_priv->drm);
+ i915_perf_fini(dev_priv);
+ i915_gem_load_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
}
-static int i915_mmio_setup(struct drm_device *dev)
+static int i915_mmio_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
int mmio_bar;
int mmio_size;
@@ -896,17 +893,16 @@ static int i915_mmio_setup(struct drm_device *dev)
}
/* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev);
+ intel_setup_mchbar(dev_priv);
return 0;
}
-static void i915_mmio_cleanup(struct drm_device *dev)
+static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
- intel_teardown_mchbar(dev);
+ intel_teardown_mchbar(dev_priv);
pci_iounmap(pdev, dev_priv->regs);
}
@@ -921,16 +917,15 @@ static void i915_mmio_cleanup(struct drm_device *dev)
*/
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
int ret;
if (i915_inject_load_failure())
return -ENODEV;
- if (i915_get_bridge_dev(dev))
+ if (i915_get_bridge_dev(dev_priv))
return -EIO;
- ret = i915_mmio_setup(dev);
+ ret = i915_mmio_setup(dev_priv);
if (ret < 0)
goto put_bridge;
@@ -950,10 +945,8 @@ put_bridge:
*/
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
-
intel_uncore_fini(dev_priv);
- i915_mmio_cleanup(dev);
+ i915_mmio_cleanup(dev_priv);
pci_dev_put(dev_priv->bridge_dev);
}
@@ -1044,7 +1037,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
- if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv)) {
+ if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -1079,6 +1072,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("can't enable MSI");
}
+ ret = intel_gvt_init(dev_priv);
+ if (ret)
+ goto out_ggtt;
+
return 0;
out_ggtt:
@@ -1125,8 +1122,11 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
- i915_guc_register(dev_priv);
+ i915_guc_log_register(dev_priv);
i915_setup_sysfs(dev_priv);
+
+ /* Depends on sysfs having been initialized */
+ i915_perf_register(dev_priv);
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
@@ -1163,8 +1163,10 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
acpi_video_unregister();
intel_opregion_unregister(dev_priv);
+ i915_perf_unregister(dev_priv);
+
i915_teardown_sysfs(dev_priv);
- i915_guc_unregister(dev_priv);
+ i915_guc_log_unregister(dev_priv);
i915_debugfs_unregister(dev_priv);
drm_dev_unregister(&dev_priv->drm);
@@ -1195,8 +1197,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev_priv)
ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
if (ret) {
- dev_printk(KERN_ERR, &pdev->dev,
- "[" DRM_NAME ":%s] allocation failed\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
kfree(dev_priv);
return ret;
}
@@ -1244,6 +1245,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_runtime_pm_enable(dev_priv);
+ dev_priv->ipc_enabled = false;
+
/* Everything is in place, we can now relax! */
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
driver.name, driver.major, driver.minor, driver.patchlevel,
@@ -1281,11 +1284,13 @@ void i915_driver_unload(struct drm_device *dev)
intel_fbdev_fini(dev);
- if (i915_gem_suspend(dev))
+ if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ intel_gvt_cleanup(dev_priv);
+
i915_driver_unregister(dev_priv);
drm_vblank_cleanup(dev);
@@ -1313,12 +1318,13 @@ void i915_driver_unload(struct drm_device *dev)
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- i915_destroy_error_state(dev);
+ i915_destroy_error_state(dev_priv);
/* Flush any outstanding unpin_work. */
drain_workqueue(dev_priv->wq);
- intel_guc_fini(dev);
+ intel_guc_fini(dev_priv);
+ intel_huc_fini(dev_priv);
i915_gem_fini(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
@@ -1423,14 +1429,14 @@ static int i915_drm_suspend(struct drm_device *dev)
pci_save_state(pdev);
- error = i915_gem_suspend(dev);
+ error = i915_gem_suspend(dev_priv);
if (error) {
dev_err(&pdev->dev,
"GEM idle failed, resume might fail\n");
goto out;
}
- intel_guc_suspend(dev);
+ intel_guc_suspend(dev_priv);
intel_display_suspend(dev);
@@ -1445,7 +1451,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_gem_suspend_gtt_mappings(dev_priv);
- i915_save_state(dev);
+ i915_save_state(dev_priv);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
@@ -1476,7 +1482,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_display_set_init_power(dev_priv, false);
- fw_csr = !IS_BROXTON(dev_priv) &&
+ fw_csr = !IS_GEN9_LP(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
@@ -1489,7 +1495,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_power_domains_suspend(dev_priv);
ret = 0;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
bxt_enable_dc9(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_enable_pc8(dev_priv);
@@ -1528,7 +1534,7 @@ out:
return ret;
}
-int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
+static int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
{
int error;
@@ -1566,33 +1572,36 @@ static int i915_drm_resume(struct drm_device *dev)
intel_csr_ucode_resume(dev_priv);
- i915_gem_resume(dev);
+ i915_gem_resume(dev_priv);
- i915_restore_state(dev);
+ i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
intel_opregion_setup(dev_priv);
- intel_init_pch_refclk(dev);
- drm_mode_config_reset(dev);
+ intel_init_pch_refclk(dev_priv);
/*
* Interrupts have to be enabled before any batches are run. If not the
* GPU will hang. i915_gem_init_hw() will initiate batches to
* update/restore the context.
*
+ * drm_mode_config_reset() needs AUX interrupts.
+ *
* Modeset enabling in intel_modeset_init_hw() also needs working
* interrupts.
*/
intel_runtime_pm_enable_interrupts(dev_priv);
+ drm_mode_config_reset(dev);
+
mutex_lock(&dev->struct_mutex);
- if (i915_gem_init_hw(dev)) {
+ if (i915_gem_init_hw(dev_priv)) {
DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
i915_gem_set_wedged(dev_priv);
}
mutex_unlock(&dev->struct_mutex);
- intel_guc_resume(dev);
+ intel_guc_resume(dev_priv);
intel_modeset_init_hw(dev);
@@ -1694,7 +1703,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_early_sanitize(dev_priv, true);
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
@@ -1704,7 +1713,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_sanitize(dev_priv);
- if (IS_BROXTON(dev_priv) ||
+ if (IS_GEN9_LP(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
@@ -1716,7 +1725,7 @@ out:
return ret;
}
-int i915_resume_switcheroo(struct drm_device *dev)
+static int i915_resume_switcheroo(struct drm_device *dev)
{
int ret;
@@ -1730,25 +1739,9 @@ int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
-static void disable_engines_irq(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* Ensure irq handler finishes, and not run again. */
- disable_irq(dev_priv->drm.irq);
- for_each_engine(engine, dev_priv, id)
- tasklet_kill(&engine->irq_tasklet);
-}
-
-static void enable_engines_irq(struct drm_i915_private *dev_priv)
-{
- enable_irq(dev_priv->drm.irq);
-}
-
/**
* i915_reset - reset chip after a hang
- * @dev: drm device to reset
+ * @dev_priv: device private to reset
*
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
@@ -1765,11 +1758,10 @@ static void enable_engines_irq(struct drm_i915_private *dev_priv)
*/
void i915_reset(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct i915_gpu_error *error = &dev_priv->gpu_error;
int ret;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
return;
@@ -1779,11 +1771,15 @@ void i915_reset(struct drm_i915_private *dev_priv)
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
+ disable_irq(dev_priv->drm.irq);
+ ret = i915_gem_reset_prepare(dev_priv);
+ if (ret) {
+ DRM_ERROR("GPU recovery failed\n");
+ intel_gpu_reset(dev_priv, ALL_ENGINES);
+ goto error;
+ }
- disable_engines_irq(dev_priv);
ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
- enable_engines_irq(dev_priv);
-
if (ret) {
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1792,7 +1788,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
- i915_gem_reset(dev_priv);
+ i915_gem_reset_finish(dev_priv);
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
@@ -1809,13 +1805,16 @@ void i915_reset(struct drm_i915_private *dev_priv)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(dev);
+ ret = i915_gem_init_hw(dev_priv);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
goto error;
}
+ i915_queue_hangcheck(dev_priv);
+
wakeup:
+ enable_irq(dev_priv->drm.irq);
wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
return;
@@ -2321,12 +2320,12 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_guc_suspend(dev);
+ intel_guc_suspend(dev_priv);
intel_runtime_pm_disable_interrupts(dev_priv);
ret = 0;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2406,12 +2405,12 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- intel_guc_resume(dev);
+ intel_guc_resume(dev_priv);
if (IS_GEN6(dev_priv))
- intel_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev_priv);
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
@@ -2550,8 +2549,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
@@ -2567,6 +2566,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 029d5c3c81ef..bcc81912b5e5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -49,17 +49,20 @@
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
+#include <drm/drm_cache.h>
#include "i915_params.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_bios.h"
#include "intel_dpll_mgr.h"
-#include "intel_guc.h"
+#include "intel_uc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"
#include "i915_gem.h"
+#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
@@ -76,8 +79,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20161121"
-#define DRIVER_TIMESTAMP 1479717903
+#define DRIVER_DATE "20170123"
+#define DRIVER_TIMESTAMP 1485156432
#undef WARN_ON
/* Many gcc seem not to see through this and fall over :( */
@@ -119,6 +122,90 @@ bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
__i915_inject_load_failure(__func__, __LINE__)
+typedef struct {
+ uint32_t val;
+} uint_fixed_16_16_t;
+
+#define FP_16_16_MAX ({ \
+ uint_fixed_16_16_t fp; \
+ fp.val = UINT_MAX; \
+ fp; \
+})
+
+static inline uint_fixed_16_16_t u32_to_fixed_16_16(uint32_t val)
+{
+ uint_fixed_16_16_t fp;
+
+ WARN_ON(val >> 16);
+
+ fp.val = val << 16;
+ return fp;
+}
+
+static inline uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
+{
+ return DIV_ROUND_UP(fp.val, 1 << 16);
+}
+
+static inline uint32_t fixed_16_16_to_u32(uint_fixed_16_16_t fp)
+{
+ return fp.val >> 16;
+}
+
+static inline uint_fixed_16_16_t min_fixed_16_16(uint_fixed_16_16_t min1,
+ uint_fixed_16_16_t min2)
+{
+ uint_fixed_16_16_t min;
+
+ min.val = min(min1.val, min2.val);
+ return min;
+}
+
+static inline uint_fixed_16_16_t max_fixed_16_16(uint_fixed_16_16_t max1,
+ uint_fixed_16_16_t max2)
+{
+ uint_fixed_16_16_t max;
+
+ max.val = max(max1.val, max2.val);
+ return max;
+}
+
+static inline uint_fixed_16_16_t fixed_16_16_div_round_up(uint32_t val,
+ uint32_t d)
+{
+ uint_fixed_16_16_t fp, res;
+
+ fp = u32_to_fixed_16_16(val);
+ res.val = DIV_ROUND_UP(fp.val, d);
+ return res;
+}
+
+static inline uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t val,
+ uint32_t d)
+{
+ uint_fixed_16_16_t res;
+ uint64_t interm_val;
+
+ interm_val = (uint64_t)val << 16;
+ interm_val = DIV_ROUND_UP_ULL(interm_val, d);
+ WARN_ON(interm_val >> 32);
+ res.val = (uint32_t) interm_val;
+
+ return res;
+}
+
+static inline uint_fixed_16_16_t mul_u32_fixed_16_16(uint32_t val,
+ uint_fixed_16_16_t mul)
+{
+ uint64_t intermediate_val;
+ uint_fixed_16_16_t fp;
+
+ intermediate_val = (uint64_t) val * mul.val;
+ WARN_ON(intermediate_val >> 32);
+ fp.val = (uint32_t) intermediate_val;
+ return fp;
+}
+
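/*
 * Illustrative usage of the 16.16 fixed-point helpers above; not part of
 * the patch. Computes a downscale ratio as src/dst (rounded up) and then
 * scales a watermark value by it. All names here are hypothetical.
 */
static inline uint32_t example_scaled_wm(uint32_t wm, uint32_t src, uint32_t dst)
{
	uint_fixed_16_16_t ratio;

	/* src/dst in 16.16 fixed point, rounded up (e.g. 1920/960 -> 0x20000) */
	ratio = fixed_16_16_div_round_up(src, dst);

	/* wm * ratio, rounded up to the next whole integer */
	return fixed_16_16_to_u32_round_up(mul_u32_fixed_16_16(wm, ratio));
}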
static inline const char *yesno(bool v)
{
return v ? "yes" : "no";
@@ -180,21 +267,39 @@ static inline bool transcoder_is_dsi(enum transcoder transcoder)
}
/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x+.
+ */
+enum plane {
+ PLANE_A,
+ PLANE_B,
+ PLANE_C,
+};
+#define plane_name(p) ((p) + 'A')
+
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
* I915_MAX_PLANES in the enum below is the maximum (across all platforms)
* number of planes per CRTC. Not all platforms really have this many planes,
* which means some arrays of size I915_MAX_PLANES may have unused entries
* between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (e.g. PLANE_CTL(), PS_PLANE_SEL(), etc.), so adjust with care.
*/
-enum plane {
- PLANE_A = 0,
- PLANE_B,
- PLANE_C,
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
PLANE_CURSOR,
I915_MAX_PLANES,
};
-#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+ for_each_if ((__crtc)->plane_ids_mask & BIT(__p))
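/*
 * Illustrative sketch, not part of the patch: walking the plane_ids present
 * on a CRTC with the iterator above. struct intel_crtc (defined elsewhere)
 * is assumed to carry the plane_ids_mask the macro dereferences; the wm
 * array and function name are hypothetical.
 */
static inline void example_clear_plane_wm(struct intel_crtc *crtc,
					  uint16_t wm[I915_MAX_PLANES])
{
	enum plane_id plane_id;

	/* Only visits planes whose bit is set in crtc->plane_ids_mask */
	for_each_plane_id_on_crtc(crtc, plane_id)
		wm[plane_id] = 0;
}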
enum port {
PORT_NONE = -1,
@@ -216,7 +321,8 @@ enum dpio_channel {
enum dpio_phy {
DPIO_PHY0,
- DPIO_PHY1
+ DPIO_PHY1,
+ DPIO_PHY2,
};
enum intel_display_power_domain {
@@ -416,6 +522,15 @@ struct drm_i915_file_private {
} rps;
unsigned int bsd_engine;
+
+/* Client can have a maximum of 3 banned contexts before it is
+ * denied further context creation. As one context ban requires 4
+ * consecutive hangs, and more if there is progress in between, this is
+ * a last-resort stopgap measure to limit a badly behaving client's
+ * access to the GPU.
+ */
+#define I915_MAX_CLIENT_CONTEXT_BANS 3
+ int context_bans;
};
/* Used by dp and fdi links */
@@ -659,32 +774,20 @@ struct intel_csr {
};
#define DEV_INFO_FOR_EACH_FLAG(func) \
- /* Keep is_* in chronological order */ \
func(is_mobile); \
- func(is_i85x); \
- func(is_i915g); \
- func(is_i945gm); \
- func(is_g33); \
- func(is_g4x); \
- func(is_pineview); \
- func(is_broadwater); \
- func(is_crestline); \
- func(is_ivybridge); \
- func(is_valleyview); \
- func(is_cherryview); \
- func(is_haswell); \
- func(is_broadwell); \
- func(is_skylake); \
- func(is_broxton); \
- func(is_kabylake); \
+ func(is_lp); \
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
+ func(has_aliasing_ppgtt); \
func(has_csr); \
func(has_ddi); \
+ func(has_decoupled_mmio); \
func(has_dp_mst); \
func(has_fbc); \
func(has_fpga_dbg); \
+ func(has_full_ppgtt); \
+ func(has_full_48bit_ppgtt); \
func(has_gmbus_irq); \
func(has_gmch_display); \
func(has_guc); \
@@ -705,8 +808,7 @@ struct intel_csr {
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_decoupled_mmio)
+ func(supports_tv);
struct sseu_dev_info {
u8 slice_mask;
@@ -726,13 +828,45 @@ static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}
+/* Keep in gen based order, and chronological order within a gen */
+enum intel_platform {
+ INTEL_PLATFORM_UNINITIALIZED = 0,
+ INTEL_I830,
+ INTEL_I845G,
+ INTEL_I85X,
+ INTEL_I865G,
+ INTEL_I915G,
+ INTEL_I915GM,
+ INTEL_I945G,
+ INTEL_I945GM,
+ INTEL_G33,
+ INTEL_PINEVIEW,
+ INTEL_I965G,
+ INTEL_I965GM,
+ INTEL_G45,
+ INTEL_GM45,
+ INTEL_IRONLAKE,
+ INTEL_SANDYBRIDGE,
+ INTEL_IVYBRIDGE,
+ INTEL_VALLEYVIEW,
+ INTEL_HASWELL,
+ INTEL_BROADWELL,
+ INTEL_CHERRYVIEW,
+ INTEL_SKYLAKE,
+ INTEL_BROXTON,
+ INTEL_KABYLAKE,
+ INTEL_GEMINILAKE,
+};
+
struct intel_device_info {
u32 display_mmio_offset;
u16 device_id;
u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
u8 gen;
u16 gen_mask;
+ enum intel_platform platform;
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
#define DEFINE_FLAG(name) u8 name:1
@@ -800,7 +934,8 @@ struct drm_i915_error_state {
/* Software tracked state */
bool waiting;
int num_waiters;
- int hangcheck_score;
+ unsigned long hangcheck_timestamp;
+ bool hangcheck_stalled;
enum intel_engine_hangcheck_action hangcheck_action;
struct i915_address_space *vm;
int num_requests;
@@ -849,6 +984,7 @@ struct drm_i915_error_state {
long jiffies;
pid_t pid;
u32 context;
+ int ban_score;
u32 seqno;
u32 head;
u32 tail;
@@ -870,6 +1006,7 @@ struct drm_i915_error_state {
pid_t pid;
char comm[TASK_COMM_LEN];
+ int context_bans;
} engine[I915_NUM_ENGINES];
struct drm_i915_error_buffer {
@@ -901,86 +1038,7 @@ enum i915_cache_level {
I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
-struct i915_ctx_hang_stats {
- /* This context had batch pending when hang was declared */
- unsigned batch_pending;
-
- /* This context had batch active when hang was declared */
- unsigned batch_active;
-
- /* Time when this context was last blamed for a GPU reset */
- unsigned long guilty_ts;
-
- /* If the contexts causes a second GPU hang within this time,
- * it is permanently banned from submitting any more work.
- */
- unsigned long ban_period_seconds;
-
- /* This context is banned to submit more work */
- bool banned;
-};
-
-/* This must match up with the value previously used for execbuf2.rsvd1. */
-#define DEFAULT_CONTEXT_HANDLE 0
-
-/**
- * struct i915_gem_context - as the name implies, represents a context.
- * @ref: reference count.
- * @user_handle: userspace tracking identity for this context.
- * @remap_slice: l3 row remapping information.
- * @flags: context specific flags:
- * CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
- * @file_priv: filp associated with this context (NULL for global default
- * context).
- * @hang_stats: information about the role of this context in possible GPU
- * hangs.
- * @ppgtt: virtual memory space used by this context.
- * @legacy_hw_ctx: render context backing object and whether it is correctly
- * initialized (legacy ring submission mechanism only).
- * @link: link in the global list of contexts.
- *
- * Contexts are memory images used by the hardware to store copies of their
- * internal state.
- */
-struct i915_gem_context {
- struct kref ref;
- struct drm_i915_private *i915;
- struct drm_i915_file_private *file_priv;
- struct i915_hw_ppgtt *ppgtt;
- struct pid *pid;
- const char *name;
-
- struct i915_ctx_hang_stats hang_stats;
-
- unsigned long flags;
-#define CONTEXT_NO_ZEROMAP BIT(0)
-#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
-
- /* Unique identifier for this context, used by the hw for tracking */
- unsigned int hw_id;
- u32 user_handle;
- int priority; /* greater priorities are serviced first */
-
- u32 ggtt_alignment;
-
- struct intel_context {
- struct i915_vma *state;
- struct intel_ring *ring;
- uint32_t *lrc_reg_state;
- u64 lrc_desc;
- int pin_count;
- bool initialised;
- } engine[I915_NUM_ENGINES];
- u32 ring_size;
- u32 desc_template;
- struct atomic_notifier_head status_notifier;
- bool execlists_force_single_submission;
-
- struct list_head link;
-
- u8 remap_slice;
- bool closed:1;
-};
+#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
enum fb_op_origin {
ORIGIN_GTT,
@@ -1027,7 +1085,7 @@ struct intel_fbc {
} plane;
struct {
- uint32_t pixel_format;
+ const struct drm_format_info *format;
unsigned int stride;
} fb;
} state_cache;
@@ -1042,7 +1100,7 @@ struct intel_fbc {
} crtc;
struct {
- uint32_t pixel_format;
+ const struct drm_format_info *format;
unsigned int stride;
} fb;
@@ -1058,7 +1116,7 @@ struct intel_fbc {
const char *no_fbc_reason;
};
-/**
+/*
* HIGH_RR is the highest eDP panel refresh rate read from EDID
* LOW_RR is the lowest eDP panel refresh rate found from EDID
* parsing for the same resolution.
@@ -1096,6 +1154,9 @@ struct i915_psr {
bool psr2_support;
bool aux_frame_sync;
bool link_standby;
+ bool y_cord_support;
+ bool colorimetry_support;
+ bool alpm;
};
enum intel_pch {
@@ -1395,7 +1456,7 @@ struct i915_gem_mm {
struct work_struct free_work;
/** Usable portion of the GTT for GEM */
- unsigned long stolen_base; /* limited to low memory (32-bit) */
+ phys_addr_t stolen_base; /* limited to low memory (32-bit) */
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
@@ -1438,19 +1499,20 @@ struct drm_i915_error_state_buf {
};
struct i915_error_state_file_priv {
- struct drm_device *dev;
+ struct drm_i915_private *i915;
struct drm_i915_error_state *error;
};
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
+#define I915_ENGINE_DEAD_TIMEOUT (4 * HZ) /* Seqno, head and subunits dead */
+#define I915_SEQNO_DEAD_TIMEOUT (12 * HZ) /* Seqno dead with active head */
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
- /* Hang gpu twice in this window and your context gets banned */
-#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
struct delayed_work hangcheck_work;
@@ -1532,6 +1594,7 @@ struct ddi_vbt_port_info {
uint8_t supports_dvi:1;
uint8_t supports_hdmi:1;
uint8_t supports_dp:1;
+ uint8_t supports_edp:1;
uint8_t alternate_aux_channel;
uint8_t alternate_ddc_pin;
@@ -1591,6 +1654,7 @@ struct intel_vbt_data {
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
+ u8 controller; /* brightness controller number */
enum intel_backlight_type type;
} backlight;
@@ -1637,24 +1701,22 @@ struct ilk_wm_values {
};
struct vlv_pipe_wm {
- uint16_t primary;
- uint16_t sprite[2];
- uint8_t cursor;
+ uint16_t plane[I915_MAX_PLANES];
};
struct vlv_sr_wm {
uint16_t plane;
- uint8_t cursor;
+ uint16_t cursor;
+};
+
+struct vlv_wm_ddl_values {
+ uint8_t plane[I915_MAX_PLANES];
};
struct vlv_wm_values {
struct vlv_pipe_wm pipe[3];
struct vlv_sr_wm sr;
- struct {
- uint8_t cursor;
- uint8_t sprite[2];
- uint8_t primary;
- } ddl[3];
+ struct vlv_wm_ddl_values ddl[3];
uint8_t level;
bool cxsr;
};
@@ -1750,6 +1812,7 @@ struct intel_pipe_crc {
enum intel_pipe_crc_source source;
int head, tail;
wait_queue_head_t wq;
+ int skipped;
};
struct i915_frontbuffer_tracking {
@@ -1795,6 +1858,201 @@ struct intel_wm_config {
bool sprites_scaled;
};
+struct i915_oa_format {
+ u32 format;
+ int size;
+};
+
+struct i915_oa_reg {
+ i915_reg_t addr;
+ u32 value;
+};
+
+struct i915_perf_stream;
+
+/**
+ * struct i915_perf_stream_ops - the OPs to support a specific stream type
+ */
+struct i915_perf_stream_ops {
+ /**
+ * @enable: Enables the collection of HW samples, either in response to
+ * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
+ * without `I915_PERF_FLAG_DISABLED`.
+ */
+ void (*enable)(struct i915_perf_stream *stream);
+
+ /**
+ * @disable: Disables the collection of HW samples, either in response
+ * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
+ * the stream.
+ */
+ void (*disable)(struct i915_perf_stream *stream);
+
+ /**
+ * @poll_wait: Call poll_wait, passing a wait queue that will be woken
+ * once there is something ready to read() for the stream
+ */
+ void (*poll_wait)(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait);
+
+ /**
+ * @wait_unlocked: For handling a blocking read, wait until there is
+ * something ready to read() for the stream. E.g. wait on the same
+ * wait queue that would be passed to poll_wait().
+ */
+ int (*wait_unlocked)(struct i915_perf_stream *stream);
+
+ /**
+ * @read: Copy buffered metrics as records to userspace
+ * **buf**: the userspace destination buffer
+ * **count**: the number of bytes to copy, requested by userspace
+ * **offset**: zero at the start of the read, updated as the read
+ * proceeds; it represents how many bytes have been copied so far and
+ * the buffer offset for copying the next record.
+ *
+ * Copy as many buffered i915 perf samples and records for this stream
+ * to userspace as will fit in the given buffer.
+ *
+ * Only write complete records; return -%ENOSPC if there isn't room
+ * for a complete record.
+ *
+ * Return any error condition that results in a short read such as
+ * -%ENOSPC or -%EFAULT, even though these may be squashed before
+ * returning to userspace.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @destroy: Cleanup any stream specific resources.
+ *
+ * The stream will always be disabled before this is called.
+ */
+ void (*destroy)(struct i915_perf_stream *stream);
+};
+
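/*
 * Illustrative sketch, not part of the patch: one way a backend's @read op
 * could honour the contract documented above - copy only whole records,
 * return -ENOSPC once the next record no longer fits, and -EFAULT on a
 * failed copy. example_have_record(), example_next_record() and
 * example_record_size() are hypothetical helpers; copy_to_user() assumes
 * <linux/uaccess.h>.
 */
static int example_stream_read(struct i915_perf_stream *stream,
			       char __user *buf, size_t count,
			       size_t *offset)
{
	while (example_have_record(stream)) {
		const void *rec = example_next_record(stream);
		size_t len = example_record_size(rec);

		/* Never split a record across reads */
		if (*offset + len > count)
			return -ENOSPC;

		if (copy_to_user(buf + *offset, rec, len))
			return -EFAULT;

		*offset += len;
	}

	return 0;
}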
+/**
+ * struct i915_perf_stream - state for a single open stream FD
+ */
+struct i915_perf_stream {
+ /**
+ * @dev_priv: i915 drm device
+ */
+ struct drm_i915_private *dev_priv;
+
+ /**
+ * @link: Links the stream into ``&drm_i915_private->streams``
+ */
+ struct list_head link;
+
+ /**
+ * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
+ * properties given when opening a stream, representing the contents
+ * of a single sample as read() by userspace.
+ */
+ u32 sample_flags;
+
+ /**
+ * @sample_size: Considering the configured contents of a sample
+ * combined with the required header size, this is the total size
+ * of a single sample record.
+ */
+ int sample_size;
+
+ /**
+ * @ctx: %NULL if measuring system-wide across all contexts or a
+ * specific context that is being monitored.
+ */
+ struct i915_gem_context *ctx;
+
+ /**
+ * @enabled: Whether the stream is currently enabled, considering
+ * whether the stream was opened in a disabled state and based
+ * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
+ */
+ bool enabled;
+
+ /**
+ * @ops: The callbacks providing the implementation of this specific
+ * type of configured stream.
+ */
+ const struct i915_perf_stream_ops *ops;
+};
+
+/**
+ * struct i915_oa_ops - Gen specific implementation of an OA unit stream
+ */
+struct i915_oa_ops {
+ /**
+ * @init_oa_buffer: Resets the head and tail pointers of the
+ * circular buffer for periodic OA reports.
+ *
+ * Called when first opening a stream for OA metrics, but also may be
+ * called in response to an OA buffer overflow or other error
+ * condition.
+ *
+ * Note it may be necessary to clear the full OA buffer here as part of
+ * maintaining the invariant that new reports must be written to
+ * zeroed memory for us to be able to reliably detect if an expected
+ * report has not yet landed in memory. (At least on Haswell the OA
+ * buffer tail pointer is not synchronized with reports being visible
+ * to the CPU)
+ */
+ void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @enable_metric_set: Applies any MUX configuration to set up the
+ * Boolean and Custom (B/C) counters that are part of the counter
+ * reports being sampled. May apply system constraints such as
+ * disabling EU clock gating as required.
+ */
+ int (*enable_metric_set)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @disable_metric_set: Remove system constraints associated with using
+ * the OA unit.
+ */
+ void (*disable_metric_set)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @oa_enable: Enable periodic sampling
+ */
+ void (*oa_enable)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @oa_disable: Disable periodic sampling
+ */
+ void (*oa_disable)(struct drm_i915_private *dev_priv);
+
+ /**
+ * @read: Copy data from the circular OA buffer into a given userspace
+ * buffer.
+ */
+ int (*read)(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset);
+
+ /**
+ * @oa_buffer_is_empty: Check if OA buffer empty (false positives OK)
+ *
+ * This is either called via fops or the poll check hrtimer (atomic
+ * ctx) without any locks taken.
+ *
+ * It's safe to read OA config state here unlocked, assuming that this
+ * is only called while the stream is enabled, during which time the
+ * global OA configuration can't be modified.
+ *
+ * Efficiency is more important than avoiding some false positives
+ * here, which will be handled gracefully - likely resulting in an
+ * %EAGAIN error for userspace.
+ */
+ bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv);
+};
+
struct drm_i915_private {
struct drm_device drm;
@@ -1815,6 +2073,7 @@ struct drm_i915_private {
struct intel_gvt *gvt;
+ struct intel_huc huc;
struct intel_guc guc;
struct intel_csr csr;
@@ -1898,7 +2157,14 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
- unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
+ unsigned int cdclk_freq, max_cdclk_freq;
+
+ /*
+ * For reading holding any crtc lock is sufficient,
+ * for writing must hold all of them.
+ */
+ unsigned int atomic_cdclk_freq;
+
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
unsigned int hpll_freq;
@@ -2051,6 +2317,9 @@ struct drm_i915_private {
} sagv_status;
struct {
+ /* protects DSPARB registers on pre-g4x/vlv/chv */
+ spinlock_t dsparb_lock;
+
/*
* Raw watermark latency values:
* in 0.1us units for WM0,
@@ -2095,6 +2364,54 @@ struct drm_i915_private {
struct i915_runtime_pm pm;
+ struct {
+ bool initialized;
+
+ struct kobject *metrics_kobj;
+ struct ctl_table_header *sysctl_header;
+
+ struct mutex lock;
+ struct list_head streams;
+
+ spinlock_t hook_lock;
+
+ struct {
+ struct i915_perf_stream *exclusive_stream;
+
+ u32 specific_ctx_id;
+
+ struct hrtimer poll_check_timer;
+ wait_queue_head_t poll_wq;
+ bool pollin;
+
+ bool periodic;
+ int period_exponent;
+ int timestamp_frequency;
+
+ int tail_margin;
+
+ int metrics_set;
+
+ const struct i915_oa_reg *mux_regs;
+ int mux_regs_len;
+ const struct i915_oa_reg *b_counter_regs;
+ int b_counter_regs_len;
+
+ struct {
+ struct i915_vma *vma;
+ u8 *vaddr;
+ int format;
+ int format_size;
+ } oa_buffer;
+
+ u32 gen7_latched_oastatus1;
+
+ struct i915_oa_ops ops;
+ const struct i915_oa_format *oa_formats;
+ int n_builtin_sets;
+ } oa;
+ } perf;
+
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
void (*resume)(struct drm_i915_private *);
@@ -2137,6 +2454,8 @@ struct drm_i915_private {
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
+ bool ipc_enabled;
+
/* Used to save the pipe-to-encoder mapping for audio */
struct intel_encoder *av_enc_map[I915_MAX_PIPES];
@@ -2291,102 +2610,6 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) < (__iter).max) || \
((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
-/*
- * A command that requires special handling by the command parser.
- */
-struct drm_i915_cmd_descriptor {
- /*
- * Flags describing how the command parser processes the command.
- *
- * CMD_DESC_FIXED: The command has a fixed length if this is set,
- * a length mask if not set
- * CMD_DESC_SKIP: The command is allowed but does not follow the
- * standard length encoding for the opcode range in
- * which it falls
- * CMD_DESC_REJECT: The command is never allowed
- * CMD_DESC_REGISTER: The command should be checked against the
- * register whitelist for the appropriate ring
- * CMD_DESC_MASTER: The command is allowed if the submitting process
- * is the DRM master
- */
- u32 flags;
-#define CMD_DESC_FIXED (1<<0)
-#define CMD_DESC_SKIP (1<<1)
-#define CMD_DESC_REJECT (1<<2)
-#define CMD_DESC_REGISTER (1<<3)
-#define CMD_DESC_BITMASK (1<<4)
-#define CMD_DESC_MASTER (1<<5)
-
- /*
- * The command's unique identification bits and the bitmask to get them.
- * This isn't strictly the opcode field as defined in the spec and may
- * also include type, subtype, and/or subop fields.
- */
- struct {
- u32 value;
- u32 mask;
- } cmd;
-
- /*
- * The command's length. The command is either fixed length (i.e. does
- * not include a length field) or has a length field mask. The flag
- * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
- * a length mask. All command entries in a command table must include
- * length information.
- */
- union {
- u32 fixed;
- u32 mask;
- } length;
-
- /*
- * Describes where to find a register address in the command to check
- * against the ring's register whitelist. Only valid if flags has the
- * CMD_DESC_REGISTER bit set.
- *
- * A non-zero step value implies that the command may access multiple
- * registers in sequence (e.g. LRI), in that case step gives the
- * distance in dwords between individual offset fields.
- */
- struct {
- u32 offset;
- u32 mask;
- u32 step;
- } reg;
-
-#define MAX_CMD_DESC_BITMASKS 3
- /*
- * Describes command checks where a particular dword is masked and
- * compared against an expected value. If the command does not match
- * the expected value, the parser rejects it. Only valid if flags has
- * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
- * are valid.
- *
- * If the check specifies a non-zero condition_mask then the parser
- * only performs the check when the bits specified by condition_mask
- * are non-zero.
- */
- struct {
- u32 offset;
- u32 mask;
- u32 expected;
- u32 condition_offset;
- u32 condition_mask;
- } bits[MAX_CMD_DESC_BITMASKS];
-};
-
-/*
- * A table of commands requiring special handling by the command parser.
- *
- * Each engine has an array of tables. Each table consists of an array of
- * command descriptors, which must be sorted with command opcodes in
- * ascending order.
- */
-struct drm_i915_cmd_table {
- const struct drm_i915_cmd_descriptor *table;
- int count;
-};
-
static inline const struct intel_device_info *
intel_info(const struct drm_i915_private *dev_priv)
{
@@ -2428,34 +2651,36 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_I830(dev_priv) (INTEL_DEVID(dev_priv) == 0x3577)
-#define IS_845G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2562)
-#define IS_I85X(dev_priv) ((dev_priv)->info.is_i85x)
-#define IS_I865G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2572)
-#define IS_I915G(dev_priv) ((dev_priv)->info.is_i915g)
-#define IS_I915GM(dev_priv) (INTEL_DEVID(dev_priv) == 0x2592)
-#define IS_I945G(dev_priv) (INTEL_DEVID(dev_priv) == 0x2772)
-#define IS_I945GM(dev_priv) ((dev_priv)->info.is_i945gm)
-#define IS_BROADWATER(dev_priv) ((dev_priv)->info.is_broadwater)
-#define IS_CRESTLINE(dev_priv) ((dev_priv)->info.is_crestline)
-#define IS_GM45(dev_priv) (INTEL_DEVID(dev_priv) == 0x2A42)
-#define IS_G4X(dev_priv) ((dev_priv)->info.is_g4x)
+#define IS_I830(dev_priv) ((dev_priv)->info.platform == INTEL_I830)
+#define IS_I845G(dev_priv) ((dev_priv)->info.platform == INTEL_I845G)
+#define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X)
+#define IS_I865G(dev_priv) ((dev_priv)->info.platform == INTEL_I865G)
+#define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G)
+#define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM)
+#define IS_I945G(dev_priv) ((dev_priv)->info.platform == INTEL_I945G)
+#define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM)
+#define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G)
+#define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM)
+#define IS_G45(dev_priv) ((dev_priv)->info.platform == INTEL_G45)
+#define IS_GM45(dev_priv) ((dev_priv)->info.platform == INTEL_GM45)
+#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
-#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.is_pineview)
-#define IS_G33(dev_priv) ((dev_priv)->info.is_g33)
+#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_PINEVIEW)
+#define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
-#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.is_ivybridge)
+#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
INTEL_DEVID(dev_priv) == 0x0152 || \
INTEL_DEVID(dev_priv) == 0x015a)
-#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.is_valleyview)
-#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.is_cherryview)
-#define IS_HASWELL(dev_priv) ((dev_priv)->info.is_haswell)
-#define IS_BROADWELL(dev_priv) ((dev_priv)->info.is_broadwell)
-#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.is_skylake)
-#define IS_BROXTON(dev_priv) ((dev_priv)->info.is_broxton)
-#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.is_kabylake)
+#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW)
+#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW)
+#define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL)
+#define IS_BROADWELL(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWELL)
+#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_SKYLAKE)
+#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON)
+#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE)
+#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE)
#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
@@ -2512,6 +2737,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define BXT_REVID_A0 0x0
#define BXT_REVID_A1 0x1
#define BXT_REVID_B0 0x3
+#define BXT_REVID_B_LAST 0x8
#define BXT_REVID_C0 0x9
#define IS_BXT_REVID(dev_priv, since, until) \
@@ -2541,6 +2767,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
+#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp)
+#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
+
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
#define BSD_RING ENGINE_MASK(VCS)
@@ -2577,7 +2806,7 @@ intel_info(const struct drm_i915_private *dev_priv)
((dev_priv)->info.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
-#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_845G(dev_priv))
+#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
@@ -2630,6 +2859,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
@@ -2686,9 +2916,6 @@ static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
return false;
}
-extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
-extern int i915_resume_switcheroo(struct drm_device *dev);
-
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt);
@@ -2891,10 +3118,10 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -2902,23 +3129,37 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int i915_gem_load_init(struct drm_device *dev);
-void i915_gem_load_cleanup(struct drm_device *dev);
+int i915_gem_load_init(struct drm_i915_private *dev_priv);
+void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
-void *i915_gem_object_alloc(struct drm_device *dev);
+void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
- u64 size);
-struct drm_i915_gem_object *i915_gem_object_create_from_data(
- struct drm_device *dev, const void *data, size_t size);
+struct drm_i915_gem_object *
+i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
+struct drm_i915_gem_object *
+i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
+ const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);
+static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
+{
+ /* A single pass should suffice to release all the freed objects (along
+ * most call paths), but be a little paranoid: freeing the objects does
+ * take a small amount of time, during which the RCU callbacks could
+ * have added new objects to the freed list and armed the work again.
+ */
+ do {
+ rcu_barrier();
+ } while (flush_work(&i915->mm.free_work));
+}
+
struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
@@ -2988,7 +3229,6 @@ __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!obj->mm.pages);
atomic_dec(&obj->mm.pages_pin_count);
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
}
static inline void
@@ -3013,8 +3253,8 @@ enum i915_map_type {
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
- * @obj - the object to map into kernel address space
- * @type - the type of mapping, used to select pgprot_t
+ * @obj: the object to map into kernel address space
+ * @type: the type of mapping, used to select pgprot_t
*
* Calls i915_gem_object_pin_pages() to prevent reaping of the object's
* pages and then returns a contiguous mapping of the backing storage into
@@ -3032,7 +3272,7 @@ void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
/**
* i915_gem_object_unpin_map - releases an earlier mapping
- * @obj - the object to unmap
+ * @obj: the object to unmap
*
* After pinning the object and mapping its pages, once you are finished
* with your access, call i915_gem_object_unpin_map() to release the pin
@@ -3100,17 +3340,18 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return READ_ONCE(error->reset_count);
}
-void i915_gem_reset(struct drm_i915_private *dev_priv);
+int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
-int __must_check i915_gem_init(struct drm_device *dev);
-int __must_check i915_gem_init_hw(struct drm_device *dev);
+int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
-void i915_gem_cleanup_engines(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags);
-int __must_check i915_gem_suspend(struct drm_device *dev);
-void i915_gem_resume(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_resume(struct drm_i915_private *dev_priv);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
@@ -3136,11 +3377,6 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
-u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
- int tiling_mode);
-u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
- int tiling_mode, bool fenced);
-
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@@ -3150,33 +3386,17 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
-struct i915_vma *
-i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
-
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
return container_of(vm, struct i915_hw_ppgtt, base);
}
-static inline struct i915_vma *
-i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
- const struct i915_ggtt_view *view)
-{
- return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
-}
-
/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
+void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
@@ -3185,23 +3405,6 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
-/* i915_gem_context.c */
-int __must_check i915_gem_context_init(struct drm_device *dev);
-void i915_gem_context_lost(struct drm_i915_private *dev_priv);
-void i915_gem_context_fini(struct drm_device *dev);
-int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct drm_i915_gem_request *req);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
-struct i915_vma *
-i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
- unsigned int flags);
-void i915_gem_context_free(struct kref *ctx_ref);
-struct drm_i915_gem_object *
-i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
-struct i915_gem_context *
-i915_gem_context_create_gvt(struct drm_device *dev);
-
static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
@@ -3229,6 +3432,14 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_free);
}
+static inline void i915_gem_context_put_unlocked(struct i915_gem_context *ctx)
+{
+ struct mutex *lock = &ctx->i915->drm.struct_mutex;
+
+ if (kref_put_mutex(&ctx->ref, i915_gem_context_free, lock))
+ mutex_unlock(lock);
+}
+
static inline struct intel_timeline *
i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
@@ -3239,21 +3450,8 @@ i915_gem_context_lookup_timeline(struct i915_gem_context *ctx,
return &vm->timeline.engine[engine->id];
}
-static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
-{
- return c->user_handle == DEFAULT_CONTEXT_HANDLE;
-}
-
-int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
-int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
+int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@@ -3261,7 +3459,9 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
unsigned cache_level,
u64 start, u64 end,
unsigned flags);
-int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
+int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
/* belongs in i915_gem_gtt.h */
@@ -3285,9 +3485,9 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size);
struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
u32 stolen_offset,
u32 gtt_offset,
u32 size);
@@ -3295,7 +3495,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
- unsigned int size);
+ phys_addr_t size);
/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
@@ -3320,6 +3520,11 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
i915_gem_object_is_tiled(obj);
}
+u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
+ unsigned int tiling, unsigned int stride);
+u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
+ unsigned int tiling, unsigned int stride);
+
/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
@@ -3355,7 +3560,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
void i915_error_state_get(struct drm_device *dev,
struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
-void i915_destroy_error_state(struct drm_device *dev);
+void i915_destroy_error_state(struct drm_i915_private *dev_priv);
#else
@@ -3365,7 +3570,7 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
{
}
-static inline void i915_destroy_error_state(struct drm_device *dev)
+static inline void i915_destroy_error_state(struct drm_i915_private *dev_priv)
{
}
@@ -3377,7 +3582,6 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
-bool intel_engine_needs_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
struct drm_i915_gem_object *batch_obj,
struct drm_i915_gem_object *shadow_batch_obj,
@@ -3385,9 +3589,15 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
u32 batch_len,
bool is_master);
+/* i915_perf.c */
+extern void i915_perf_init(struct drm_i915_private *dev_priv);
+extern void i915_perf_fini(struct drm_i915_private *dev_priv);
+extern void i915_perf_register(struct drm_i915_private *dev_priv);
+extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
+
/* i915_suspend.c */
-extern int i915_save_state(struct drm_device *dev);
-extern int i915_restore_state(struct drm_device *dev);
+extern int i915_save_state(struct drm_i915_private *dev_priv);
+extern int i915_restore_state(struct drm_i915_private *dev_priv);
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
@@ -3402,8 +3612,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
bool dp_output, int link_rate);
/* intel_i2c.c */
-extern int intel_setup_gmbus(struct drm_device *dev);
-extern void intel_teardown_gmbus(struct drm_device *dev);
+extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
+extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
unsigned int pin);
@@ -3415,7 +3625,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
-extern void intel_i2c_reset(struct drm_device *dev);
+extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
/* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv);
@@ -3482,6 +3692,7 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
return (struct intel_device_info *)&dev_priv->info;
}
+const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump(struct drm_i915_private *dev_priv);
@@ -3498,9 +3709,9 @@ extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-extern void intel_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
+extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
@@ -3545,7 +3756,7 @@ u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
/* intel_dpio_phy.c */
-void bxt_port_to_phy_channel(enum port port,
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
enum port port, u32 margin, u32 scale,
@@ -3812,29 +4023,25 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
+/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
+ * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
+ * perform the operation. To check beforehand, pass in the parameters to
+ * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
+ * you only need to pass in the minor offsets; page-aligned pointers are
+ * always valid.
+ *
+ * To check only for SSE4.1, when you already know the future use will be
+ * correctly aligned, just use i915_has_memcpy_from_wc().
+ */
+#define i915_can_memcpy_from_wc(dst, src, len) \
+ i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)
+
+#define i915_has_memcpy_from_wc() \
+ i915_memcpy_from_wc(NULL, NULL, 0)
+
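/*
 * Illustrative usage of the helpers above, not part of the patch: try the
 * non-temporal copy and fall back to a plain memcpy when the alignment or
 * SSE4.1 support is missing. The function name is hypothetical.
 */
static inline void example_copy_from_wc(void *dst, const void *src, unsigned long len)
{
	/* Only the low 4 bits of each argument matter for the check */
	if (i915_can_memcpy_from_wc(dst, src, len))
		i915_memcpy_from_wc(dst, src, len);
	else
		memcpy(dst, src, len);
}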
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
-#define ptr_mask_bits(ptr) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v & PAGE_MASK); \
-})
-
-#define ptr_unpack_bits(ptr, bits) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (bits) = __v & ~PAGE_MASK; \
- (typeof(ptr))(__v & PAGE_MASK); \
-})
-
-#define ptr_pack_bits(ptr, bits) \
- ((typeof(ptr))((unsigned long)(ptr) | (bits)))
-
-#define fetch_and_zero(ptr) ({ \
- typeof(*ptr) __T = *(ptr); \
- *(ptr) = (typeof(*ptr))0; \
- __T; \
-})
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 24b5b046754b..88f3628b4e29 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,6 +38,7 @@
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
+#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
@@ -68,11 +69,10 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size)
{
memset(node, 0, sizeof(*node));
- return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
- size, 0, -1,
- 0, ggtt->mappable_end,
- DRM_MM_SEARCH_DEFAULT,
- DRM_MM_CREATE_DEFAULT);
+ return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
+ size, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
}
static void
@@ -440,7 +440,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
timeout = i915_gem_object_wait_fence(shared[i],
flags, timeout,
rps);
- if (timeout <= 0)
+ if (timeout < 0)
break;
dma_fence_put(shared[i]);
@@ -453,7 +453,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
excl = reservation_object_get_excl_rcu(resv);
}
- if (excl && timeout > 0)
+ if (excl && timeout >= 0)
timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
dma_fence_put(excl);
@@ -612,9 +612,8 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
-void *i915_gem_object_alloc(struct drm_device *dev)
+void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}
@@ -626,7 +625,7 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
static int
i915_gem_create(struct drm_file *file,
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
uint64_t size,
uint32_t *handle_p)
{
@@ -639,7 +638,7 @@ i915_gem_create(struct drm_file *file,
return -EINVAL;
/* Allocate the new object */
- obj = i915_gem_object_create(dev, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -661,7 +660,7 @@ i915_gem_dumb_create(struct drm_file *file,
/* have to work out size/pitch and return them */
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
- return i915_gem_create(file, dev,
+ return i915_gem_create(file, to_i915(dev),
args->size, &args->handle);
}
@@ -675,11 +674,12 @@ int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_create *args = data;
- i915_gem_flush_free_objects(to_i915(dev));
+ i915_gem_flush_free_objects(dev_priv);
- return i915_gem_create(file, dev,
+ return i915_gem_create(file, dev_priv,
args->size, &args->handle);
}
@@ -1114,8 +1114,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
/* Bounds check source. */
- if (args->offset > obj->base.size ||
- args->size > obj->base.size - args->offset) {
+ if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
ret = -EINVAL;
goto out;
}
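/*
 * Sketch, not part of the patch: range_overflows_t() comes from the newly
 * included i915_utils.h and is assumed to reduce to the open-coded bounds
 * check it replaces, evaluated in the requested type without intermediate
 * overflow, i.e. roughly:
 */
static inline bool example_range_overflows_u64(u64 start, u64 size, u64 max)
{
	return start > max || size > max - start;
}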
@@ -1428,8 +1427,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
/* Bounds check destination. */
- if (args->offset > obj->base.size ||
- args->size > obj->base.size - args->offset) {
+ if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
ret = -EINVAL;
goto err;
}
@@ -1491,7 +1489,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!i915_vma_is_ggtt(vma))
- continue;
+ break;
if (i915_vma_is_active(vma))
continue;
@@ -1696,12 +1694,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
- u64 size;
-
- size = i915_gem_object_get_stride(obj);
- size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
-
- return size >> PAGE_SHIFT;
+ return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}
/**
@@ -1754,6 +1747,29 @@ int i915_gem_mmap_gtt_version(void)
return 1;
}
+static inline struct i915_ggtt_view
+compute_partial_view(struct drm_i915_gem_object *obj,
+ pgoff_t page_offset,
+ unsigned int chunk)
+{
+ struct i915_ggtt_view view;
+
+ if (i915_gem_object_is_tiled(obj))
+ chunk = roundup(chunk, tile_row_pages(obj));
+
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.partial.offset = rounddown(page_offset, chunk);
+ view.partial.size =
+ min_t(unsigned int, chunk,
+ (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
+
+ /* If the partial covers the entire object, just create a normal VMA. */
+ if (chunk >= obj->base.size >> PAGE_SHIFT)
+ view.type = I915_GGTT_VIEW_NORMAL;
+
+ return view;
+}
+
/**
* i915_gem_fault - fault a page into the GTT
* @area: CPU VMA in question
@@ -1830,26 +1846,9 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
if (IS_ERR(vma)) {
- struct i915_ggtt_view view;
- unsigned int chunk_size;
-
/* Use a partial view if it is bigger than available space */
- chunk_size = MIN_CHUNK_PAGES;
- if (i915_gem_object_is_tiled(obj))
- chunk_size = roundup(chunk_size, tile_row_pages(obj));
-
- memset(&view, 0, sizeof(view));
- view.type = I915_GGTT_VIEW_PARTIAL;
- view.params.partial.offset = rounddown(page_offset, chunk_size);
- view.params.partial.size =
- min_t(unsigned int, chunk_size,
- vma_pages(area) - view.params.partial.offset);
-
- /* If the partial covers the entire object, just create a
- * normal VMA.
- */
- if (chunk_size >= obj->base.size >> PAGE_SHIFT)
- view.type = I915_GGTT_VIEW_NORMAL;
+ struct i915_ggtt_view view =
+ compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
/* Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
@@ -1878,7 +1877,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
- area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
+ area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
(ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->mappable);
@@ -2029,91 +2028,27 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
}
}
-/**
- * i915_gem_get_ggtt_size - return required global GTT size for an object
- * @dev_priv: i915 device
- * @size: object size
- * @tiling_mode: tiling mode
- *
- * Return the required global GTT size for an object, taking into account
- * potential fence register mapping.
- */
-u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
- u64 size, int tiling_mode)
-{
- u64 ggtt_size;
-
- GEM_BUG_ON(size == 0);
-
- if (INTEL_GEN(dev_priv) >= 4 ||
- tiling_mode == I915_TILING_NONE)
- return size;
-
- /* Previous chips need a power-of-two fence region when tiling */
- if (IS_GEN3(dev_priv))
- ggtt_size = 1024*1024;
- else
- ggtt_size = 512*1024;
-
- while (ggtt_size < size)
- ggtt_size <<= 1;
-
- return ggtt_size;
-}
-
-/**
- * i915_gem_get_ggtt_alignment - return required global GTT alignment
- * @dev_priv: i915 device
- * @size: object size
- * @tiling_mode: tiling mode
- * @fenced: is fenced alignment required or not
- *
- * Return the required global GTT alignment for an object, taking into account
- * potential fence register mapping.
- */
-u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
- int tiling_mode, bool fenced)
-{
- GEM_BUG_ON(size == 0);
-
- /*
- * Minimum alignment is 4k (GTT page size), but might be greater
- * if a fence register is needed for the object.
- */
- if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
- tiling_mode == I915_TILING_NONE)
- return 4096;
-
- /*
- * Previous chips need to be aligned to the size of the smallest
- * fence register that can contain the object.
- */
- return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
-}
-
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
int err;
err = drm_gem_create_mmap_offset(&obj->base);
- if (!err)
+ if (likely(!err))
return 0;
- /* We can idle the GPU locklessly to flush stale objects, but in order
- * to claim that space for ourselves, we need to take the big
- * struct_mutex to free the requests+objects and allocate our slot.
- */
- err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
- if (err)
- return err;
+ /* Attempt to reap some mmap space from dead objects */
+ do {
+ err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
+ if (err)
+ break;
- err = i915_mutex_lock_interruptible(&dev_priv->drm);
- if (!err) {
- i915_gem_retire_requests(dev_priv);
+ i915_gem_drain_freed_objects(dev_priv);
err = drm_gem_create_mmap_offset(&obj->base);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
+ if (!err)
+ break;
+
+ } while (flush_delayed_work(&dev_priv->gt.retire_work));
return err;
}
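
The reap loop above relies on the return value of flush_delayed_work(): it reports true whenever it had to wait for pending work, so looping on it keeps retiring until the retire worker is verifiably idle. A minimal sketch of that generic pattern (illustrative only; the drain_until_idle() helper is hypothetical and not part of the patch):

#include <linux/workqueue.h>

/* Keep flushing while the delayed work keeps re-arming itself; stop once
 * flush_delayed_work() reports there was nothing left to wait for.
 */
static void drain_until_idle(struct delayed_work *dwork)
{
	while (flush_delayed_work(dwork))
		;
}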
@@ -2306,6 +2241,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
/* called before being DMA mapped, no need to copy sg->dma_* */
new_sg = sg_next(new_sg);
}
+ GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
sg_free_table(orig_st);
@@ -2627,35 +2563,34 @@ err_unlock:
goto out_unlock;
}
-static bool i915_context_is_banned(const struct i915_gem_context *ctx)
+static bool ban_context(const struct i915_gem_context *ctx)
{
- unsigned long elapsed;
+ return (i915_gem_context_is_bannable(ctx) &&
+ ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD);
+}
- if (ctx->hang_stats.banned)
- return true;
+static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
+{
+ ctx->guilty_count++;
+ ctx->ban_score += CONTEXT_SCORE_GUILTY;
+ if (ban_context(ctx))
+ i915_gem_context_set_banned(ctx);
- elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
- if (ctx->hang_stats.ban_period_seconds &&
- elapsed <= ctx->hang_stats.ban_period_seconds) {
- DRM_DEBUG("context hanging too fast, banning!\n");
- return true;
- }
+ DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
+ ctx->name, ctx->ban_score,
+ yesno(i915_gem_context_is_banned(ctx)));
+
+ if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv))
+ return;
- return false;
+ ctx->file_priv->context_bans++;
+ DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
+ ctx->name, ctx->file_priv->context_bans);
}
-static void i915_set_reset_status(struct i915_gem_context *ctx,
- const bool guilty)
+static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
- struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
-
- if (guilty) {
- hs->banned = i915_context_is_banned(ctx);
- hs->batch_active++;
- hs->guilty_ts = get_seconds();
- } else {
- hs->batch_pending++;
- }
+ ctx->active_count++;
}
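
With the constants used above (CONTEXT_SCORE_GUILTY = 10 against CONTEXT_SCORE_BAN_THRESHOLD = 40, both defined in i915_gem_context.h later in this patch), a bannable context crosses the ban threshold on its fourth guilty hang, assuming nothing else adjusts ctx->ban_score in between. A standalone sketch of that arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

#define CONTEXT_SCORE_GUILTY		10
#define CONTEXT_SCORE_BAN_THRESHOLD	40

int main(void)
{
	int ban_score = 0, guilty_hangs = 0;

	while (ban_score < CONTEXT_SCORE_BAN_THRESHOLD) {
		ban_score += CONTEXT_SCORE_GUILTY;	/* one guilty hang */
		guilty_hangs++;
	}

	printf("banned after %d guilty hangs\n", guilty_hangs);	/* prints 4 */
	return 0;
}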
struct drm_i915_gem_request *
@@ -2675,13 +2610,52 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
if (__i915_gem_request_completed(request))
continue;
+ GEM_BUG_ON(request->engine != engine);
return request;
}
return NULL;
}
-static void reset_request(struct drm_i915_gem_request *request)
+static bool engine_stalled(struct intel_engine_cs *engine)
+{
+ if (!engine->hangcheck.stalled)
+ return false;
+
+ /* Check for possible seqno movement after hang declaration */
+ if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
+ DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
+ return false;
+ }
+
+ return true;
+}
+
+int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Ensure the irq handler finishes and is not run again. */
+ for_each_engine(engine, dev_priv, id) {
+ struct drm_i915_gem_request *request;
+
+ tasklet_kill(&engine->irq_tasklet);
+
+ if (engine_stalled(engine)) {
+ request = i915_gem_find_active_request(engine);
+ if (request && request->fence.error == -EIO)
+ err = -EIO; /* Previous reset failed! */
+ }
+ }
+
+ i915_gem_revoke_fences(dev_priv);
+
+ return err;
+}
+
+static void skip_request(struct drm_i915_gem_request *request)
{
void *vaddr = request->ring->vaddr;
u32 head;
@@ -2696,66 +2670,93 @@ static void reset_request(struct drm_i915_gem_request *request)
head = 0;
}
memset(vaddr + head, 0, request->postfix - head);
+
+ dma_fence_set_error(&request->fence, -EIO);
}
-static void i915_gem_reset_engine(struct intel_engine_cs *engine)
+static void engine_skip_context(struct drm_i915_gem_request *request)
{
- struct drm_i915_gem_request *request;
- struct i915_gem_context *incomplete_ctx;
+ struct intel_engine_cs *engine = request->engine;
+ struct i915_gem_context *hung_ctx = request->ctx;
struct intel_timeline *timeline;
unsigned long flags;
- bool ring_hung;
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
+ timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
- request = i915_gem_find_active_request(engine);
- if (!request)
- return;
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock(&timeline->lock);
- ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
- if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
- ring_hung = false;
+ list_for_each_entry_continue(request, &engine->timeline->requests, link)
+ if (request->ctx == hung_ctx)
+ skip_request(request);
- i915_set_reset_status(request->ctx, ring_hung);
- if (!ring_hung)
- return;
+ list_for_each_entry(request, &timeline->requests, link)
+ skip_request(request);
- DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
- engine->name, request->global_seqno);
+ spin_unlock(&timeline->lock);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
- /* Setup the CS to resume from the breadcrumb of the hung request */
- engine->reset_hw(engine, request);
+/* Returns true if the request was guilty of hang */
+static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
+{
+ /* Read once and return the resolution */
+ const bool guilty = engine_stalled(request->engine);
- /* Users of the default context do not rely on logical state
- * preserved between batches. They have to emit full state on
- * every batch and so it is safe to execute queued requests following
- * the hang.
+ /* The guilty request will get skipped on a hung engine.
+ *
+ * Users of client default contexts do not rely on logical
+ * state preserved between batches so it is safe to execute
+ * queued requests following the hang. Non-default contexts
+ * rely on preserved state, so skipping a batch loses the
+ * evolution of the state and it needs to be considered corrupted.
+ * Executing more queued batches on top of corrupted state is
+ * risky. But we take the risk by trying to advance through
+ * the queued requests in order to make the client behaviour
+ * more predictable around resets, by not throwing away a random
+ * number of batches it has prepared for execution. Sophisticated
+ * clients can use gem_reset_stats_ioctl and dma fence status
+ * (exported via the sync_file info ioctl on explicit fences) to observe
+ * when they lose the context state and should rebuild accordingly.
*
- * Other contexts preserve state, now corrupt. We want to skip all
- * queued requests that reference the corrupt context.
+ * The context ban, and ultimately the client ban, mechanism are safety
+ * valves if client submission ends up resulting in nothing more than
+ * subsequent hangs.
*/
- incomplete_ctx = request->ctx;
- if (i915_gem_context_is_default(incomplete_ctx))
- return;
- timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
+ if (guilty) {
+ i915_gem_context_mark_guilty(request->ctx);
+ skip_request(request);
+ } else {
+ i915_gem_context_mark_innocent(request->ctx);
+ dma_fence_set_error(&request->fence, -EAGAIN);
+ }
- spin_lock_irqsave(&engine->timeline->lock, flags);
- spin_lock(&timeline->lock);
+ return guilty;
+}
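
The comment above points clients at the dma fence status exported through the sync_file info ioctl. A userspace-side sketch of reading that status from an explicit fence fd (illustrative only, not part of the patch; a negative status such as -EIO for a skipped batch or -EAGAIN for an innocent one means the fence signalled with an error):

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

/* Returns 1 if signalled, 0 if still active, negative on error
 * (either from the ioctl itself or from the fence status).
 */
static int fence_status(int fence_fd)
{
	struct sync_file_info info;

	memset(&info, 0, sizeof(info));
	if (ioctl(fence_fd, SYNC_IOC_FILE_INFO, &info) < 0)
		return -errno;

	return info.status;
}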
- list_for_each_entry_continue(request, &engine->timeline->requests, link)
- if (request->ctx == incomplete_ctx)
- reset_request(request);
+static void i915_gem_reset_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_request *request;
- list_for_each_entry(request, &timeline->requests, link)
- reset_request(request);
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
- spin_unlock(&timeline->lock);
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ request = i915_gem_find_active_request(engine);
+ if (request && i915_gem_reset_request(request)) {
+ DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
+ engine->name, request->global_seqno);
+
+ /* If this context is now banned, skip all pending requests. */
+ if (i915_gem_context_is_banned(request->ctx))
+ engine_skip_context(request);
+ }
+
+ /* Setup the CS to resume from the breadcrumb of the hung request */
+ engine->reset_hw(engine, request);
}
-void i915_gem_reset(struct drm_i915_private *dev_priv)
+void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -2779,14 +2780,30 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct drm_i915_gem_request *request)
{
+ dma_fence_set_error(&request->fence, -EIO);
i915_gem_request_submit(request);
intel_engine_init_global_seqno(request->engine, request->global_seqno);
}
-static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
+static void engine_set_wedged(struct intel_engine_cs *engine)
{
+ struct drm_i915_gem_request *request;
+ unsigned long flags;
+
+ /* We need to be sure that no thread is running the old callback as
+ * we install the nop handler (otherwise we would submit a request
+ * to hardware that will never complete). In order to prevent this
+ * race, we wait until the machine is idle before making the swap
+ * (using stop_machine()).
+ */
engine->submit_request = nop_submit_request;
+ /* Mark all executing requests as skipped */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ list_for_each_entry(request, &engine->timeline->requests, link)
+ dma_fence_set_error(&request->fence, -EIO);
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
/* Mark all pending requests as complete so that any concurrent
* (lockless) lookup doesn't try and wait upon the request as we
* reset it.
@@ -2815,20 +2832,29 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
}
}
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
+static int __i915_gem_set_wedged_BKL(void *data)
{
+ struct drm_i915_private *i915 = data;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ for_each_engine(engine, i915, id)
+ engine_set_wedged(engine);
+
+ return 0;
+}
+
+void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
+{
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
- i915_gem_context_lost(dev_priv);
- for_each_engine(engine, dev_priv, id)
- i915_gem_cleanup_engine(engine);
- mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
+ stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
+ i915_gem_context_lost(dev_priv);
i915_gem_retire_requests(dev_priv);
+
+ mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
}
static void
@@ -3373,7 +3399,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
- int ret;
+ int ret = 0;
switch (args->caching) {
case I915_CACHING_NONE:
@@ -3398,20 +3424,29 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = i915_mutex_lock_interruptible(dev);
+ obj = i915_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->cache_level == level)
+ goto out;
+
+ ret = i915_gem_object_wait(obj,
+ I915_WAIT_INTERRUPTIBLE,
+ MAX_SCHEDULE_TIMEOUT,
+ to_rps_client(file));
if (ret)
- return ret;
+ goto out;
- obj = i915_gem_object_lookup(file, args->handle);
- if (!obj) {
- ret = -ENOENT;
- goto unlock;
- }
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto out;
ret = i915_gem_object_set_cache_level(obj, level);
- i915_gem_object_put(obj);
-unlock:
mutex_unlock(&dev->struct_mutex);
+
+out:
+ i915_gem_object_put(obj);
return ret;
}
@@ -3461,7 +3496,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* try to preserve the existing ABI).
*/
vma = ERR_PTR(-ENOSPC);
- if (view->type == I915_GGTT_VIEW_NORMAL)
+ if (!view || view->type == I915_GGTT_VIEW_NORMAL)
vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
PIN_MAPPABLE | PIN_NONBLOCK);
if (IS_ERR(vma)) {
@@ -3514,17 +3549,16 @@ err_unpin_display:
void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(vma->obj->pin_display == 0))
return;
if (--vma->obj->pin_display == 0)
- vma->display_alignment = 0;
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
/* Bump the LRU to try and avoid premature eviction whilst flipping */
- if (!i915_vma_is_active(vma))
- list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+ i915_gem_object_bump_inactive_ggtt(vma->obj);
i915_vma_unpin(vma);
}
@@ -3655,8 +3689,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->base.dev->struct_mutex);
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
- if (IS_ERR(vma))
+ vma = i915_vma_instance(obj, vm, view);
+ if (unlikely(IS_ERR(vma)))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
@@ -3665,10 +3699,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE) {
- u32 fence_size;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
- i915_gem_object_get_tiling(obj));
/* If the required space is larger than the available
 * aperture, we will not be able to find a slot for the
* object and unbinding the object now will be in
@@ -3676,7 +3706,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
* the object in and out of the Global GTT and
* waste a lot of cycles under the mutex.
*/
- if (fence_size > dev_priv->ggtt.mappable_end)
+ if (vma->fence_size > dev_priv->ggtt.mappable_end)
return ERR_PTR(-E2BIG);
/* If NONBLOCK is set the caller is optimistically
@@ -3695,7 +3725,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
* we could try to minimise harm to others.
*/
if (flags & PIN_NONBLOCK &&
- fence_size > dev_priv->ggtt.mappable_end / 2)
+ vma->fence_size > dev_priv->ggtt.mappable_end / 2)
return ERR_PTR(-ENOSPC);
}
@@ -3948,14 +3978,9 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.put_pages = i915_gem_object_put_pages_gtt,
};
-/* Note we don't consider signbits :| */
-#define overflows_type(x, T) \
- (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
-
struct drm_i915_gem_object *
-i915_gem_object_create(struct drm_device *dev, u64 size)
+i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
@@ -3972,16 +3997,16 @@ i915_gem_object_create(struct drm_device *dev, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(dev);
+ obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
return ERR_PTR(-ENOMEM);
- ret = drm_gem_object_init(dev, &obj->base, size);
+ ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
if (ret)
goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
- if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
+ if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
/* 965gm cannot relocate objects above 4GiB. */
mask &= ~__GFP_HIGHMEM;
mask |= __GFP_DMA32;
@@ -4174,12 +4199,13 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
enum intel_engine_id id;
for_each_engine(engine, dev_priv, id)
- GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
+ GEM_BUG_ON(engine->last_retired_context &&
+ !i915_gem_context_is_kernel(engine->last_retired_context));
}
-int i915_gem_suspend(struct drm_device *dev)
+int i915_gem_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
int ret;
intel_suspend_gt_powersave(dev_priv);
@@ -4213,8 +4239,14 @@ int i915_gem_suspend(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
- flush_delayed_work(&dev_priv->gt.idle_work);
- flush_work(&dev_priv->mm.free_work);
+
+ /* As the idle_work will rearm itself if it detects a race, play safe and
+ * repeat the flush until it is definitely idle.
+ */
+ while (flush_delayed_work(&dev_priv->gt.idle_work))
+ ;
+
+ i915_gem_drain_freed_objects(dev_priv);
 /* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
@@ -4253,9 +4285,9 @@ err:
return ret;
}
-void i915_gem_resume(struct drm_device *dev)
+void i915_gem_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
WARN_ON(dev_priv->gt.awake);
@@ -4320,9 +4352,8 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
}
int
-i915_gem_init_hw(struct drm_device *dev)
+i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
@@ -4376,10 +4407,10 @@ i915_gem_init_hw(struct drm_device *dev)
goto out;
}
- intel_mocs_init_l3cc_table(dev);
+ intel_mocs_init_l3cc_table(dev_priv);
/* We can't enable contexts until all firmware is loaded */
- ret = intel_guc_setup(dev);
+ ret = intel_guc_setup(dev_priv);
if (ret)
goto out;
@@ -4409,12 +4440,11 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
return true;
}
-int i915_gem_init(struct drm_device *dev)
+int i915_gem_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
if (!i915.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
@@ -4438,15 +4468,15 @@ int i915_gem_init(struct drm_device *dev)
if (ret)
goto out_unlock;
- ret = i915_gem_context_init(dev);
+ ret = i915_gem_context_init(dev_priv);
if (ret)
goto out_unlock;
- ret = intel_engines_init(dev);
+ ret = intel_engines_init(dev_priv);
if (ret)
goto out_unlock;
- ret = i915_gem_init_hw(dev);
+ ret = i915_gem_init_hw(dev_priv);
if (ret == -EIO) {
/* Allow engine initialisation to fail by marking the GPU as
* wedged. But we only want to do this where the GPU is angry,
@@ -4459,15 +4489,14 @@ int i915_gem_init(struct drm_device *dev)
out_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
void
-i915_gem_cleanup_engines(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -4483,8 +4512,9 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
!IS_CHERRYVIEW(dev_priv))
dev_priv->num_fence_regs = 32;
- else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
- IS_I945GM(dev_priv) || IS_G33(dev_priv))
+ else if (INTEL_INFO(dev_priv)->gen >= 4 ||
+ IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
dev_priv->num_fence_regs = 16;
else
dev_priv->num_fence_regs = 8;
@@ -4507,9 +4537,8 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
}
int
-i915_gem_load_init(struct drm_device *dev)
+i915_gem_load_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int err = -ENOMEM;
dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
@@ -4578,10 +4607,8 @@ err_out:
return err;
}
-void i915_gem_load_cleanup(struct drm_device *dev)
+void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
WARN_ON(!llist_empty(&dev_priv->mm.free_list));
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -4732,7 +4759,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
-i915_gem_object_create_from_data(struct drm_device *dev,
+i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
const void *data, size_t size)
{
struct drm_i915_gem_object *obj;
@@ -4740,7 +4767,7 @@ i915_gem_object_create_from_data(struct drm_device *dev,
size_t bytes;
int ret;
- obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+ obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 51ec793f2e20..a585d47c420a 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -27,8 +27,10 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
+#define GEM_WARN_ON(expr) WARN_ON(expr)
#else
-#define GEM_BUG_ON(expr) do { } while (0)
+#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
#endif
#define I915_NUM_ENGINES 5
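
A brief usage note on the new macro pair (illustrative, not part of the patch): because the non-debug GEM_WARN_ON() expands to (BUILD_BUG_ON_INVALID(expr), 0), it can sit directly inside a condition, still has its argument syntax-checked when CONFIG_DRM_I915_DEBUG_GEM is off, yet generates no code there.

/* Hypothetical call site; some_precondition() is assumed, not real i915 API. */
static void example(struct drm_i915_private *i915)
{
	if (GEM_WARN_ON(!some_precondition(i915)))
		return;	/* warns and bails in debug builds; folds to 0 (no check) otherwise */

	/* ... */
}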
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 1f94b8d6d83d..17f90c618208 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -97,7 +97,7 @@
* part. It should be safe to decrease this, but it's more future proof as is.
*/
#define GEN6_CONTEXT_ALIGN (64<<10)
-#define GEN7_CONTEXT_ALIGN 4096
+#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
static size_t get_context_alignment(struct drm_i915_private *dev_priv)
{
@@ -141,7 +141,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
trace_i915_context_free(ctx);
- GEM_BUG_ON(!ctx->closed);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
i915_ppgtt_put(ctx->ppgtt);
@@ -166,15 +166,15 @@ void i915_gem_context_free(struct kref *ctx_ref)
kfree(ctx);
}
-struct drm_i915_gem_object *
-i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
+static struct drm_i915_gem_object *
+alloc_context_obj(struct drm_i915_private *dev_priv, u64 size)
{
struct drm_i915_gem_object *obj;
int ret;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
- obj = i915_gem_object_create(dev, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return obj;
@@ -193,7 +193,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
* This is only applicable for Ivy Bridge devices since
* later platforms don't have L3 control bits in the PTE.
*/
- if (IS_IVYBRIDGE(to_i915(dev))) {
+ if (IS_IVYBRIDGE(dev_priv)) {
ret = i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret)) {
@@ -205,31 +205,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
return obj;
}
-static void i915_ppgtt_close(struct i915_address_space *vm)
-{
- struct list_head *phases[] = {
- &vm->active_list,
- &vm->inactive_list,
- &vm->unbound_list,
- NULL,
- }, **phase;
-
- GEM_BUG_ON(vm->closed);
- vm->closed = true;
-
- for (phase = phases; *phase; phase++) {
- struct i915_vma *vma, *vn;
-
- list_for_each_entry_safe(vma, vn, *phase, vm_link)
- if (!i915_vma_is_closed(vma))
- i915_vma_close(vma);
- }
-}
-
static void context_close(struct i915_gem_context *ctx)
{
- GEM_BUG_ON(ctx->closed);
- ctx->closed = true;
+ i915_gem_context_set_closed(ctx);
if (ctx->ppgtt)
i915_ppgtt_close(&ctx->ppgtt->base);
ctx->file_priv = ERR_PTR(-EBADF);
@@ -259,10 +237,9 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
}
static struct i915_gem_context *
-__create_hw_context(struct drm_device *dev,
+__create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gem_context *ctx;
int ret;
@@ -286,14 +263,13 @@ __create_hw_context(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = i915_gem_alloc_context_obj(dev,
- dev_priv->hw_context_size);
+ obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_out;
}
- vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
ret = PTR_ERR(vma);
@@ -331,12 +307,21 @@ __create_hw_context(struct drm_device *dev,
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
- ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
+ i915_gem_context_set_bannable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
+ /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
+ * present or not in use we still need a small bias as ring wraparound
+ * at offset 0 sometimes hangs. No idea why.
+ */
+ if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
+ ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
+ else
+ ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
+
return ctx;
err_pid:
@@ -353,21 +338,21 @@ err_out:
* well as an idle case.
*/
static struct i915_gem_context *
-i915_gem_create_context(struct drm_device *dev,
+i915_gem_create_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
{
struct i915_gem_context *ctx;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
- ctx = __create_hw_context(dev, file_priv);
+ ctx = __create_hw_context(dev_priv, file_priv);
if (IS_ERR(ctx))
return ctx;
- if (USES_FULL_PPGTT(dev)) {
+ if (USES_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(to_i915(dev), file_priv, ctx->name);
+ ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -407,35 +392,24 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);
- ctx = i915_gem_create_context(dev, NULL);
+ ctx = __create_hw_context(to_i915(dev), NULL);
if (IS_ERR(ctx))
goto out;
- ctx->execlists_force_single_submission = true;
+ ctx->file_priv = ERR_PTR(-EBADF);
+ i915_gem_context_set_closed(ctx); /* not user accessible */
+ i915_gem_context_clear_bannable(ctx);
+ i915_gem_context_set_force_single_submission(ctx);
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
+
+ GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
mutex_unlock(&dev->struct_mutex);
return ctx;
}
-static void i915_gem_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- if (i915.enable_execlists) {
- intel_lr_context_unpin(ctx, engine);
- } else {
- struct intel_context *ce = &ctx->engine[engine->id];
-
- if (ce->state)
- i915_vma_unpin(ce->state);
-
- i915_gem_context_put(ctx);
- }
-}
-
-int i915_gem_context_init(struct drm_device *dev)
+int i915_gem_context_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gem_context *ctx;
/* Init should only be called once per module load. Eventually the
@@ -461,7 +435,8 @@ int i915_gem_context_init(struct drm_device *dev)
dev_priv->hw_context_size = 0;
} else if (HAS_HW_CONTEXTS(dev_priv)) {
dev_priv->hw_context_size =
- round_up(get_context_size(dev_priv), 4096);
+ round_up(get_context_size(dev_priv),
+ I915_GTT_PAGE_SIZE);
if (dev_priv->hw_context_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
dev_priv->hw_context_size);
@@ -469,16 +444,19 @@ int i915_gem_context_init(struct drm_device *dev)
}
}
- ctx = i915_gem_create_context(dev, NULL);
+ ctx = i915_gem_create_context(dev_priv, NULL);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context (error %ld)\n",
PTR_ERR(ctx));
return PTR_ERR(ctx);
}
+ i915_gem_context_clear_bannable(ctx);
ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
+ GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
+
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
dev_priv->hw_context_size ? "HW" : "fake");
@@ -493,10 +471,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
for_each_engine(engine, dev_priv, id) {
- if (engine->last_context) {
- i915_gem_context_unpin(engine->last_context, engine);
- engine->last_context = NULL;
- }
+ engine->legacy_active_context = NULL;
+
+ if (!engine->last_retired_context)
+ continue;
+
+ engine->context_unpin(engine, engine->last_retired_context);
+ engine->last_retired_context = NULL;
}
/* Force the GPU state to be restored on enabling */
@@ -522,12 +503,13 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
}
}
-void i915_gem_context_fini(struct drm_device *dev)
+void i915_gem_context_fini(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gem_context *dctx = dev_priv->kernel_context;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ GEM_BUG_ON(!i915_gem_context_is_kernel(dctx));
context_close(dctx);
dev_priv->kernel_context = NULL;
@@ -551,9 +533,11 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
idr_init(&file_priv->context_idr);
mutex_lock(&dev->struct_mutex);
- ctx = i915_gem_create_context(dev, file_priv);
+ ctx = i915_gem_create_context(to_i915(dev), file_priv);
mutex_unlock(&dev->struct_mutex);
+ GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+
if (IS_ERR(ctx)) {
idr_destroy(&file_priv->context_idr);
return PTR_ERR(ctx);
@@ -719,7 +703,7 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
- return to == engine->last_context;
+ return to == engine->legacy_active_context;
}
static bool
@@ -731,11 +715,11 @@ needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
return false;
/* Always load the ppgtt on first use */
- if (!engine->last_context)
+ if (!engine->legacy_active_context)
return true;
/* Same context without new entries, skip */
- if (engine->last_context == to &&
+ if (engine->legacy_active_context == to &&
!(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
return false;
@@ -765,57 +749,20 @@ needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
return false;
}
-struct i915_vma *
-i915_gem_context_pin_legacy(struct i915_gem_context *ctx,
- unsigned int flags)
-{
- struct i915_vma *vma = ctx->engine[RCS].state;
- int ret;
-
- /* Clear this page out of any CPU caches for coherent swap-in/out.
- * We only want to do this on the first bind so that we do not stall
- * on an active context (which by nature is already on the GPU).
- */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
- if (ret)
- return ERR_PTR(ret);
- }
-
- ret = i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
- if (ret)
- return ERR_PTR(ret);
-
- return vma;
-}
-
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct i915_gem_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
- struct i915_vma *vma;
- struct i915_gem_context *from;
+ struct i915_gem_context *from = engine->legacy_active_context;
u32 hw_flags;
int ret, i;
+ GEM_BUG_ON(engine->id != RCS);
+
if (skip_rcs_switch(ppgtt, engine, to))
return 0;
- /* Trying to pin first makes error handling easier. */
- vma = i915_gem_context_pin_legacy(to, 0);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- /*
- * Pin can switch back to the default context if we end up calling into
- * evict_everything - as a last ditch gtt defrag effort that also
- * switches to the default context. Hence we need to reload from here.
- *
- * XXX: Doing so is painfully broken!
- */
- from = engine->last_context;
-
if (needs_pd_load_pre(ppgtt, engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
@@ -824,7 +771,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
trace_switch_mm(engine, to);
ret = ppgtt->switch_mm(ppgtt, req);
if (ret)
- goto err;
+ return ret;
}
if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
@@ -841,29 +788,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags);
if (ret)
- goto err;
- }
+ return ret;
- /* The backing object for the context is done after switching to the
- * *next* context. Therefore we cannot retire the previous context until
- * the next context has already started running. In fact, the below code
- * is a bit suboptimal because the retiring can occur simply after the
- * MI_SET_CONTEXT instead of when the next seqno has completed.
- */
- if (from != NULL) {
- /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
- * whole damn pipeline, we don't need to explicitly mark the
- * object dirty. The only exception is that the context must be
- * correct in case the object gets swapped out. Ideally we'd be
- * able to defer doing this until we know the object would be
- * swapped, but there is no way to do that yet.
- */
- i915_vma_move_to_active(from->engine[RCS].state, req, 0);
- /* state is kept alive until the next request */
- i915_vma_unpin(from->engine[RCS].state);
- i915_gem_context_put(from);
+ engine->legacy_active_context = to;
}
- engine->last_context = i915_gem_context_get(to);
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
@@ -904,10 +832,6 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
}
return 0;
-
-err:
- i915_vma_unpin(vma);
- return ret;
}
/**
@@ -947,18 +871,32 @@ int i915_switch_context(struct drm_i915_gem_request *req)
ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
- if (to != engine->last_context) {
- if (engine->last_context)
- i915_gem_context_put(engine->last_context);
- engine->last_context = i915_gem_context_get(to);
- }
-
return 0;
}
return do_rcs_switch(req);
}
+static bool engine_has_kernel_context(struct intel_engine_cs *engine)
+{
+ struct i915_gem_timeline *timeline;
+
+ list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
+ struct intel_timeline *tl;
+
+ if (timeline == &engine->i915->gt.global_timeline)
+ continue;
+
+ tl = &timeline->engine[engine->id];
+ if (i915_gem_active_peek(&tl->last_request,
+ &engine->i915->drm.struct_mutex))
+ return false;
+ }
+
+ return (!engine->last_retired_context ||
+ i915_gem_context_is_kernel(engine->last_retired_context));
+}
+
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@@ -967,10 +905,15 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ i915_gem_retire_requests(dev_priv);
+
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *req;
int ret;
+ if (engine_has_kernel_context(engine))
+ continue;
+
req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -1003,6 +946,11 @@ static bool contexts_enabled(struct drm_device *dev)
return i915.enable_execlists || to_i915(dev)->hw_context_size;
}
+static bool client_is_banned(struct drm_i915_file_private *file_priv)
+{
+ return file_priv->context_bans > I915_MAX_CLIENT_CONTEXT_BANS;
+}
+
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -1017,17 +965,27 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
+ if (client_is_banned(file_priv)) {
+ DRM_DEBUG("client %s[%d] banned from creating ctx\n",
+ current->comm,
+ pid_nr(get_task_pid(current, PIDTYPE_PID)));
+
+ return -EIO;
+ }
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- ctx = i915_gem_create_context(dev, file_priv);
+ ctx = i915_gem_create_context(to_i915(dev), file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+
args->ctx_id = ctx->user_handle;
- DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
+ DRM_DEBUG("HW context %d created\n", args->ctx_id);
return 0;
}
@@ -1060,7 +1018,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
context_close(ctx);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
+ DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
return 0;
}
@@ -1085,7 +1043,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->size = 0;
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
- args->value = ctx->hang_stats.ban_period_seconds;
+ ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
@@ -1099,7 +1057,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = to_i915(dev)->ggtt.base.total;
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
- args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
+ args->value = i915_gem_context_no_error_capture(ctx);
+ break;
+ case I915_CONTEXT_PARAM_BANNABLE:
+ args->value = i915_gem_context_is_bannable(ctx);
break;
default:
ret = -EINVAL;
@@ -1130,13 +1091,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
switch (args->param) {
case I915_CONTEXT_PARAM_BAN_PERIOD:
- if (args->size)
- ret = -EINVAL;
- else if (args->value < ctx->hang_stats.ban_period_seconds &&
- !capable(CAP_SYS_ADMIN))
- ret = -EPERM;
- else
- ctx->hang_stats.ban_period_seconds = args->value;
+ ret = -EINVAL;
break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size) {
@@ -1147,14 +1102,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
}
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
- if (args->size) {
+ if (args->size)
ret = -EINVAL;
- } else {
- if (args->value)
- ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
- else
- ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
- }
+ else if (args->value)
+ i915_gem_context_set_no_error_capture(ctx);
+ else
+ i915_gem_context_clear_no_error_capture(ctx);
+ break;
+ case I915_CONTEXT_PARAM_BANNABLE:
+ if (args->size)
+ ret = -EINVAL;
+ else if (!capable(CAP_SYS_ADMIN) && !args->value)
+ ret = -EPERM;
+ else if (args->value)
+ i915_gem_context_set_bannable(ctx);
+ else
+ i915_gem_context_clear_bannable(ctx);
break;
default:
ret = -EINVAL;
@@ -1170,7 +1133,6 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reset_stats *args = data;
- struct i915_ctx_hang_stats *hs;
struct i915_gem_context *ctx;
int ret;
@@ -1189,15 +1151,14 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
return PTR_ERR(ctx);
}
- hs = &ctx->hang_stats;
if (capable(CAP_SYS_ADMIN))
args->reset_count = i915_reset_count(&dev_priv->gpu_error);
else
args->reset_count = 0;
- args->batch_active = hs->batch_active;
- args->batch_pending = hs->batch_pending;
+ args->batch_active = ctx->guilty_count;
+ args->batch_pending = ctx->active_count;
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
new file mode 100644
index 000000000000..0ac750b90f3d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_CONTEXT_H__
+#define __I915_GEM_CONTEXT_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+
+struct pid;
+
+struct drm_device;
+struct drm_file;
+
+struct drm_i915_private;
+struct drm_i915_file_private;
+struct i915_hw_ppgtt;
+struct i915_vma;
+struct intel_ring;
+
+#define DEFAULT_CONTEXT_HANDLE 0
+
+/**
+ * struct i915_gem_context - client state
+ *
+ * The struct i915_gem_context represents the combined view of the driver and
+ * logical hardware state for a particular client.
+ */
+struct i915_gem_context {
+ /** i915: i915 device backpointer */
+ struct drm_i915_private *i915;
+
+ /** file_priv: owning file descriptor */
+ struct drm_i915_file_private *file_priv;
+
+ /**
+ * @ppgtt: unique address space (GTT)
+ *
+ * In full-ppgtt mode, each context has its own address space ensuring
+ * complete separation of one client from all others.
+ *
+ * In other modes, this is a NULL pointer with the expectation that
+ * the caller uses the shared global GTT.
+ */
+ struct i915_hw_ppgtt *ppgtt;
+
+ /**
+ * @pid: process id of creator
+ *
+ * Note that the task which created the context may not be the principal
+ * user, as the context may be shared across a local socket. However,
+ * that should only affect the default context; all contexts created
+ * explicitly by the client are expected to be isolated.
+ */
+ struct pid *pid;
+
+ /**
+ * @name: arbitrary name
+ *
+ * A name is constructed for the context from the creator's process
+ * name, pid and user handle in order to uniquely identify the
+ * context in messages.
+ */
+ const char *name;
+
+ /** link: place within &drm_i915_private.context_list */
+ struct list_head link;
+
+ /**
+ * @ref: reference count
+ *
+ * A reference to a context is held by both the client who created it
+ * and on each request submitted to the hardware using the request
+ * (to ensure the hardware has access to the state until it has
+ * finished all pending writes). See i915_gem_context_get() and
+ * i915_gem_context_put() for access.
+ */
+ struct kref ref;
+
+ /**
+ * @flags: small set of booleans
+ */
+ unsigned long flags;
+#define CONTEXT_NO_ZEROMAP BIT(0)
+#define CONTEXT_NO_ERROR_CAPTURE 1
+#define CONTEXT_CLOSED 2
+#define CONTEXT_BANNABLE 3
+#define CONTEXT_BANNED 4
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 5
+
+ /**
+ * @hw_id: unique identifier for the context
+ *
+ * The hardware needs to uniquely identify the context for a few
+ * functions like fault reporting, PASID, scheduling. The
+ * &drm_i915_private.context_hw_ida is used to assign a unique
+ * id for the lifetime of the context.
+ */
+ unsigned int hw_id;
+
+ /**
+ * @user_handle: userspace identifier
+ *
+ * A unique per-file identifier is generated from
+ * &drm_i915_file_private.contexts.
+ */
+ u32 user_handle;
+
+ /**
+ * @priority: execution and service priority
+ *
+ * All clients are equal, but some are more equal than others!
+ *
+ * Requests from a context with a greater (more positive) value of
+ * @priority will be executed before those with a lower @priority
+ * value, forming a simple QoS.
+ *
+ * The &drm_i915_private.kernel_context is assigned the lowest priority.
+ */
+ int priority;
+
+ /** ggtt_alignment: alignment restriction for context objects */
+ u32 ggtt_alignment;
+ /** ggtt_offset_bias: placement restriction for context objects */
+ u32 ggtt_offset_bias;
+
+ /** engine: per-engine logical HW state */
+ struct intel_context {
+ struct i915_vma *state;
+ struct intel_ring *ring;
+ u32 *lrc_reg_state;
+ u64 lrc_desc;
+ int pin_count;
+ bool initialised;
+ } engine[I915_NUM_ENGINES];
+
+ /** ring_size: size for allocating the per-engine ring buffer */
+ u32 ring_size;
+ /** desc_template: invariant fields for the HW context descriptor */
+ u32 desc_template;
+
+ /** status_notifier: list of callbacks for context-switch changes */
+ struct atomic_notifier_head status_notifier;
+
+ /** guilty_count: How many times this context has caused a GPU hang. */
+ unsigned int guilty_count;
+ /**
+ * @active_count: How many times this context was active during a GPU
+ * hang, but did not cause it.
+ */
+ unsigned int active_count;
+
+#define CONTEXT_SCORE_GUILTY 10
+#define CONTEXT_SCORE_BAN_THRESHOLD 40
+ /** ban_score: Accumulated score of all hangs caused by this context. */
+ int ban_score;
+
+ /** remap_slice: Bitmask of cache lines that need remapping */
+ u8 remap_slice;
+};
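
A minimal sketch of the QoS rule described for @priority above (illustrative only; the comparison helper is hypothetical and not part of the patch):

static inline bool submits_before(const struct i915_gem_context *a,
				  const struct i915_gem_context *b)
{
	/* Higher (more positive) priority values are serviced first. */
	return a->priority > b->priority;
}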
+
+static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_CLOSED, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(i915_gem_context_is_closed(ctx));
+ __set_bit(CONTEXT_CLOSED, &ctx->flags);
+}
+
+static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
+{
+ __set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+}
+
+static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
+{
+ __clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
+}
+
+static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_BANNABLE, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
+{
+ __set_bit(CONTEXT_BANNABLE, &ctx->flags);
+}
+
+static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
+{
+ __clear_bit(CONTEXT_BANNABLE, &ctx->flags);
+}
+
+static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_BANNED, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
+{
+ __set_bit(CONTEXT_BANNED, &ctx->flags);
+}
+
+static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
+{
+ return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
+}
+
+static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
+{
+ __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
+}
+
+static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
+{
+ return c->user_handle == DEFAULT_CONTEXT_HANDLE;
+}
+
+static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
+{
+ return !ctx->file_priv;
+}
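
For illustration (not part of the patch), the helpers above compose the way the reset path in i915_gem.c uses them; a hypothetical mark-guilty routine would look roughly like:

static inline void example_mark_guilty(struct i915_gem_context *ctx)
{
	ctx->guilty_count++;
	ctx->ban_score += CONTEXT_SCORE_GUILTY;

	/* Only bannable contexts trip over into the banned state. */
	if (i915_gem_context_is_bannable(ctx) &&
	    ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD)
		i915_gem_context_set_banned(ctx);
}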
+
+/* i915_gem_context.c */
+int __must_check i915_gem_context_init(struct drm_i915_private *dev_priv);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
+void i915_gem_context_fini(struct drm_i915_private *dev_priv);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
+int i915_switch_context(struct drm_i915_gem_request *req);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+void i915_gem_context_free(struct kref *ctx_ref);
+struct i915_gem_context *
+i915_gem_context_create_gvt(struct drm_device *dev);
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#endif /* !__I915_GEM_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 5e38299b5df6..d037adcda6f2 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -278,7 +278,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- obj = i915_gem_object_alloc(dev);
+ obj = i915_gem_object_alloc(to_i915(dev));
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d534a316a16e..c181b1bb3d2c 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -51,7 +51,10 @@ static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
}
static bool
-mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
+mark_free(struct drm_mm_scan *scan,
+ struct i915_vma *vma,
+ unsigned int flags,
+ struct list_head *unwind)
{
if (i915_vma_is_pinned(vma))
return false;
@@ -63,7 +66,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
return false;
list_add(&vma->exec_list, unwind);
- return drm_mm_scan_add_block(&vma->node);
+ return drm_mm_scan_add_block(scan, &vma->node);
}
/**
@@ -96,7 +99,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
u64 start, u64 end,
unsigned flags)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
+ struct drm_mm_scan scan;
struct list_head eviction_list;
struct list_head *phases[] = {
&vm->inactive_list,
@@ -104,9 +108,11 @@ i915_gem_evict_something(struct i915_address_space *vm,
NULL,
}, **phase;
struct i915_vma *vma, *next;
+ struct drm_mm_node *node;
+ enum drm_mm_insert_mode mode;
int ret;
- lockdep_assert_held(&vm->dev->struct_mutex);
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict(vm, min_size, alignment, flags);
/*
@@ -122,14 +128,23 @@ i915_gem_evict_something(struct i915_address_space *vm,
* On each list, the oldest objects lie at the HEAD with the freshest
* object on the TAIL.
*/
- if (start != 0 || end != vm->total) {
- drm_mm_init_scan_with_range(&vm->mm, min_size,
- alignment, cache_level,
- start, end);
- } else
- drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
-
- if (flags & PIN_NONBLOCK)
+ mode = DRM_MM_INSERT_BEST;
+ if (flags & PIN_HIGH)
+ mode = DRM_MM_INSERT_HIGH;
+ if (flags & PIN_MAPPABLE)
+ mode = DRM_MM_INSERT_LOW;
+ drm_mm_scan_init_with_range(&scan, &vm->mm,
+ min_size, alignment, cache_level,
+ start, end, mode);
+
+ /* Retire before we search the active list. Although we have
+ * reasonable accuracy in our retirement lists, we may have
+ * a stray pin (preventing eviction) that can only be resolved by
+ * retiring.
+ */
+ if (!(flags & PIN_NONBLOCK))
+ i915_gem_retire_requests(dev_priv);
+ else
phases[1] = NULL;
search_again:
@@ -137,13 +152,13 @@ search_again:
phase = phases;
do {
list_for_each_entry(vma, *phase, vm_link)
- if (mark_free(vma, flags, &eviction_list))
+ if (mark_free(&scan, vma, flags, &eviction_list))
goto found;
} while (*++phase);
/* Nothing found, clean up and bail out! */
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
- ret = drm_mm_scan_remove_block(&vma->node);
+ ret = drm_mm_scan_remove_block(&scan, &vma->node);
BUG_ON(ret);
INIT_LIST_HEAD(&vma->exec_list);
@@ -162,7 +177,7 @@ search_again:
* back to userspace to give our workqueues time to
* acquire our locks and unpin the old scanouts.
*/
- return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
+ return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
}
/* Not everything in the GGTT is tracked via vma (otherwise we
@@ -192,7 +207,7 @@ found:
* of any of our objects, thus corrupting the list).
*/
list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
- if (drm_mm_scan_remove_block(&vma->node))
+ if (drm_mm_scan_remove_block(&scan, &vma->node))
__i915_vma_pin(vma);
else
list_del_init(&vma->exec_list);
@@ -210,48 +225,119 @@ found:
if (ret == 0)
ret = i915_vma_unbind(vma);
}
+
+ while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
+ vma = container_of(node, struct i915_vma, node);
+ ret = i915_vma_unbind(vma);
+ }
+
return ret;
}
-int
-i915_gem_evict_for_vma(struct i915_vma *target)
+/**
+ * i915_gem_evict_for_node - Evict vmas to make room for binding a new node
+ * @vm: address space to evict from
+ * @target: range (and color) to evict for
+ * @flags: additional flags to control the eviction algorithm
+ *
+ * This function will try to evict vmas that overlap the target node.
+ *
+ * To clarify: This is for freeing up virtual address space, not for freeing
+ * memory in e.g. the shrinker.
+ */
+int i915_gem_evict_for_node(struct i915_address_space *vm,
+ struct drm_mm_node *target,
+ unsigned int flags)
{
- struct drm_mm_node *node, *next;
+ LIST_HEAD(eviction_list);
+ struct drm_mm_node *node;
+ u64 start = target->start;
+ u64 end = start + target->size;
+ struct i915_vma *vma, *next;
+ bool check_color;
+ int ret = 0;
- lockdep_assert_held(&target->vm->dev->struct_mutex);
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ trace_i915_gem_evict_node(vm, target, flags);
- list_for_each_entry_safe(node, next,
- &target->vm->mm.head_node.node_list,
- node_list) {
- struct i915_vma *vma;
- int ret;
+ /* Retire before we search the active list. Although we have
+ * reasonable accuracy in our retirement lists, we may have
+ * a stray pin (preventing eviction) that can only be resolved by
+ * retiring.
+ */
+ if (!(flags & PIN_NONBLOCK))
+ i915_gem_retire_requests(vm->i915);
+
+ check_color = vm->mm.color_adjust;
+ if (check_color) {
+ /* Expand search to cover neighbouring guard pages (or lack!) */
+ if (start > vm->start)
+ start -= I915_GTT_PAGE_SIZE;
+ if (end < vm->start + vm->total)
+ end += I915_GTT_PAGE_SIZE;
+ }
- if (node->start + node->size <= target->node.start)
- continue;
- if (node->start >= target->node.start + target->node.size)
+ drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
+ /* If we find any non-objects (!vma), we cannot evict them */
+ if (node->color == I915_COLOR_UNEVICTABLE) {
+ ret = -ENOSPC;
break;
+ }
vma = container_of(node, typeof(*vma), node);
- if (i915_vma_is_pinned(vma)) {
- if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
- /* Object is pinned for some other use */
- return -EBUSY;
+ /* If we are using coloring to insert guard pages between
+ * different cache domains within the address space, we have
+ * to check whether the objects on either side of our range
+ * abut and conflict. If they are in conflict, then we evict
+ * those as well to make room for our guard pages.
+ */
+ if (check_color) {
+ if (vma->node.start + vma->node.size == node->start) {
+ if (vma->node.color == node->color)
+ continue;
+ }
+ if (vma->node.start == node->start + node->size) {
+ if (vma->node.color == node->color)
+ continue;
+ }
+ }
- /* We need to evict a buffer in the same batch */
- if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
- /* Overlapping fixed objects in the same batch */
- return -EINVAL;
+ if (flags & PIN_NONBLOCK &&
+ (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
+ ret = -ENOSPC;
+ break;
+ }
- return -ENOSPC;
+ /* Overlap of objects in the same batch? */
+ if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
+ ret = -ENOSPC;
+ if (vma->exec_entry &&
+ vma->exec_entry->flags & EXEC_OBJECT_PINNED)
+ ret = -EINVAL;
+ break;
}
- ret = i915_vma_unbind(vma);
- if (ret)
- return ret;
+ /* Never show fear in the face of dragons!
+ *
+ * We cannot directly remove this node from within this
+ * iterator and as with i915_gem_evict_something() we employ
+ * the vma pin_count in order to prevent the action of
+ * unbinding one vma from freeing (by dropping its active
+ * reference) another in our eviction list.
+ */
+ __i915_vma_pin(vma);
+ list_add(&vma->exec_list, &eviction_list);
}
- return 0;
+ list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+ list_del_init(&vma->exec_list);
+ __i915_vma_unpin(vma);
+ if (ret == 0)
+ ret = i915_vma_unbind(vma);
+ }
+
+ return ret;
}
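
The colour checks in the loop above reduce to a small predicate: an abutting neighbour only needs to be evicted when its cache colour differs, since matching colours require no guard page between them. A sketch of that rule (illustrative only; the helper name is hypothetical, not part of the patch):

static bool conflicts_with_guard_page(const struct drm_mm_node *neighbour,
				      const struct drm_mm_node *target)
{
	bool abuts_before = neighbour->start + neighbour->size == target->start;
	bool abuts_after  = neighbour->start == target->start + target->size;

	/* An abutting node of a different colour must go, to leave room
	 * for the guard page the colouring scheme demands.
	 */
	return (abuts_before || abuts_after) &&
	       neighbour->color != target->color;
}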
/**
@@ -273,11 +359,11 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
struct i915_vma *vma, *next;
int ret;
- lockdep_assert_held(&vm->dev->struct_mutex);
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
trace_i915_gem_evict_vm(vm);
if (do_idle) {
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
if (i915_is_ggtt(vm)) {
ret = i915_gem_switch_to_kernel_context(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b8b877c91b0a..d02cfaefe1c8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -184,7 +184,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
* from the (obj, vm) we don't run the risk of creating
* duplicated vmas for the same vm.
*/
- vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+ vma = i915_vma_instance(obj, vm, NULL);
if (unlikely(IS_ERR(vma))) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
@@ -274,6 +274,7 @@ static void eb_destroy(struct eb_vmas *eb)
exec_list);
list_del_init(&vma->exec_list);
i915_gem_execbuffer_unreserve_vma(vma);
+ vma->exec_entry = NULL;
i915_vma_put(vma);
}
kfree(eb);
@@ -435,12 +436,11 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
PIN_MAPPABLE | PIN_NONBLOCK);
if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node));
- ret = drm_mm_insert_node_in_range_generic
+ ret = drm_mm_insert_node_in_range
(&ggtt->base.mm, &cache->node,
- 4096, 0, 0,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
- DRM_MM_SEARCH_DEFAULT,
- DRM_MM_CREATE_DEFAULT);
+ DRM_MM_INSERT_LOW);
if (ret) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
@@ -850,8 +850,7 @@ eb_vma_misplaced(struct i915_vma *vma)
WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
!i915_vma_is_ggtt(vma));
- if (entry->alignment &&
- vma->node.start & (entry->alignment - 1))
+ if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
return true;
if (vma->node.size < entry->pad_to_size)
@@ -1232,14 +1231,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *engine, const u32 ctx_id)
{
struct i915_gem_context *ctx;
- struct i915_ctx_hang_stats *hs;
ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
if (IS_ERR(ctx))
return ctx;
- hs = &ctx->hang_stats;
- if (hs->banned) {
+ if (i915_gem_context_is_banned(ctx)) {
DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
return ERR_PTR(-EIO);
}
@@ -1260,6 +1257,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
struct drm_i915_gem_object *obj = vma->obj;
const unsigned int idx = req->engine->id;
+ lockdep_assert_held(&req->i915->drm.struct_mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
/* Add a reference if we're newly entering the active list.
@@ -1715,7 +1713,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
params->args_batch_start_offset = args->batch_start_offset;
- if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
+ if (engine->needs_cmd_parser && args->batch_len) {
struct i915_vma *vma;
vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 0efa3571afc3..fadbe8f4c745 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -77,16 +77,17 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
val = 0;
if (vma) {
- unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
- bool is_y_tiled = tiling == I915_TILING_Y;
unsigned int stride = i915_gem_object_get_stride(vma->obj);
- u32 row_size = stride * (is_y_tiled ? 32 : 8);
- u32 size = rounddown((u32)vma->node.size, row_size);
- val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
- val |= vma->node.start & 0xfffff000;
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
+ GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
+ GEM_BUG_ON(!IS_ALIGNED(stride, 128));
+
+ val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
+ val |= vma->node.start;
val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
- if (is_y_tiled)
+ if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
val |= BIT(I965_FENCE_TILING_Y_SHIFT);
val |= I965_FENCE_REG_VALID;
}
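
For reference, the gen4+ fence value assembled above packs the last page of the
fenced range into the upper dword, the first page into the lower dword, the
pitch in 128-byte units minus one, a Y-tiling flag and a valid bit. A rough
stand-alone rendering of that packing (the shift values below are placeholders,
not the real register layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FENCE_PAGE		4096ULL	/* stands in for I965_FENCE_PAGE */
#define FENCE_PITCH_SHIFT	2	/* placeholder bit positions */
#define FENCE_TILING_Y_SHIFT	1
#define FENCE_VALID		1ULL

static uint64_t pack_i965_fence(uint64_t start, uint64_t fence_size,
				unsigned int stride, bool y_tiled)
{
	uint64_t val;

	val  = (start + fence_size - FENCE_PAGE) << 32;	/* last fenced page */
	val |= start;					/* first fenced page */
	val |= (uint64_t)(stride / 128 - 1) << FENCE_PITCH_SHIFT;
	if (y_tiled)
		val |= 1ULL << FENCE_TILING_Y_SHIFT;
	return val | FENCE_VALID;
}

int main(void)
{
	/* a hypothetical 64KiB fence at GGTT offset 1MiB, X-tiled, 512B stride */
	printf("0x%016llx\n", (unsigned long long)
	       pack_i965_fence(1ull << 20, 64 << 10, 512, false));
	return 0;
}
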
@@ -122,31 +123,24 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
bool is_y_tiled = tiling == I915_TILING_Y;
unsigned int stride = i915_gem_object_get_stride(vma->obj);
- int pitch_val;
- int tile_width;
- WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
- !is_power_of_2(vma->node.size) ||
- (vma->node.start & (vma->node.size - 1)),
- "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
- vma->node.start,
- i915_vma_is_map_and_fenceable(vma),
- vma->node.size);
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
+ GEM_BUG_ON(!is_power_of_2(vma->fence_size));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
- tile_width = 128;
+ stride /= 128;
else
- tile_width = 512;
-
- /* Note: pitch better be a power of two tile widths */
- pitch_val = stride / tile_width;
- pitch_val = ffs(pitch_val) - 1;
+ stride /= 512;
+ GEM_BUG_ON(!is_power_of_2(stride));
val = vma->node.start;
if (is_y_tiled)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I915_FENCE_SIZE_BITS(vma->node.size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I915_FENCE_SIZE_BITS(vma->fence_size);
+ val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
+
val |= I830_FENCE_REG_VALID;
}
@@ -166,25 +160,19 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
val = 0;
if (vma) {
- unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
- bool is_y_tiled = tiling == I915_TILING_Y;
unsigned int stride = i915_gem_object_get_stride(vma->obj);
- u32 pitch_val;
-
- WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
- !is_power_of_2(vma->node.size) ||
- (vma->node.start & (vma->node.size - 1)),
- "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
- vma->node.start, vma->node.size);
- pitch_val = stride / 128;
- pitch_val = ffs(pitch_val) - 1;
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
+ GEM_BUG_ON(!is_power_of_2(vma->fence_size));
+ GEM_BUG_ON(!is_power_of_2(stride / 128));
+ GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
val = vma->node.start;
- if (is_y_tiled)
+ if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
val |= BIT(I830_FENCE_TILING_Y_SHIFT);
- val |= I830_FENCE_SIZE_BITS(vma->node.size);
- val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+ val |= I830_FENCE_SIZE_BITS(vma->fence_size);
+ val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
}
@@ -290,7 +278,7 @@ i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+ assert_rpm_wakelock_held(vma->vm->i915);
if (!fence)
return 0;
@@ -313,7 +301,7 @@ static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
}
/* Wait for completion of pending flips which consume fences */
- if (intel_has_pending_fb_unpin(&dev_priv->drm))
+ if (intel_has_pending_fb_unpin(dev_priv))
return ERR_PTR(-EAGAIN);
return ERR_PTR(-EDEADLK);
@@ -346,7 +334,7 @@ i915_vma_get_fence(struct i915_vma *vma)
/* Note that we revoke fences on runtime suspend. Therefore the user
* must keep the device awake whilst using the fence.
*/
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+ assert_rpm_wakelock_held(vma->vm->i915);
/* Just update our place in the LRU if our fence is getting reused. */
if (vma->fence) {
@@ -357,7 +345,7 @@ i915_vma_get_fence(struct i915_vma *vma)
return 0;
}
} else if (set) {
- fence = fence_find(to_i915(vma->vm->dev));
+ fence = fence_find(vma->vm->i915);
if (IS_ERR(fence))
return PTR_ERR(fence);
} else
@@ -367,6 +355,30 @@ i915_vma_get_fence(struct i915_vma *vma)
}
/**
+ * i915_gem_revoke_fences - revoke fence state
+ * @dev_priv: i915 device private
+ *
+ * Removes all GTT mmappings via the fence registers. This forces any user
+ * of the fence to reacquire that fence before continuing with their access.
+ * One use is during GPU reset where the fence register is lost and we need to
+ * revoke concurrent userspace access via GTT mmaps until the hardware has been
+ * reset and the fence registers have been restored.
+ */
+void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
+
+ if (fence->vma)
+ i915_gem_release_mmap(fence->vma->obj);
+ }
+}
+
+/**
* i915_gem_restore_fences - restore fence state
* @dev_priv: i915 device private
*
@@ -512,8 +524,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
- !IS_G33(dev_priv))) {
+ } else if (IS_MOBILE(dev_priv) ||
+ IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
uint32_t dcc;
/* On 9xx chipsets, channel interleave by the CPU is
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index 22c4a2d01adf..99a31ded4dfd 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -30,6 +30,8 @@
struct drm_i915_private;
struct i915_vma;
+#define I965_FENCE_PAGE 4096UL
+
struct drm_i915_fence_reg {
struct list_head link;
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b4bde1452f2a..2801a4d56324 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -23,10 +23,14 @@
*
*/
+#include <linux/log2.h>
+#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
+
#include <drm/drmP.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
@@ -99,12 +103,29 @@
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
-const struct i915_ggtt_view i915_ggtt_view_normal = {
- .type = I915_GGTT_VIEW_NORMAL,
-};
-const struct i915_ggtt_view i915_ggtt_view_rotated = {
- .type = I915_GGTT_VIEW_ROTATED,
-};
+static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
+{
+ /* Note that as an uncached mmio write, this should flush the
+ * WCB of the writes into the GGTT before it triggers the invalidate.
+ */
+ I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+}
+
+static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
+{
+ gen6_ggtt_invalidate(dev_priv);
+ I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
+static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
+{
+ intel_gtt_chipset_flush();
+}
+
+static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
+{
+ i915->ggtt.invalidate(i915);
+}
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
int enable_ppgtt)
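
The hunk above replaces the open-coded GFX_FLSH_CNTL writes that used to follow
every GGTT PTE update with a ggtt->invalidate() hook, so the GuC flavour (which
additionally writes GEN8_GTCR) can be switched in and out at runtime by the
i915_ggtt_enable_guc()/i915_ggtt_disable_guc() helpers added later in this
patch. A toy model of the pattern (illustrative types, not the driver's):

#include <stdio.h>

struct toy_ggtt {
	void (*invalidate)(struct toy_ggtt *ggtt);
};

static void gen6_invalidate(struct toy_ggtt *ggtt)
{
	(void)ggtt;
	printf("write GFX_FLSH_CNTL_GEN6\n");	/* stands in for the mmio write */
}

static void guc_invalidate(struct toy_ggtt *ggtt)
{
	gen6_invalidate(ggtt);			/* still flush the GGTT writes */
	printf("write GEN8_GTCR\n");		/* plus the GuC TLB invalidate */
}

int main(void)
{
	struct toy_ggtt ggtt = { .invalidate = gen6_invalidate };

	ggtt.invalidate(&ggtt);			/* normal path */
	ggtt.invalidate = guc_invalidate;	/* i915_ggtt_enable_guc() equivalent */
	ggtt.invalidate(&ggtt);
	return 0;
}
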
@@ -113,10 +134,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
bool has_full_ppgtt;
bool has_full_48bit_ppgtt;
- has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
- has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
- has_full_48bit_ppgtt =
- IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
+ has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
+ has_full_ppgtt = dev_priv->info.has_full_ppgtt;
+ has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
if (intel_vgpu_active(dev_priv)) {
/* emulation is too hard */
@@ -330,7 +350,7 @@ static int __setup_page_dma(struct drm_i915_private *dev_priv,
return -ENOMEM;
p->daddr = dma_map_page(kdev,
- p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, p->daddr)) {
__free_page(p->page);
@@ -354,7 +374,7 @@ static void cleanup_page_dma(struct drm_i915_private *dev_priv,
if (WARN_ON(!p->page))
return;
- dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(p->page);
memset(p, 0, sizeof(*p));
}
@@ -372,7 +392,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+ if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(vaddr);
@@ -380,7 +400,7 @@ static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) \
- kunmap_page_dma(to_i915((ppgtt)->base.dev), (vaddr))
+ kunmap_page_dma((ppgtt)->base.i915, (vaddr))
#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
@@ -470,7 +490,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);
- fill_px(to_i915(vm->dev), pt, scratch_pte);
+ fill_px(vm->i915, pt, scratch_pte);
}
static void gen6_initialize_pt(struct i915_address_space *vm,
@@ -483,7 +503,7 @@ static void gen6_initialize_pt(struct i915_address_space *vm,
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, 0);
- fill32_px(to_i915(vm->dev), pt, scratch_pte);
+ fill32_px(vm->i915, pt, scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
@@ -531,7 +551,7 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
- fill_px(to_i915(vm->dev), pd, scratch_pde);
+ fill_px(vm->i915, pd, scratch_pde);
}
static int __pdp_init(struct drm_i915_private *dev_priv,
@@ -612,7 +632,7 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- fill_px(to_i915(vm->dev), pdp, scratch_pdpe);
+ fill_px(vm->i915, pdp, scratch_pdpe);
}
static void gen8_initialize_pml4(struct i915_address_space *vm,
@@ -623,14 +643,14 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
I915_CACHE_LLC);
- fill_px(to_i915(vm->dev), pml4, scratch_pml4e);
+ fill_px(vm->i915, pml4, scratch_pml4e);
}
static void
-gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
- struct i915_page_directory_pointer *pdp,
- struct i915_page_directory *pd,
- int index)
+gen8_setup_pdpe(struct i915_hw_ppgtt *ppgtt,
+ struct i915_page_directory_pointer *pdp,
+ struct i915_page_directory *pd,
+ int index)
{
gen8_ppgtt_pdpe_t *page_directorypo;
@@ -643,10 +663,10 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
}
static void
-gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
- struct i915_pml4 *pml4,
- struct i915_page_directory_pointer *pdp,
- int index)
+gen8_setup_pml4e(struct i915_hw_ppgtt *ppgtt,
+ struct i915_pml4 *pml4,
+ struct i915_page_directory_pointer *pdp,
+ int index)
{
gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
@@ -710,7 +730,7 @@ static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(to_i915(ppgtt->base.dev))->ring_mask;
+ ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -735,10 +755,9 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
GEM_BUG_ON(pte_end > GEN8_PTES);
bitmap_clear(pt->used_ptes, pte, num_entries);
-
- if (bitmap_empty(pt->used_ptes, GEN8_PTES)) {
- free_pt(to_i915(vm->dev), pt);
- return true;
+ if (USES_FULL_PPGTT(vm->i915)) {
+ if (bitmap_empty(pt->used_ptes, GEN8_PTES))
+ return true;
}
pt_vaddr = kmap_px(pt);
@@ -775,13 +794,12 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
pde_vaddr = kmap_px(pd);
pde_vaddr[pde] = scratch_pde;
kunmap_px(ppgtt, pde_vaddr);
+ free_pt(vm->i915, pt);
}
}
- if (bitmap_empty(pd->used_pdes, I915_PDES)) {
- free_pd(to_i915(vm->dev), pd);
+ if (bitmap_empty(pd->used_pdes, I915_PDES))
return true;
- }
return false;
}
@@ -795,12 +813,8 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
uint64_t length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_page_directory *pd;
uint64_t pdpe;
- gen8_ppgtt_pdpe_t *pdpe_vaddr;
- gen8_ppgtt_pdpe_t scratch_pdpe =
- gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (WARN_ON(!pdp->page_directory[pdpe]))
@@ -808,21 +822,15 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
__clear_bit(pdpe, pdp->used_pdpes);
- if (USES_FULL_48BIT_PPGTT(dev_priv)) {
- pdpe_vaddr = kmap_px(pdp);
- pdpe_vaddr[pdpe] = scratch_pdpe;
- kunmap_px(ppgtt, pdpe_vaddr);
- }
+ gen8_setup_pdpe(ppgtt, pdp, vm->scratch_pd, pdpe);
+ free_pd(vm->i915, pd);
}
}
mark_tlbs_dirty(ppgtt);
- if (USES_FULL_48BIT_PPGTT(dev_priv) &&
- bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv))) {
- free_pdp(dev_priv, pdp);
+ if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)))
return true;
- }
return false;
}
@@ -839,11 +847,8 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory_pointer *pdp;
uint64_t pml4e;
- gen8_ppgtt_pml4e_t *pml4e_vaddr;
- gen8_ppgtt_pml4e_t scratch_pml4e =
- gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
- GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(to_i915(vm->dev)));
+ GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (WARN_ON(!pml4->pdps[pml4e]))
@@ -851,9 +856,8 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
__clear_bit(pml4e, pml4->used_pml4es);
- pml4e_vaddr = kmap_px(pml4);
- pml4e_vaddr[pml4e] = scratch_pml4e;
- kunmap_px(ppgtt, pml4e_vaddr);
+ gen8_setup_pml4e(ppgtt, pml4, vm->scratch_pdp, pml4e);
+ free_pdp(vm->i915, pdp);
}
}
}
@@ -863,7 +867,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
+ if (USES_FULL_48BIT_PPGTT(vm->i915))
gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
else
gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
@@ -898,7 +902,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
kunmap_px(ppgtt, pt_vaddr);
pt_vaddr = NULL;
if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(to_i915(vm->dev)))
+ if (++pdpe == I915_PDPES_PER_PDP(vm->i915))
break;
pde = 0;
}
@@ -921,7 +925,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
- if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
+ if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
cache_level);
} else {
@@ -955,7 +959,7 @@ static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
static int gen8_init_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
int ret;
ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
@@ -1002,7 +1006,7 @@ free_scratch_page:
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
enum vgt_g2v_type msg;
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
int i;
if (USES_FULL_48BIT_PPGTT(dev_priv)) {
@@ -1032,7 +1036,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
if (USES_FULL_48BIT_PPGTT(dev_priv))
free_pdp(dev_priv, vm->scratch_pdp);
@@ -1059,7 +1063,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
int i;
for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
@@ -1074,7 +1078,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (intel_vgpu_active(dev_priv))
@@ -1112,7 +1116,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pts)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_table *pt;
uint32_t pde;
@@ -1173,7 +1177,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pds)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory *pd;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
@@ -1226,7 +1230,7 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
uint64_t length,
unsigned long *new_pdps)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory_pointer *pdp;
uint32_t pml4e;
@@ -1301,7 +1305,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory *pd;
const uint64_t orig_start = start;
const uint64_t orig_length = length;
@@ -1309,15 +1313,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
int ret;
- /* Wrap is never okay since we can only represent 48b, and we don't
- * actually use the other side of the canonical address space.
- */
- if (WARN_ON(start + length < start))
- return -ENODEV;
-
- if (WARN_ON(start + length > vm->total))
- return -ENODEV;
-
ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
if (ret)
return ret;
@@ -1381,7 +1376,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
kunmap_px(ppgtt, page_directory);
__set_bit(pdpe, pdp->used_pdpes);
- gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
+ gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
}
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
@@ -1440,7 +1435,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
if (ret)
goto err_out;
- gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
+ gen8_setup_pml4e(ppgtt, pml4, pdp, pml4e);
}
bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
@@ -1450,7 +1445,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
err_out:
for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- gen8_ppgtt_cleanup_3lvl(to_i915(vm->dev), pml4->pdps[pml4e]);
+ gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);
return ret;
}
@@ -1460,7 +1455,7 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (USES_FULL_48BIT_PPGTT(to_i915(vm->dev)))
+ if (USES_FULL_48BIT_PPGTT(vm->i915))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
else
return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
@@ -1531,7 +1526,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC);
- if (!USES_FULL_48BIT_PPGTT(to_i915(vm->dev))) {
+ if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
} else {
uint64_t pml4e;
@@ -1584,7 +1579,7 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
int ret;
ret = gen8_init_scratch(&ppgtt->base);
@@ -1927,7 +1922,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
uint64_t start_in, uint64_t length_in)
{
DECLARE_BITMAP(new_page_tables, I915_PDES);
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
@@ -1935,9 +1930,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
uint32_t pde;
int ret;
- if (WARN_ON(start_in + length_in > ppgtt->base.total))
- return -ENODEV;
-
start = start_save = start_in;
length = length_save = length_in;
@@ -2014,7 +2006,7 @@ unwind_out:
static int gen6_init_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
int ret;
ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
@@ -2034,7 +2026,7 @@ static int gen6_init_scratch(struct i915_address_space *vm)
static void gen6_free_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
free_pt(dev_priv, vm->scratch_pt);
cleanup_scratch_page(dev_priv, &vm->scratch_page);
@@ -2044,7 +2036,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd = &ppgtt->pd;
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_table *pt;
uint32_t pde;
@@ -2060,9 +2052,8 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->base;
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- bool retried = false;
int ret;
/* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
@@ -2075,29 +2066,14 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
-alloc:
- ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
- &ppgtt->node, GEN6_PD_SIZE,
- GEN6_PD_ALIGN, 0,
- 0, ggtt->base.total,
- DRM_MM_TOPDOWN);
- if (ret == -ENOSPC && !retried) {
- ret = i915_gem_evict_something(&ggtt->base,
- GEN6_PD_SIZE, GEN6_PD_ALIGN,
- I915_CACHE_NONE,
- 0, ggtt->base.total,
- 0);
- if (ret)
- goto err_out;
-
- retried = true;
- goto alloc;
- }
-
+ ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
+ GEN6_PD_SIZE, GEN6_PD_ALIGN,
+ I915_COLOR_UNEVICTABLE,
+ 0, ggtt->base.total,
+ PIN_HIGH);
if (ret)
goto err_out;
-
if (ppgtt->node.start < ggtt->mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
@@ -2125,7 +2101,7 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+ struct drm_i915_private *dev_priv = ppgtt->base.i915;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
@@ -2176,7 +2152,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_private *dev_priv)
{
- ppgtt->base.dev = &dev_priv->drm;
+ ppgtt->base.i915 = dev_priv;
if (INTEL_INFO(dev_priv)->gen < 8)
return gen6_ppgtt_init(ppgtt);
@@ -2285,6 +2261,27 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
return ppgtt;
}
+void i915_ppgtt_close(struct i915_address_space *vm)
+{
+ struct list_head *phases[] = {
+ &vm->active_list,
+ &vm->inactive_list,
+ &vm->unbound_list,
+ NULL,
+ }, **phase;
+
+ GEM_BUG_ON(vm->closed);
+ vm->closed = true;
+
+ for (phase = phases; *phase; phase++) {
+ struct i915_vma *vma, *vn;
+
+ list_for_each_entry_safe(vma, vn, *phase, vm_link)
+ if (!i915_vma_is_closed(vma))
+ i915_vma_close(vma);
+ }
+}
+
void i915_ppgtt_release(struct kref *kref)
{
struct i915_hw_ppgtt *ppgtt =
@@ -2349,16 +2346,6 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
-static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
-{
- if (INTEL_INFO(dev_priv)->gen < 6) {
- intel_gtt_chipset_flush();
- } else {
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
- }
-}
-
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -2373,16 +2360,30 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
- i915_ggtt_flush(dev_priv);
+ i915_ggtt_invalidate(dev_priv);
}
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- if (dma_map_sg(&obj->base.dev->pdev->dev,
- pages->sgl, pages->nents,
- PCI_DMA_BIDIRECTIONAL))
- return 0;
+ do {
+ if (dma_map_sg(&obj->base.dev->pdev->dev,
+ pages->sgl, pages->nents,
+ PCI_DMA_BIDIRECTIONAL))
+ return 0;
+
+ /* If the DMA remap fails, one cause can be that we have
+ * too many objects pinned in a small remapping table,
+ * such as swiotlb. Incrementally purge all other objects and
+ * try again - if there are no more pages to remove from
+ * the DMA remapper, i915_gem_shrink will return 0.
+ */
+ GEM_BUG_ON(obj->mm.pages == pages);
+ } while (i915_gem_shrink(to_i915(obj->base.dev),
+ obj->base.size >> PAGE_SHIFT,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE));
return -ENOSPC;
}
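
The loop above keeps retrying dma_map_sg() after asking the shrinker to purge
ever more objects, so a temporarily exhausted remapping table (e.g. swiotlb)
does not immediately fail the bind; it gives up once the shrinker can free
nothing further. A minimal sketch of that retry shape, with stand-in helpers
rather than the real dma_map_sg()/i915_gem_shrink():

#include <errno.h>
#include <stdbool.h>

/* purely illustrative stubs */
static bool try_dma_map(void *pages) { (void)pages; return false; }
static unsigned long reclaim_pages(unsigned long want) { (void)want; return 0; }

static int map_with_reclaim(void *pages, unsigned long nr_pages)
{
	do {
		if (try_dma_map(pages))
			return 0;
		/* mapping failed: purge other objects and retry until the
		 * reclaimer reports nothing left to release
		 */
	} while (reclaim_pages(nr_pages));

	return -ENOSPC;
}

int main(void)
{
	char page[4096];

	return map_with_reclaim(page, 1) ? 1 : 0;
}
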
@@ -2398,15 +2399,13 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level level,
u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
- (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
- (offset >> PAGE_SHIFT);
+ (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
gen8_set_pte(pte, gen8_pte_encode(addr, level));
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ ggtt->invalidate(vm->i915);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2414,7 +2413,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 unused)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
@@ -2443,8 +2441,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ ggtt->invalidate(vm->i915);
}
struct insert_entries {
@@ -2479,15 +2476,13 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level level,
u32 flags)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *pte =
- (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
- (offset >> PAGE_SHIFT);
+ (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
iowrite32(vm->pte_encode(addr, level, flags), pte);
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ ggtt->invalidate(vm->i915);
}
/*
@@ -2501,7 +2496,6 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
uint64_t start,
enum i915_cache_level level, u32 flags)
{
- struct drm_i915_private *dev_priv = to_i915(vm->dev);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen6_pte_t __iomem *gtt_entries;
@@ -2529,8 +2523,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ ggtt->invalidate(vm->i915);
}
static void nop_clear_range(struct i915_address_space *vm,
@@ -2621,7 +2614,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
u32 pte_flags = 0;
int ret;
@@ -2653,7 +2646,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
- struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
u32 pte_flags;
int ret;
@@ -2687,7 +2680,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = to_i915(vma->vm->dev);
+ struct drm_i915_private *i915 = vma->vm->i915;
struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
const u64 size = min(vma->size, vma->node.size);
@@ -2721,19 +2714,17 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
-static void i915_gtt_color_adjust(struct drm_mm_node *node,
+static void i915_gtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
u64 *end)
{
if (node->color != color)
- *start += 4096;
+ *start += I915_GTT_PAGE_SIZE;
- node = list_first_entry_or_null(&node->node_list,
- struct drm_mm_node,
- node_list);
- if (node && node->allocated && node->color != color)
- *end -= 4096;
+ node = list_next_entry(node, node_list);
+ if (node->allocated && node->color != color)
+ *end -= I915_GTT_PAGE_SIZE;
}
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2758,11 +2749,10 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
return ret;
/* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
- &ggtt->error_capture,
- 4096, 0, -1,
- 0, ggtt->mappable_end,
- 0, 0);
+ ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
if (ret)
return ret;
@@ -2929,8 +2919,8 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
- struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
- struct pci_dev *pdev = ggtt->base.dev->pdev;
+ struct drm_i915_private *dev_priv = ggtt->base.i915;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
phys_addr_t phys_addr;
int ret;
@@ -2944,7 +2934,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -3042,12 +3032,12 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
- cleanup_scratch_page(to_i915(vm->dev), &vm->scratch_page);
+ cleanup_scratch_page(vm->i915, &vm->scratch_page);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+ struct drm_i915_private *dev_priv = ggtt->base.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3074,7 +3064,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+ if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
@@ -3091,12 +3081,14 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
if (IS_CHERRYVIEW(dev_priv))
ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
return ggtt_probe_common(ggtt, size);
}
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+ struct drm_i915_private *dev_priv = ggtt->base.i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int size;
u16 snb_gmch_ctl;
@@ -3128,6 +3120,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.unbind_vma = ggtt_unbind_vma;
ggtt->base.cleanup = gen6_gmch_remove;
+ ggtt->invalidate = gen6_ggtt_invalidate;
+
if (HAS_EDRAM(dev_priv))
ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev_priv))
@@ -3149,7 +3143,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
+ struct drm_i915_private *dev_priv = ggtt->base.i915;
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
@@ -3158,8 +3152,10 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
return -EIO;
}
- intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
- &ggtt->mappable_base, &ggtt->mappable_end);
+ intel_gtt_get(&ggtt->base.total,
+ &ggtt->stolen_size,
+ &ggtt->mappable_base,
+ &ggtt->mappable_end);
ggtt->do_idle_maps = needs_idle_maps(dev_priv);
ggtt->base.insert_page = i915_ggtt_insert_page;
@@ -3169,6 +3165,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.unbind_vma = ggtt_unbind_vma;
ggtt->base.cleanup = i915_gmch_remove;
+ ggtt->invalidate = gmch_ggtt_invalidate;
+
if (unlikely(ggtt->do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
@@ -3184,7 +3182,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- ggtt->base.dev = &dev_priv->drm;
+ ggtt->base.i915 = dev_priv;
if (INTEL_GEN(dev_priv) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -3195,6 +3193,16 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
+ * This is easier than doing range restriction on the fly, as we
+ * currently don't have any bits spare to pass in this upper
+ * restriction!
+ */
+ if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
+ ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
+ ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
+ }
+
if ((ggtt->base.total - 1) >> 32) {
DRM_ERROR("We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
@@ -3214,7 +3222,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
DRM_INFO("Memory usable by graphics device = %lluM\n",
ggtt->base.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
@@ -3277,6 +3285,16 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
+void i915_ggtt_enable_guc(struct drm_i915_private *i915)
+{
+ i915->ggtt.invalidate = guc_ggtt_invalidate;
+}
+
+void i915_ggtt_disable_guc(struct drm_i915_private *i915)
+{
+ i915->ggtt.invalidate = gen6_ggtt_invalidate;
+}
+
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -3314,7 +3332,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = false;
if (INTEL_GEN(dev_priv) >= 8) {
- if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+ if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
@@ -3340,52 +3358,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
}
}
- i915_ggtt_flush(dev_priv);
-}
-
-struct i915_vma *
-i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- struct rb_node *rb;
-
- rb = obj->vma_tree.rb_node;
- while (rb) {
- struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
- long cmp;
-
- cmp = i915_vma_compare(vma, vm, view);
- if (cmp == 0)
- return vma;
-
- if (cmp < 0)
- rb = rb->rb_right;
- else
- rb = rb->rb_left;
- }
-
- return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
-{
- struct i915_vma *vma;
-
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- GEM_BUG_ON(view && !i915_is_ggtt(vm));
-
- vma = i915_gem_obj_to_vma(obj, vm, view);
- if (!vma) {
- vma = i915_vma_create(obj, vm, view);
- GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
- }
-
- GEM_BUG_ON(i915_vma_is_closed(vma));
- return vma;
+ i915_ggtt_invalidate(dev_priv);
}
static struct scatterlist *
@@ -3485,7 +3458,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
{
struct sg_table *st;
struct scatterlist *sg, *iter;
- unsigned int count = view->params.partial.size;
+ unsigned int count = view->partial.size;
unsigned int offset;
int ret = -ENOMEM;
@@ -3497,9 +3470,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
if (ret)
goto err_sg_alloc;
- iter = i915_gem_object_get_sg(obj,
- view->params.partial.offset,
- &offset);
+ iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
GEM_BUG_ON(!iter);
sg = st->sgl;
@@ -3551,7 +3522,8 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
vma->pages = vma->obj->mm.pages;
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->pages =
- intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
+ intel_rotate_fb_obj_pages(&vma->ggtt_view.rotated,
+ vma->obj);
else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
else
@@ -3572,3 +3544,202 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
return ret;
}
+/**
+ * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
+ * @vm: the &struct i915_address_space
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
+ * @size: how much space to allocate inside the GTT,
+ * must be #I915_GTT_PAGE_SIZE aligned
+ * @offset: where to insert inside the GTT,
+ * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
+ * (@offset + @size) must fit within the address space
+ * @color: color to apply to node, if this node is not from a VMA,
+ * color must be #I915_COLOR_UNEVICTABLE
+ * @flags: control search and eviction behaviour
+ *
+ * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
+ * the address space (using @size and @color). If the @node does not fit, it
+ * tries to evict any overlapping nodes from the GTT, including any
+ * neighbouring nodes if the colors do not match (to ensure guard pages between
+ * differing domains). See i915_gem_evict_for_node() for the gory details
+ * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
+ * evicting active overlapping objects, and any overlapping node that is pinned
+ * or marked as unevictable will also result in failure.
+ *
+ * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
+ * asked to wait for eviction and interrupted.
+ */
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ u64 size, u64 offset, unsigned long color,
+ unsigned int flags)
+{
+ int err;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
+ GEM_BUG_ON(range_overflows(offset, size, vm->total));
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(drm_mm_node_allocated(node));
+
+ node->size = size;
+ node->start = offset;
+ node->color = color;
+
+ err = drm_mm_reserve_node(&vm->mm, node);
+ if (err != -ENOSPC)
+ return err;
+
+ err = i915_gem_evict_for_node(vm, node, flags);
+ if (err == 0)
+ err = drm_mm_reserve_node(&vm->mm, node);
+
+ return err;
+}
+
+static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
+{
+ u64 range, addr;
+
+ GEM_BUG_ON(range_overflows(start, len, end));
+ GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
+
+ range = round_down(end - len, align) - round_up(start, align);
+ if (range) {
+ if (sizeof(unsigned long) == sizeof(u64)) {
+ addr = get_random_long();
+ } else {
+ addr = get_random_int();
+ if (range > U32_MAX) {
+ addr <<= 32;
+ addr |= get_random_int();
+ }
+ }
+ div64_u64_rem(addr, range, &addr);
+ start += addr;
+ }
+
+ return round_up(start, align);
+}
+
+/**
+ * i915_gem_gtt_insert - insert a node into an address_space (GTT)
+ * @vm: the &struct i915_address_space
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
+ * @size: how much space to allocate inside the GTT,
+ * must be #I915_GTT_PAGE_SIZE aligned
+ * @alignment: required alignment of starting offset, may be 0 but
+ * if specified, this must be a power-of-two and at least
+ * #I915_GTT_MIN_ALIGNMENT
+ * @color: color to apply to node
+ * @start: start of any range restriction inside GTT (0 for all),
+ * must be #I915_GTT_PAGE_SIZE aligned
+ * @end: end of any range restriction inside GTT (U64_MAX for all),
+ * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
+ * @flags: control search and eviction behaviour
+ *
+ * i915_gem_gtt_insert() first searches for an available hole into which
+ * it can insert the node. The hole address is aligned to @alignment and
+ * its @size must then fit entirely within the [@start, @end] bounds. The
+ * nodes on either side of the hole must match @color, or else a guard page
+ * will be inserted between the two nodes (or the node evicted). If no
+ * suitable hole is found, first a victim is randomly selected and tested
+ * for eviction, and failing that the LRU list of objects within the GTT
+ * is scanned to find the first set of replacement nodes to create the hole.
+ * Those old overlapping nodes are evicted from the GTT (and so must be
+ * rebound before any future use). Any node that is currently pinned cannot
+ * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
+ * active and #PIN_NONBLOCK is specified, that node is also skipped when
+ * searching for an eviction candidate. See i915_gem_evict_something() for
+ * the gory details on the eviction algorithm.
+ *
+ * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
+ * asked to wait for eviction and interrupted.
+ */
+int i915_gem_gtt_insert(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ u64 size, u64 alignment, unsigned long color,
+ u64 start, u64 end, unsigned int flags)
+{
+ enum drm_mm_insert_mode mode;
+ u64 offset;
+ int err;
+
+ lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(alignment && !is_power_of_2(alignment));
+ GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
+ GEM_BUG_ON(start >= end);
+ GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+ GEM_BUG_ON(drm_mm_node_allocated(node));
+
+ if (unlikely(range_overflows(start, size, end)))
+ return -ENOSPC;
+
+ if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
+ return -ENOSPC;
+
+ mode = DRM_MM_INSERT_BEST;
+ if (flags & PIN_HIGH)
+ mode = DRM_MM_INSERT_HIGH;
+ if (flags & PIN_MAPPABLE)
+ mode = DRM_MM_INSERT_LOW;
+
+ /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+ * so we know that we always have a minimum alignment of 4096.
+ * The drm_mm range manager is optimised to return results
+ * with zero alignment, so where possible use the optimal
+ * path.
+ */
+ BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
+ if (alignment <= I915_GTT_MIN_ALIGNMENT)
+ alignment = 0;
+
+ err = drm_mm_insert_node_in_range(&vm->mm, node,
+ size, alignment, color,
+ start, end, mode);
+ if (err != -ENOSPC)
+ return err;
+
+ /* No free space, pick a slot at random.
+ *
+ * There is a pathological case here using a GTT shared between
+ * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
+ *
+ * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
+ * (64k objects) (448k objects)
+ *
+ * Now imagine that the eviction LRU is ordered top-down (just because
+ * pathology meets real life), and that we need to evict an object to
+ * make room inside the aperture. The eviction scan then has to walk
+ * the 448k list before it finds one within range. And now imagine that
+ * it has to search for a new hole between every byte inside the memcpy,
+ * for several simultaneous clients.
+ *
+ * On a full-ppgtt system, if we have run out of available space, there
+ * will be lots and lots of objects in the eviction list! Again,
+ * searching that LRU list may be slow if we are also applying any
+ * range restrictions (e.g. restriction to low 4GiB) and so, for
+	 * simplicity and similarity between different GTTs, try the single
+ * random replacement first.
+ */
+ offset = random_offset(start, end,
+ size, alignment ?: I915_GTT_MIN_ALIGNMENT);
+ err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
+ if (err != -ENOSPC)
+ return err;
+
+ /* Randomly selected placement is pinned, do a search */
+ err = i915_gem_evict_something(vm, size, alignment, color,
+ start, end, flags);
+ if (err)
+ return err;
+
+ return drm_mm_insert_node_in_range(&vm->mm, node,
+ size, alignment, color,
+ start, end, DRM_MM_INSERT_EVICT);
+}
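
Taken together, i915_gem_gtt_insert() above tries an ordinary drm_mm search
first, then a single randomly chosen aligned slot (evicting whatever overlaps
it), and only then falls back to the full LRU eviction scan, for the reasons
spelled out in the pathological-case comment. A small self-contained sketch of
the random slot selection, mirroring the shape of random_offset() (illustrative
code, not the kernel helper; 'align' is assumed to be a power of two):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

static uint64_t pick_random_offset(uint64_t start, uint64_t end,
				   uint64_t len, uint64_t align)
{
	uint64_t range, addr;

	/* usable window for an aligned allocation of 'len' bytes */
	range = ALIGN_DOWN(end - len, align) - ALIGN_UP(start, align);
	if (range) {
		/* crude randomness, good enough for a sketch */
		addr = ((uint64_t)rand() << 32) | (uint64_t)rand();
		start += addr % range;
	}
	return ALIGN_UP(start, align);
}

int main(void)
{
	/* e.g. place a 64KiB node somewhere inside a 256MiB aperture */
	printf("0x%llx\n", (unsigned long long)
	       pick_random_offset(0, 256ull << 20, 64 << 10, 4096));
	return 0;
}
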
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 4f35be4c26c7..3c5ef5358cef 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -40,6 +40,9 @@
#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
+#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
@@ -142,34 +145,57 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
struct sg_table;
-enum i915_ggtt_view_type {
- I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED,
- I915_GGTT_VIEW_PARTIAL,
-};
-
struct intel_rotation_info {
- struct {
+ struct intel_rotation_plane_info {
/* tiles */
unsigned int width, height, stride, offset;
} plane[2];
+} __packed;
+
+static inline void assert_intel_rotation_info_is_packed(void)
+{
+ BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+}
+
+struct intel_partial_info {
+ u64 offset;
+ unsigned int size;
+} __packed;
+
+static inline void assert_intel_partial_info_is_packed(void)
+{
+ BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+}
+
+enum i915_ggtt_view_type {
+ I915_GGTT_VIEW_NORMAL = 0,
+ I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+ I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
};
+static inline void assert_i915_ggtt_view_type_is_unique(void)
+{
+ /* As we encode the size of each branch inside the union into its type,
+ * we have to be careful that each branch has a unique size.
+ */
+ switch ((enum i915_ggtt_view_type)0) {
+ case I915_GGTT_VIEW_NORMAL:
+ case I915_GGTT_VIEW_PARTIAL:
+ case I915_GGTT_VIEW_ROTATED:
+ /* gcc complains if these are identical cases */
+ break;
+ }
+}
+
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
-
union {
- struct {
- u64 offset;
- unsigned int size;
- } partial;
+ /* Members need to contain no holes/padding */
+ struct intel_partial_info partial;
struct intel_rotation_info rotated;
- } params;
+ };
};
-extern const struct i915_ggtt_view i915_ggtt_view_normal;
-extern const struct i915_ggtt_view i915_ggtt_view_rotated;
-
enum i915_cache_level;
struct i915_vma;
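
The reworked i915_ggtt_view above packs each union member and makes the enum
value equal to that member's size; the payoff hinted at by the "unique size"
comment is that two views can then be compared with a single memcmp over
exactly view->type bytes of payload (zero bytes for the NORMAL view). A rough
sketch of such a comparison, assuming the same size-as-type trick:

#include <stdint.h>
#include <string.h>

struct partial_info { uint64_t offset; unsigned int size; } __attribute__((packed));
struct rotation_info { unsigned int plane[2][4]; } __attribute__((packed));

enum view_type {
	VIEW_NORMAL  = 0,
	VIEW_ROTATED = sizeof(struct rotation_info),
	VIEW_PARTIAL = sizeof(struct partial_info),
};

struct view {
	enum view_type type;
	union {
		struct partial_info partial;
		struct rotation_info rotated;
	};
};

/* Differing types differ outright; identical types are compared by memcmp
 * over 'type' bytes of the union, which only works because the members are
 * packed and their sizes are unique and double as the type tag.
 */
static int view_compare(const struct view *a, const struct view *b)
{
	if (a->type != b->type)
		return (int)a->type - (int)b->type;
	return memcmp(&a->partial, &b->partial, a->type);
}

int main(void)
{
	struct view a = { .type = VIEW_PARTIAL, .partial = { 4096, 16 } };
	struct view b = { .type = VIEW_PARTIAL, .partial = { 4096, 32 } };

	return view_compare(&a, &b) ? 0 : 1;	/* payloads differ -> nonzero */
}
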
@@ -220,7 +246,7 @@ struct i915_pml4 {
struct i915_address_space {
struct drm_mm mm;
struct i915_gem_timeline timeline;
- struct drm_device *dev;
+ struct drm_i915_private *i915;
/* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In
* principle, no information should leak from one context to another
@@ -315,15 +341,25 @@ struct i915_ggtt {
struct i915_address_space base;
struct io_mapping mappable; /* Mapping to our CPU mappable region */
- size_t stolen_size; /* Total size of stolen memory */
- size_t stolen_usable_size; /* Total size minus BIOS reserved */
- size_t stolen_reserved_base;
- size_t stolen_reserved_size;
- u64 mappable_end; /* End offset that we can CPU map */
phys_addr_t mappable_base; /* PA of our GMADR */
+ u64 mappable_end; /* End offset that we can CPU map */
+
+ /* Stolen memory is segmented in hardware with different portions
+ * offlimits to certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of stolen memory is reserved for
+ * hardware functions and similarly removed from the accessible range.
+ */
+ u32 stolen_size; /* Total size of stolen memory */
+ u32 stolen_usable_size; /* Total size minus reserved ranges */
+ u32 stolen_reserved_base;
+ u32 stolen_reserved_size;
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
+ void (*invalidate)(struct drm_i915_private *dev_priv);
bool do_idle_maps;
@@ -492,6 +528,8 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
+void i915_ggtt_enable_guc(struct drm_i915_private *i915);
+void i915_ggtt_disable_guc(struct drm_i915_private *i915);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
@@ -500,6 +538,7 @@ void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *fpriv,
const char *name);
+void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -520,6 +559,16 @@ int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ u64 size, u64 offset, unsigned long color,
+ unsigned int flags);
+
+int i915_gem_gtt_insert(struct i915_address_space *vm,
+ struct drm_mm_node *node,
+ u64 size, u64 alignment, unsigned long color,
+ u64 start, u64 end, unsigned int flags);
+
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
#define PIN_MAPPABLE BIT(1)
@@ -534,6 +583,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
#define PIN_HIGH BIT(9)
#define PIN_OFFSET_BIAS BIT(10)
#define PIN_OFFSET_FIXED BIT(11)
-#define PIN_OFFSET_MASK (~4095)
+#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index d09c74973cb3..933019e1b206 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -46,24 +46,12 @@ static struct sg_table *
i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned int npages = obj->base.size / PAGE_SIZE;
struct sg_table *st;
struct scatterlist *sg;
+ unsigned int npages;
int max_order;
gfp_t gfp;
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- return ERR_PTR(-ENOMEM);
-
- if (sg_alloc_table(st, npages, GFP_KERNEL)) {
- kfree(st);
- return ERR_PTR(-ENOMEM);
- }
-
- sg = st->sgl;
- st->nents = 0;
-
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
@@ -79,12 +67,26 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
#endif
gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
- if (IS_CRESTLINE(i915) || IS_BROADWATER(i915)) {
+ if (IS_I965GM(i915) || IS_I965G(i915)) {
/* 965gm cannot relocate objects above 4GiB. */
gfp &= ~__GFP_HIGHMEM;
gfp |= __GFP_DMA32;
}
+create_st:
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ npages = obj->base.size / PAGE_SIZE;
+ if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+ kfree(st);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = st->sgl;
+ st->nents = 0;
+
do {
int order = min(fls(npages) - 1, max_order);
struct page *page;
@@ -112,8 +114,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
sg = __sg_next(sg);
} while (1);
- if (i915_gem_gtt_prepare_pages(obj, st))
+ if (i915_gem_gtt_prepare_pages(obj, st)) {
+ /* Failed to dma-map, try again with single-page sg segments */
+ if (get_order(st->sgl->length)) {
+ internal_free_pages(st);
+ max_order = 0;
+ goto create_st;
+ }
goto err;
+ }
/* Mark the pages as dontneed whilst they are still pinned. As soon
* as they are unpinned they are allowed to be reaped by the shrinker,
@@ -159,11 +168,17 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
*/
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
- unsigned int size)
+ phys_addr_t size)
{
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(&i915->drm);
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
if (!obj)
return ERR_PTR(-ENOMEM);
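
To see what the reworked loop above does once a dma-mapping failure forces max_order back to 0, here is a standalone user-space model of the chunking only (hypothetical page counts; fls_ is a stand-in for the kernel's fls(), and nothing is actually allocated):

#include <stdio.h>

static int fls_(unsigned int x)			/* highest set bit, like fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int npages = 37;	/* hypothetical object size in pages */
	int max_order = 3;		/* becomes 0 after the dma-map fallback */

	while (npages) {
		int order = fls_(npages) - 1;

		if (order > max_order)
			order = max_order;
		printf("allocate a 2^%d page chunk\n", order);
		npages -= 1u << order;
	}
	return 0;
}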
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index ecfefb9d42e4..bf90b07163d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -317,6 +317,29 @@ i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
return obj->tiling_and_stride & STRIDE_MASK;
}
+static inline unsigned int
+i915_gem_tile_height(unsigned int tiling)
+{
+ GEM_BUG_ON(!tiling);
+ return tiling == I915_TILING_Y ? 32 : 8;
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
+{
+ return (i915_gem_object_get_stride(obj) *
+ i915_gem_object_get_tile_height(obj));
+}
+
+int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ unsigned int tiling, unsigned int stride);
+
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
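
A quick worked example of the tile geometry the new inline helpers encode (standalone arithmetic with made-up strides, not driver code): X tiles are 8 rows high and Y tiles 32, so a tile row spans the stride times that height.

#include <stdio.h>

int main(void)
{
	unsigned int x_stride = 512, y_stride = 128;	/* hypothetical strides */

	printf("X-tiled row: %u bytes\n", x_stride * 8);	/* 4096 */
	printf("Y-tiled row: %u bytes\n", y_stride * 32);	/* 4096 */
	return 0;
}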
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 5af19b0bf713..b42c81b42487 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -187,20 +187,20 @@ int i915_gem_render_state_init(struct intel_engine_cs *engine)
if (!rodata)
return 0;
- if (rodata->batch_items * 4 > 4096)
+ if (rodata->batch_items * 4 > PAGE_SIZE)
return -EINVAL;
so = kmalloc(sizeof(*so), GFP_KERNEL);
if (!so)
return -ENOMEM;
- obj = i915_gem_object_create_internal(engine->i915, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_free;
}
- so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(so->vma)) {
ret = PTR_ERR(so->vma);
goto err_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index b8f403faadbb..f31deeb72703 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -62,6 +62,15 @@ static void i915_fence_release(struct dma_fence *fence)
{
struct drm_i915_gem_request *req = to_request(fence);
+ /* The request is put onto a RCU freelist (i.e. the address
+ * is immediately reused), mark the fences as being freed now.
+ * Otherwise the debugobjects for the fences are only marked as
+ * freed when the slab cache itself is freed, and so we would get
+ * caught trying to reuse dead objects.
+ */
+ i915_sw_fence_fini(&req->submit);
+ i915_sw_fence_fini(&req->execute);
+
kmem_cache_free(req->i915->requests, req);
}
@@ -197,6 +206,7 @@ void i915_gem_retire_noop(struct i915_gem_active *active,
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
+ struct intel_engine_cs *engine = request->engine;
struct i915_gem_active *active, *next;
lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -207,9 +217,9 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
trace_i915_gem_request_retire(request);
- spin_lock_irq(&request->engine->timeline->lock);
+ spin_lock_irq(&engine->timeline->lock);
list_del_init(&request->link);
- spin_unlock_irq(&request->engine->timeline->lock);
+ spin_unlock_irq(&engine->timeline->lock);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
@@ -257,13 +267,20 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
i915_gem_request_remove_from_client(request);
- if (request->previous_context) {
- if (i915.enable_execlists)
- intel_lr_context_unpin(request->previous_context,
- request->engine);
- }
+ /* Retirement decays the ban score as it is a sign of ctx progress */
+ if (request->ctx->ban_score > 0)
+ request->ctx->ban_score--;
- i915_gem_context_put(request->ctx);
+ /* The backing object for the context is done after switching to the
+ * *next* context. Therefore we cannot retire the previous context until
+ * the next context has already started running. However, since we
+ * cannot take the required locks at i915_gem_request_submit() we
+ * defer the unpinning of the active context to now, retirement of
+ * the subsequent request.
+ */
+ if (engine->last_retired_context)
+ engine->context_unpin(engine, engine->last_retired_context);
+ engine->last_retired_context = request->ctx;
dma_fence_signal(&request->fence);
@@ -277,6 +294,8 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&req->i915->drm.struct_mutex);
+ GEM_BUG_ON(!i915_gem_request_completed(req));
+
if (list_empty(&req->link))
return;
@@ -288,26 +307,6 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
} while (tmp != req);
}
-static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
-{
- struct i915_gpu_error *error = &dev_priv->gpu_error;
-
- if (i915_terminally_wedged(error))
- return -EIO;
-
- if (i915_reset_in_progress(error)) {
- /* Non-interruptible callers can't handle -EAGAIN, hence return
- * -EIO unconditionally for these.
- */
- if (!dev_priv->mm.interruptible)
- return -EIO;
-
- return -EAGAIN;
- }
-
- return 0;
-}
-
static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
@@ -326,11 +325,11 @@ static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
GEM_BUG_ON(i915->gt.active_requests > 1);
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
+ if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
while (intel_breadcrumbs_busy(i915))
cond_resched(); /* spin until threads are complete */
}
- atomic_set(&timeline->next_seqno, seqno);
+ atomic_set(&timeline->seqno, seqno);
/* Finally reset hw state */
for_each_engine(engine, i915, id)
@@ -365,11 +364,11 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
static int reserve_global_seqno(struct drm_i915_private *i915)
{
u32 active_requests = ++i915->gt.active_requests;
- u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
+ u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
int ret;
/* Reservation is fine until we need to wrap around */
- if (likely(next_seqno + active_requests > next_seqno))
+ if (likely(seqno + active_requests > seqno))
return 0;
ret = i915_gem_init_global_seqno(i915, 0);
@@ -383,13 +382,13 @@ static int reserve_global_seqno(struct drm_i915_private *i915)
static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
{
- /* next_seqno only incremented under a mutex */
- return ++tl->next_seqno.counter;
+ /* seqno only incremented under a mutex */
+ return ++tl->seqno.counter;
}
static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
{
- return atomic_inc_return(&tl->next_seqno);
+ return atomic_inc_return(&tl->seqno);
}
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
@@ -502,16 +501,22 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
- * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
- * and restart.
+ * EIO if the GPU is already wedged.
+ */
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return ERR_PTR(-EIO);
+
+ /* Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
*/
- ret = i915_gem_check_wedge(dev_priv);
+ ret = engine->context_pin(engine, ctx);
if (ret)
return ERR_PTR(ret);
ret = reserve_global_seqno(dev_priv);
if (ret)
- return ERR_PTR(ret);
+ goto err_unpin;
/* Move the oldest request to the slab-cache (if not in use!) */
req = list_first_entry_or_null(&engine->timeline->requests,
@@ -578,11 +583,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
INIT_LIST_HEAD(&req->active_list);
req->i915 = dev_priv;
req->engine = engine;
- req->ctx = i915_gem_context_get(ctx);
+ req->ctx = ctx;
/* No zalloc, must clear what we need by hand */
req->global_seqno = 0;
- req->previous_context = NULL;
req->file_priv = NULL;
req->batch = NULL;
@@ -596,10 +600,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
- if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req);
- else
- ret = intel_ring_alloc_request_extras(req);
+ ret = engine->request_alloc(req);
if (ret)
goto err_ctx;
@@ -613,10 +614,16 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
return req;
err_ctx:
- i915_gem_context_put(ctx);
+ /* Make sure we didn't add ourselves to external state before freeing */
+ GEM_BUG_ON(!list_empty(&req->active_list));
+ GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
+ GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
+
kmem_cache_free(dev_priv->requests, req);
err_unreserve:
dev_priv->gt.active_requests--;
+err_unpin:
+ engine->context_unpin(engine, ctx);
return ERR_PTR(ret);
}
@@ -822,6 +829,13 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
lockdep_assert_held(&request->i915->drm.struct_mutex);
trace_i915_gem_request_add(request);
+ /* Make sure that no request gazumped us - if it was allocated after
+ * our i915_gem_request_alloc() and called __i915_add_request() before
+ * us, the timeline will hold its seqno which is later than ours.
+ */
+ GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
+ request->fence.seqno));
+
/*
* To ensure that this call will not fail, space for its emissions
* should already have been reserved in the ring buffer. Let the ring
@@ -1011,8 +1025,13 @@ __i915_request_wait_for_execute(struct drm_i915_gem_request *request,
break;
}
+ if (!timeout) {
+ timeout = -ETIME;
+ break;
+ }
+
timeout = io_schedule_timeout(timeout);
- } while (timeout);
+ } while (1);
finish_wait(&request->execute.wait, &wait);
if (flags & I915_WAIT_LOCKED)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index d229f47d1028..ea511f06efaf 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -170,17 +170,6 @@ struct drm_i915_gem_request {
/** Preallocate space in the ring for the emitting the request */
u32 reserved_space;
- /**
- * Context related to the previous request.
- * As the contexts are accessed by the hardware until the switch is
- * completed to a new context, the hardware may still be writing
- * to the context object after the breadcrumb is visible. We must
- * not unpin/unbind/prune that object whilst still active and so
- * we keep the previous context pinned until the following (this)
- * request is retired.
- */
- struct i915_gem_context *previous_context;
-
/** Batch buffer related to this request if any (used for
* error state dump only).
*/
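
The check in reserve_global_seqno() above leans on unsigned wrap-around: seqno + active_requests only fails to exceed seqno when the u32 sum wraps past 2^32, which is exactly when the global seqno has to be reset. A standalone sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t seqno = 0xfffffff0u;	/* hypothetical current global seqno */
	uint32_t active = 0x20;		/* requests about to be reserved */

	if (seqno + active > seqno)
		printf("reservation fits without wrapping\n");
	else
		printf("would wrap: reset the global seqno first\n");
	return 0;
}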
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index abc78bbfc1dc..9673bcc3b6ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -54,16 +54,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- /* See the comment at the drm_mm_init() call for more about this check.
- * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
- */
- if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
- start = 4096;
-
mutex_lock(&dev_priv->mm.stolen_lock);
- ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
- alignment, start, end,
- DRM_MM_SEARCH_DEFAULT);
+ ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
+ size, alignment, 0,
+ start, end, DRM_MM_INSERT_BEST);
mutex_unlock(&dev_priv->mm.stolen_lock);
return ret;
@@ -73,11 +67,8 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
- alignment, 0,
- ggtt->stolen_usable_size);
+ alignment, 0, U64_MAX);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -152,7 +143,7 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
tom = tmp * MB(32);
base = tom - tseg_size - ggtt->stolen_size;
- } else if (IS_845G(dev_priv)) {
+ } else if (IS_I845G(dev_priv)) {
u32 tseg_size = 0;
u32 tom;
u8 tmp;
@@ -202,8 +193,8 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
- if (INTEL_GEN(dev_priv) <= 4 && !IS_G33(dev_priv) &&
- !IS_G4X(dev_priv)) {
+ if (INTEL_GEN(dev_priv) <= 4 &&
+ !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
struct {
u32 start, end;
} stolen[2] = {
@@ -290,14 +281,13 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
}
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+ phys_addr_t *base, u32 *size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
- unsigned long stolen_top = dev_priv->mm.stolen_base +
- ggtt->stolen_size;
+ phys_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
@@ -314,7 +304,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+ phys_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -340,7 +330,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+ phys_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -359,8 +349,8 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
}
-static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
+ phys_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -386,11 +376,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
- unsigned long *base, unsigned long *size)
+ phys_addr_t *base, u32 *size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- unsigned long stolen_top;
+ phys_addr_t stolen_top;
stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
@@ -409,11 +399,17 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long reserved_total, reserved_base = 0, reserved_size;
- unsigned long stolen_top;
+ phys_addr_t reserved_base, stolen_top;
+ u32 reserved_total, reserved_size;
+ u32 stolen_usable_start;
mutex_init(&dev_priv->mm.stolen_lock);
+ if (intel_vgpu_active(dev_priv)) {
+ DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
+ return 0;
+ }
+
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
@@ -429,6 +425,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
return 0;
stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ reserved_base = 0;
+ reserved_size = 0;
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
@@ -436,8 +434,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
break;
case 4:
if (IS_G4X(dev_priv))
- g4x_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ g4x_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
case 5:
/* Assume the gen6 maximum for the older platforms. */
@@ -445,21 +443,20 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
reserved_base = stolen_top - reserved_size;
break;
case 6:
- gen6_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ gen6_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
case 7:
- gen7_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ gen7_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
default:
- if (IS_BROADWELL(dev_priv) ||
- IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- bdw_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ if (IS_LP(dev_priv))
+ chv_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
else
- gen8_get_stolen_reserved(dev_priv, &reserved_base,
- &reserved_size);
+ bdw_get_stolen_reserved(dev_priv,
+ &reserved_base, &reserved_size);
break;
}
@@ -472,9 +469,10 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
if (reserved_base < dev_priv->mm.stolen_base ||
reserved_base + reserved_size > stolen_top) {
- DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
- reserved_base, reserved_base + reserved_size,
- dev_priv->mm.stolen_base, stolen_top);
+ phys_addr_t reserved_top = reserved_base + reserved_size;
+ DRM_DEBUG_KMS("Stolen reserved area [%pa - %pa] outside stolen memory [%pa - %pa]\n",
+ &reserved_base, &reserved_top,
+ &dev_priv->mm.stolen_base, &stolen_top);
return 0;
}
@@ -485,24 +483,21 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
- DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
+ DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
ggtt->stolen_size >> 10,
(ggtt->stolen_size - reserved_total) >> 10);
- ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
+ stolen_usable_start = 0;
+ /* WaSkipStolenMemoryFirstPage:bdw+ */
+ if (INTEL_GEN(dev_priv) >= 8)
+ stolen_usable_start = 4096;
- /*
- * Basic memrange allocator for stolen space.
- *
- * TODO: Notice that some platforms require us to not use the first page
- * of the stolen memory but their BIOSes may still put the framebuffer
- * on the first page. So we don't reserve this page for now because of
- * that. Our current solution is to just prevent new nodes from being
- * inserted on the first page - see the check we have at
- * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
- * problem later.
- */
- drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
+ ggtt->stolen_usable_size =
+ ggtt->stolen_size - reserved_total - stolen_usable_start;
+
+ /* Basic memrange allocator for stolen space. */
+ drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
+ ggtt->stolen_usable_size);
return 0;
}
@@ -515,7 +510,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
+ GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -578,22 +573,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
};
static struct drm_i915_gem_object *
-_i915_gem_object_create_stolen(struct drm_device *dev,
+_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_mm_node *stolen)
{
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(dev);
+ obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
return NULL;
- drm_gem_private_object_init(dev, &obj->base, stolen->size);
+ drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->stolen = stolen;
obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
- obj->cache_level = HAS_LLC(to_i915(dev)) ?
- I915_CACHE_LLC : I915_CACHE_NONE;
+ obj->cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
if (i915_gem_object_pin_pages(obj))
goto cleanup;
@@ -606,9 +600,8 @@ cleanup:
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
+i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
int ret;
@@ -629,7 +622,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
return NULL;
}
- obj = _i915_gem_object_create_stolen(dev, stolen);
+ obj = _i915_gem_object_create_stolen(dev_priv, stolen);
if (obj)
return obj;
@@ -639,12 +632,11 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
}
struct drm_i915_gem_object *
-i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
+i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
u32 stolen_offset,
u32 gtt_offset,
u32 size)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
@@ -654,14 +646,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
- lockdep_assert_held(&dev->struct_mutex);
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
stolen_offset, gtt_offset, size);
/* KISS and expect everything to be page-aligned */
- if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
- WARN_ON(stolen_offset & 4095))
+ if (WARN_ON(size == 0) ||
+ WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+ WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
return NULL;
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
@@ -679,7 +672,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
return NULL;
}
- obj = _i915_gem_object_create_stolen(dev, stolen);
+ obj = _i915_gem_object_create_stolen(dev_priv, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
i915_gem_stolen_remove_node(dev_priv, stolen);
@@ -695,7 +688,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (ret)
goto err;
- vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
+ vma = i915_vma_instance(obj, &ggtt->base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_pages;
@@ -706,15 +699,16 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
* setting up the GTT space. The actual reservation will occur
* later.
*/
- vma->node.start = gtt_offset;
- vma->node.size = size;
-
- ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+ ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+ size, gtt_offset, obj->cache_level,
+ 0);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err_pages;
}
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma);
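
The stolen-memory rework above folds WaSkipStolenMemoryFirstPage into the usable-size computation rather than filtering allocations afterwards. A standalone sketch of that arithmetic with invented sizes (a 64 MiB stolen region whose top 4 MiB is hardware-reserved, on a gen8+ part that also skips the first page):

#include <stdio.h>

int main(void)
{
	unsigned int stolen_size = 64u << 20;	/* total stolen memory */
	unsigned int reserved_total = 4u << 20;	/* reserved range at the top */
	unsigned int usable_start = 4096;	/* WaSkipStolenMemoryFirstPage */

	printf("usable stolen: %u bytes\n",
	       stolen_size - reserved_total - usable_start);
	return 0;
}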
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c85e7b06bdba..974ac08df473 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -34,8 +34,8 @@
/**
* DOC: buffer object tiling
*
- * i915_gem_set_tiling() and i915_gem_get_tiling() is the userspace interface to
- * declare fence register requirements.
+ * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() are the userspace
+ * interface to declare fence register requirements.
*
* In principle GEM doesn't care at all about the internal data layout of an
* object, and hence it also doesn't care about tiling or swizzling. There's two
@@ -58,86 +58,147 @@
 * involvement.
*/
+/**
+ * i915_gem_fence_size - required global GTT size for a fence
+ * @i915: i915 device
+ * @size: object size
+ * @tiling: tiling mode
+ * @stride: tiling stride
+ *
+ * Return the required global GTT size for a fence (view of a tiled object),
+ * taking into account potential fence register mapping.
+ */
+u32 i915_gem_fence_size(struct drm_i915_private *i915,
+ u32 size, unsigned int tiling, unsigned int stride)
+{
+ u32 ggtt_size;
+
+ GEM_BUG_ON(!size);
+
+ if (tiling == I915_TILING_NONE)
+ return size;
+
+ GEM_BUG_ON(!stride);
+
+ if (INTEL_GEN(i915) >= 4) {
+ stride *= i915_gem_tile_height(tiling);
+ GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
+ return roundup(size, stride);
+ }
+
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (IS_GEN3(i915))
+ ggtt_size = 1024*1024;
+ else
+ ggtt_size = 512*1024;
+
+ while (ggtt_size < size)
+ ggtt_size <<= 1;
+
+ return ggtt_size;
+}
+
+/**
+ * i915_gem_fence_alignment - required global GTT alignment for a fence
+ * @i915: i915 device
+ * @size: object size
+ * @tiling: tiling mode
+ * @stride: tiling stride
+ *
+ * Return the required global GTT alignment for a fence (a view of a tiled
+ * object), taking into account potential fence register mapping.
+ */
+u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
+ unsigned int tiling, unsigned int stride)
+{
+ GEM_BUG_ON(!size);
+
+ /*
+ * Minimum alignment is 4k (GTT page size), but might be greater
+ * if a fence register is needed for the object.
+ */
+ if (tiling == I915_TILING_NONE)
+ return I915_GTT_MIN_ALIGNMENT;
+
+ if (INTEL_GEN(i915) >= 4)
+ return I965_FENCE_PAGE;
+
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+ return i915_gem_fence_size(i915, size, tiling, stride);
+}
+
/* Check pitch constraints for all chips & tiling formats */
static bool
-i915_tiling_ok(struct drm_i915_private *dev_priv,
- int stride, int size, int tiling_mode)
+i915_tiling_ok(struct drm_i915_gem_object *obj,
+ unsigned int tiling, unsigned int stride)
{
- int tile_width;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int tile_width;
/* Linear is always fine */
- if (tiling_mode == I915_TILING_NONE)
+ if (tiling == I915_TILING_NONE)
return true;
- if (tiling_mode > I915_TILING_LAST)
+ if (tiling > I915_TILING_LAST)
return false;
- if (IS_GEN2(dev_priv) ||
- (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
- tile_width = 128;
- else
- tile_width = 512;
-
/* check maximum stride & object size */
/* i965+ stores the end address of the gtt mapping in the fence
 * reg, so don't bother to check the size */
- if (INTEL_GEN(dev_priv) >= 7) {
+ if (INTEL_GEN(i915) >= 7) {
if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
return false;
- } else if (INTEL_GEN(dev_priv) >= 4) {
+ } else if (INTEL_GEN(i915) >= 4) {
if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
return false;
} else {
if (stride > 8192)
return false;
- if (IS_GEN3(dev_priv)) {
- if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+ if (IS_GEN3(i915)) {
+ if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 20)
return false;
} else {
- if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+ if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 19)
return false;
}
}
- if (stride < tile_width)
+ if (IS_GEN2(i915) ||
+ (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
+ tile_width = 128;
+ else
+ tile_width = 512;
+
+ if (!stride || !IS_ALIGNED(stride, tile_width))
return false;
/* 965+ just needs multiples of tile width */
- if (INTEL_GEN(dev_priv) >= 4) {
- if (stride & (tile_width - 1))
- return false;
+ if (INTEL_GEN(i915) >= 4)
return true;
- }
/* Pre-965 needs power of two tile widths */
- if (stride & (stride - 1))
- return false;
-
- return true;
+ return is_power_of_2(stride);
}
-static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
+static bool i915_vma_fence_prepare(struct i915_vma *vma,
+ int tiling_mode, unsigned int stride)
{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
- u32 size;
+ struct drm_i915_private *i915 = vma->vm->i915;
+ u32 size, alignment;
if (!i915_vma_is_map_and_fenceable(vma))
return true;
- if (INTEL_GEN(dev_priv) == 3) {
- if (vma->node.start & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (vma->node.start & ~I830_FENCE_START_MASK)
- return false;
- }
-
- size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
+ size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
if (vma->node.size < size)
return false;
- if (vma->node.start & (size - 1))
+ alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
+ if (!IS_ALIGNED(vma->node.start, alignment))
return false;
return true;
@@ -145,20 +206,20 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
/* Make the current GTT allocation valid for the change in tiling. */
static int
-i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
+i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
+ int tiling_mode, unsigned int stride)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
int ret;
if (tiling_mode == I915_TILING_NONE)
return 0;
- if (INTEL_GEN(dev_priv) >= 4)
- return 0;
-
list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (i915_vma_fence_prepare(vma, tiling_mode))
+ if (!i915_vma_is_ggtt(vma))
+ break;
+
+ if (i915_vma_fence_prepare(vma, tiling_mode, stride))
continue;
ret = i915_vma_unbind(vma);
@@ -169,8 +230,100 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
return 0;
}
+int
+i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+ unsigned int tiling, unsigned int stride)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+ int err;
+
+ /* Make sure we don't cross-contaminate obj->tiling_and_stride */
+ BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
+
+ GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
+ GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ if ((tiling | stride) == obj->tiling_and_stride)
+ return 0;
+
+ if (obj->framebuffer_references)
+ return -EBUSY;
+
+ /* We need to rebind the object if its current allocation
+ * no longer meets the alignment restrictions for its new
+ * tiling mode. Otherwise we can just leave it alone, but
+ * need to ensure that any fence register is updated before
+ * the next fenced (either through the GTT or by the BLT unit
+ * on older GPUs) access.
+ *
+ * After updating the tiling parameters, we then flag whether
+ * we need to update an associated fence register. Note this
+ * has to also include the unfenced register the GPU uses
+ * whilst executing a fenced command for an untiled object.
+ */
+
+ err = i915_gem_object_fence_prepare(obj, tiling, stride);
+ if (err)
+ return err;
+
+ /* If the memory has unknown (i.e. varying) swizzling, we pin the
+ * pages to prevent them being swapped out and causing corruption
+ * due to the change in swizzling.
+ */
+ mutex_lock(&obj->mm.lock);
+ if (obj->mm.pages &&
+ obj->mm.madv == I915_MADV_WILLNEED &&
+ i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+ if (tiling == I915_TILING_NONE) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_unpin_pages(obj);
+ obj->mm.quirked = false;
+ }
+ if (!i915_gem_object_is_tiled(obj)) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ __i915_gem_object_pin_pages(obj);
+ obj->mm.quirked = true;
+ }
+ }
+ mutex_unlock(&obj->mm.lock);
+
+ list_for_each_entry(vma, &obj->vma_list, obj_link) {
+ if (!i915_vma_is_ggtt(vma))
+ break;
+
+ vma->fence_size =
+ i915_gem_fence_size(i915, vma->size, tiling, stride);
+ vma->fence_alignment =
+ i915_gem_fence_alignment(i915,
+ vma->size, tiling, stride);
+
+ if (vma->fence)
+ vma->fence->dirty = true;
+ }
+
+ obj->tiling_and_stride = tiling | stride;
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_release_mmap(obj);
+
+ /* Try to preallocate memory required to save swizzling on put-pages */
+ if (i915_gem_object_needs_bit17_swizzle(obj)) {
+ if (!obj->bit_17) {
+ obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
+ sizeof(long), GFP_KERNEL);
+ }
+ } else {
+ kfree(obj->bit_17);
+ obj->bit_17 = NULL;
+ }
+
+ return 0;
+}
+
/**
- * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode
* @dev: DRM device
* @data: data pointer for the ioctl
* @file: DRM file for the ioctl call
@@ -184,30 +337,19 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
* Zero on success, negative errno on failure.
*/
int
-i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- int err = 0;
-
- /* Make sure we don't cross-contaminate obj->tiling_and_stride */
- BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
+ int err;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
- if (!i915_tiling_ok(dev_priv,
- args->stride, obj->base.size, args->tiling_mode)) {
- i915_gem_object_put(obj);
- return -EINVAL;
- }
-
- mutex_lock(&dev->struct_mutex);
- if (obj->pin_display || obj->framebuffer_references) {
- err = -EBUSY;
+ if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
+ err = -EINVAL;
goto err;
}
@@ -216,9 +358,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
args->stride = 0;
} else {
if (args->tiling_mode == I915_TILING_X)
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+ args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
else
- args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+ args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;
/* Hide bit 17 swizzling from the user. This prevents old Mesa
* from aborting the application on sw fallbacks to bit 17,
@@ -240,79 +382,24 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
}
- if (args->tiling_mode != i915_gem_object_get_tiling(obj) ||
- args->stride != i915_gem_object_get_stride(obj)) {
- /* We need to rebind the object if its current allocation
- * no longer meets the alignment restrictions for its new
- * tiling mode. Otherwise we can just leave it alone, but
- * need to ensure that any fence register is updated before
- * the next fenced (either through the GTT or by the BLT unit
- * on older GPUs) access.
- *
- * After updating the tiling parameters, we then flag whether
- * we need to update an associated fence register. Note this
- * has to also include the unfenced register the GPU uses
- * whilst executing a fenced command for an untiled object.
- */
+ err = mutex_lock_interruptible(&dev->struct_mutex);
+ if (err)
+ goto err;
- err = i915_gem_object_fence_prepare(obj, args->tiling_mode);
- if (!err) {
- struct i915_vma *vma;
-
- mutex_lock(&obj->mm.lock);
- if (obj->mm.pages &&
- obj->mm.madv == I915_MADV_WILLNEED &&
- dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (args->tiling_mode == I915_TILING_NONE) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
- }
- if (!i915_gem_object_is_tiled(obj)) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
- }
- }
- mutex_unlock(&obj->mm.lock);
-
- list_for_each_entry(vma, &obj->vma_list, obj_link) {
- if (!vma->fence)
- continue;
-
- vma->fence->dirty = true;
- }
- obj->tiling_and_stride =
- args->stride | args->tiling_mode;
-
- /* Force the fence to be reacquired for GTT access */
- i915_gem_release_mmap(obj);
- }
- }
- /* we have to maintain this existing ABI... */
+ err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
+ mutex_unlock(&dev->struct_mutex);
+
+ /* We have to maintain this existing ABI... */
args->stride = i915_gem_object_get_stride(obj);
args->tiling_mode = i915_gem_object_get_tiling(obj);
- /* Try to preallocate memory required to save swizzling on put-pages */
- if (i915_gem_object_needs_bit17_swizzle(obj)) {
- if (obj->bit_17 == NULL) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
- sizeof(long), GFP_KERNEL);
- }
- } else {
- kfree(obj->bit_17);
- obj->bit_17 = NULL;
- }
-
err:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
-
return err;
}
/**
- * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode
* @dev: DRM device
* @data: data pointer for the ioctl
* @file: DRM file for the ioctl call
@@ -325,8 +412,8 @@ err:
* Zero on success, negative errno on failure.
*/
int
-i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file)
+i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
struct drm_i915_private *dev_priv = to_i915(dev);
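
A standalone sketch of the sizing rules that i915_gem_fence_size() above encodes, with made-up inputs: gen4+ rounds the object up to a whole tile row (stride times tile height), while gen3 needs the smallest power-of-two region of at least 1 MiB that covers it.

#include <stdio.h>

int main(void)
{
	unsigned int size = 900 * 1024;			/* hypothetical object size */
	unsigned int stride = 512, tile_height = 8;	/* X-tiling geometry */
	unsigned int row = stride * tile_height;	/* 4096 bytes per row */
	unsigned int gen4 = ((size + row - 1) / row) * row;
	unsigned int gen3 = 1024 * 1024;

	while (gen3 < size)
		gen3 <<= 1;

	printf("gen4+ fence size: %u\n", gen4);		/* 921600 */
	printf("gen3 fence size:  %u\n", gen3);		/* 1048576 */
	return 0;
}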
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.c b/drivers/gpu/drm/i915/i915_gem_timeline.c
index bf8a471b61e6..b596ca7ee058 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
@@ -81,10 +81,18 @@ int i915_gem_timeline_init__global(struct drm_i915_private *i915)
&class, "&global_timeline->lock");
}
-void i915_gem_timeline_fini(struct i915_gem_timeline *tl)
+void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
{
- lockdep_assert_held(&tl->i915->drm.struct_mutex);
+ int i;
- list_del(&tl->link);
- kfree(tl->name);
+ lockdep_assert_held(&timeline->i915->drm.struct_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
+ struct intel_timeline *tl = &timeline->engine[i];
+
+ GEM_BUG_ON(!list_empty(&tl->requests));
+ }
+
+ list_del(&timeline->link);
+ kfree(timeline->name);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index 98d99a62b4ae..f2e51f42cc2f 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -56,7 +56,7 @@ struct intel_timeline {
struct i915_gem_timeline {
struct list_head link;
- atomic_t next_seqno;
+ atomic_t seqno;
struct drm_i915_private *i915;
const char *name;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d068af2ec3a3..6a8fa085b74e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -784,7 +784,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
return -ENODEV;
}
- obj = i915_gem_object_alloc(dev);
+ obj = i915_gem_object_alloc(dev_priv);
if (obj == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index ae84aa4b1467..9cd22cda17af 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -121,6 +121,7 @@ static void __i915_error_advance(struct drm_i915_error_state_buf *e,
e->pos += len;
}
+__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
const char *f, va_list args)
{
@@ -176,9 +177,14 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e,
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
-static bool compress_init(struct z_stream_s *zstream)
+struct compress {
+ struct z_stream_s zstream;
+ void *tmp;
+};
+
+static bool compress_init(struct compress *c)
{
- memset(zstream, 0, sizeof(*zstream));
+ struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));
zstream->workspace =
kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
@@ -191,14 +197,22 @@ static bool compress_init(struct z_stream_s *zstream)
return false;
}
+ c->tmp = NULL;
+ if (i915_has_memcpy_from_wc())
+ c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+
return true;
}
-static int compress_page(struct z_stream_s *zstream,
+static int compress_page(struct compress *c,
void *src,
struct drm_i915_error_object *dst)
{
+ struct z_stream_s *zstream = &c->zstream;
+
zstream->next_in = src;
+ if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE))
+ zstream->next_in = c->tmp;
zstream->avail_in = PAGE_SIZE;
do {
@@ -226,9 +240,11 @@ static int compress_page(struct z_stream_s *zstream,
return 0;
}
-static void compress_fini(struct z_stream_s *zstream,
+static void compress_fini(struct compress *c,
struct drm_i915_error_object *dst)
{
+ struct z_stream_s *zstream = &c->zstream;
+
if (dst) {
zlib_deflate(zstream, Z_FINISH);
dst->unused = zstream->avail_out;
@@ -236,6 +252,9 @@ static void compress_fini(struct z_stream_s *zstream,
zlib_deflateEnd(zstream);
kfree(zstream->workspace);
+
+ if (c->tmp)
+ free_page((unsigned long)c->tmp);
}
static void err_compression_marker(struct drm_i915_error_state_buf *m)
@@ -245,28 +264,34 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
-static bool compress_init(struct z_stream_s *zstream)
+struct compress {
+};
+
+static bool compress_init(struct compress *c)
{
return true;
}
-static int compress_page(struct z_stream_s *zstream,
+static int compress_page(struct compress *c,
void *src,
struct drm_i915_error_object *dst)
{
unsigned long page;
+ void *ptr;
page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page)
return -ENOMEM;
- dst->pages[dst->page_count++] =
- memcpy((void *)page, src, PAGE_SIZE);
+ ptr = (void *)page;
+ if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
+ memcpy(ptr, src, PAGE_SIZE);
+ dst->pages[dst->page_count++] = ptr;
return 0;
}
-static void compress_fini(struct z_stream_s *zstream,
+static void compress_fini(struct compress *c,
struct drm_i915_error_object *dst)
{
}
@@ -316,24 +341,6 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
}
-static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
-{
- switch (a) {
- case HANGCHECK_IDLE:
- return "idle";
- case HANGCHECK_WAIT:
- return "wait";
- case HANGCHECK_ACTIVE:
- return "active";
- case HANGCHECK_KICK:
- return "kick";
- case HANGCHECK_HUNG:
- return "hung";
- }
-
- return "unknown";
-}
-
static void error_print_instdone(struct drm_i915_error_state_buf *m,
struct drm_i915_error_engine *ee)
{
@@ -370,8 +377,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
- prefix, erq->pid,
+ err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, emitted %dms ago, head %08x, tail %08x\n",
+ prefix, erq->pid, erq->ban_score,
erq->context, erq->seqno,
jiffies_to_msecs(jiffies - erq->jiffies),
erq->head, erq->tail);
@@ -441,9 +448,13 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " waiting: %s\n", yesno(ee->waiting));
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
- err_printf(m, " hangcheck: %s [%d]\n",
- hangcheck_action_to_str(ee->hangcheck_action),
- ee->hangcheck_score);
+ err_printf(m, " hangcheck stall: %s\n", yesno(ee->hangcheck_stalled));
+ err_printf(m, " hangcheck action: %s\n",
+ hangcheck_action_to_str(ee->hangcheck_action));
+ err_printf(m, " hangcheck action timestamp: %lu, %u ms ago\n",
+ ee->hangcheck_timestamp,
+ jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
+
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
}
@@ -528,11 +539,10 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
const struct i915_error_state_file_priv *error_priv)
{
- struct drm_i915_private *dev_priv = to_i915(error_priv->dev);
+ struct drm_i915_private *dev_priv = error_priv->i915;
struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_error_state *error = error_priv->error;
struct drm_i915_error_object *obj;
- int max_hangcheck_score;
int i, j;
if (!error) {
@@ -549,22 +559,20 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Uptime: %ld s %ld us\n",
error->uptime.tv_sec, error->uptime.tv_usec);
err_print_capabilities(m, &error->device_info);
- max_hangcheck_score = 0;
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (error->engine[i].hangcheck_score > max_hangcheck_score)
- max_hangcheck_score = error->engine[i].hangcheck_score;
- }
+
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- if (error->engine[i].hangcheck_score == max_hangcheck_score &&
+ if (error->engine[i].hangcheck_stalled &&
error->engine[i].pid != -1) {
- err_printf(m, "Active process (on ring %s): %s [%d]\n",
+ err_printf(m, "Active process (on ring %s): %s [%d], context bans %d\n",
engine_str(i),
error->engine[i].comm,
- error->engine[i].pid);
+ error->engine[i].pid,
+ error->engine[i].context_bans);
}
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
+ err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
err_printf(m, "PCI Subsystem: %04x:%04x\n",
@@ -651,9 +659,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (obj) {
err_puts(m, dev_priv->engine[i]->name);
if (ee->pid != -1)
- err_printf(m, " (submitted by %s [%d])",
+ err_printf(m, " (submitted by %s [%d], bans %d)",
ee->comm,
- ee->pid);
+ ee->pid,
+ ee->context_bans);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@@ -801,7 +810,7 @@ i915_error_object_create(struct drm_i915_private *i915,
struct i915_ggtt *ggtt = &i915->ggtt;
const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst;
- struct z_stream_s zstream;
+ struct compress compress;
unsigned long num_pages;
struct sgt_iter iter;
dma_addr_t dma;
@@ -821,7 +830,7 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->page_count = 0;
dst->unused = 0;
- if (!compress_init(&zstream)) {
+ if (!compress_init(&compress)) {
kfree(dst);
return NULL;
}
@@ -834,7 +843,7 @@ i915_error_object_create(struct drm_i915_private *i915,
I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
- ret = compress_page(&zstream, (void __force *)s, dst);
+ ret = compress_page(&compress, (void __force *)s, dst);
io_mapping_unmap_atomic(s);
if (ret)
@@ -849,7 +858,7 @@ unwind:
dst = NULL;
out:
- compress_fini(&zstream, dst);
+ compress_fini(&compress, dst);
ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE);
return dst;
}
@@ -941,7 +950,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 * strictly a client bug. Use instdone to differentiate those somewhat.
*/
for (i = 0; i < I915_NUM_ENGINES; i++) {
- if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
+ if (error->engine[i].hangcheck_stalled) {
if (engine_id)
*engine_id = i;
@@ -1159,8 +1168,9 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
ee->hws = I915_READ(mmio);
}
- ee->hangcheck_score = engine->hangcheck.score;
+ ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
ee->hangcheck_action = engine->hangcheck.action;
+ ee->hangcheck_stalled = engine->hangcheck.stalled;
if (USES_PPGTT(dev_priv)) {
int i;
@@ -1188,6 +1198,7 @@ static void record_request(struct drm_i915_gem_request *request,
struct drm_i915_error_request *erq)
{
erq->context = request->ctx->hw_id;
+ erq->ban_score = request->ctx->ban_score;
erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;
@@ -1321,7 +1332,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
}
error->simulated |=
- request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
+ i915_gem_context_no_error_capture(request->ctx);
ee->rq_head = request->head;
ee->rq_post = request->postfix;
@@ -1659,9 +1670,8 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
kref_put(&error_priv->error->ref, i915_error_state_free);
}
-void i915_destroy_error_state(struct drm_device *dev)
+void i915_destroy_error_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_error_state *error;
spin_lock_irq(&dev_priv->gpu_error.lock);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index a47e1e4aec03..35cf9918d09a 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -61,18 +61,27 @@
#define DMA_ADDRESS_SPACE_GTT (8 << 16)
#define DMA_COPY_SIZE _MMIO(0xc310)
#define DMA_CTRL _MMIO(0xc314)
+#define HUC_UKERNEL (1<<9)
#define UOS_MOVE (1<<4)
#define START_DMA (1<<0)
#define DMA_GUC_WOPCM_OFFSET _MMIO(0xc340)
+#define HUC_LOADING_AGENT_VCR (0<<1)
+#define HUC_LOADING_AGENT_GUC (1<<1)
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
+#define HUC_STATUS2 _MMIO(0xD3B0)
+#define HUC_FW_VERIFIED (1<<7)
+
/* Defines WOPCM space available to GuC firmware */
#define GUC_WOPCM_SIZE _MMIO(0xc050)
/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
+/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
+#define GUC_GGTT_TOP 0xFEE00000
+
#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
#define GEN9_GT_PM_CONFIG _MMIO(0x13816c)
@@ -100,8 +109,8 @@
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | \
GUC_ENABLE_MIA_CLOCK_GATING)
-#define HOST2GUC_INTERRUPT _MMIO(0xc4c8)
-#define HOST2GUC_TRIGGER (1<<0)
+#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
+#define GUC_SEND_TRIGGER (1<<0)
#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 4462112725ef..8ced9e26f075 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -21,12 +21,9 @@
* IN THE SOFTWARE.
*
*/
-#include <linux/firmware.h>
#include <linux/circ_buf.h>
-#include <linux/debugfs.h>
-#include <linux/relay.h>
#include "i915_drv.h"
-#include "intel_guc.h"
+#include "intel_uc.h"
/**
* DOC: GuC-based command submission
@@ -49,7 +46,7 @@
* Firmware writes a success/fail code back to the action register after
 * processing the request. The kernel driver polls waiting for this update and
* then proceeds.
- * See host2guc_action()
+ * See intel_guc_send()
*
* Doorbells:
* Doorbells are interrupts to uKernel. A doorbell is a single cache line (QW)
@@ -66,141 +63,29 @@
*/
/*
- * Read GuC command/status register (SOFT_SCRATCH_0)
- * Return true if it contains a response rather than a command
- */
-static inline bool host2guc_action_response(struct drm_i915_private *dev_priv,
- u32 *status)
-{
- u32 val = I915_READ(SOFT_SCRATCH(0));
- *status = val;
- return GUC2HOST_IS_RESPONSE(val);
-}
-
-static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 status;
- int i;
- int ret;
-
- if (WARN_ON(len < 1 || len > 15))
- return -EINVAL;
-
- mutex_lock(&guc->action_lock);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
-
- dev_priv->guc.action_count += 1;
- dev_priv->guc.action_cmd = data[0];
-
- for (i = 0; i < len; i++)
- I915_WRITE(SOFT_SCRATCH(i), data[i]);
-
- POSTING_READ(SOFT_SCRATCH(i - 1));
-
- I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
-
- /*
- * Fast commands should complete in less than 10us, so sample quickly
- * up to that length of time, then switch to a slower sleep-wait loop.
- * No HOST2GUC command should ever take longer than 10ms.
- */
- ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
- if (ret)
- ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
- if (status != GUC2HOST_STATUS_SUCCESS) {
- /*
- * Either the GuC explicitly returned an error (which
- * we convert to -EIO here) or no response at all was
- * received within the timeout limit (-ETIMEDOUT)
- */
- if (ret != -ETIMEDOUT)
- ret = -EIO;
-
- DRM_WARN("Action 0x%X failed; ret=%d status=0x%08X response=0x%08X\n",
- data[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
-
- dev_priv->guc.action_fail += 1;
- dev_priv->guc.action_err = ret;
- }
- dev_priv->guc.action_status = status;
-
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&guc->action_lock);
-
- return ret;
-}
-
-/*
* Tell the GuC to allocate or deallocate a specific doorbell
*/
-static int host2guc_allocate_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
-{
- u32 data[2];
-
- data[0] = HOST2GUC_ACTION_ALLOCATE_DOORBELL;
- data[1] = client->ctx_index;
-
- return host2guc_action(guc, data, 2);
-}
-
-static int host2guc_release_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
-{
- u32 data[2];
-
- data[0] = HOST2GUC_ACTION_DEALLOCATE_DOORBELL;
- data[1] = client->ctx_index;
-
- return host2guc_action(guc, data, 2);
-}
-
-static int host2guc_sample_forcewake(struct intel_guc *guc,
- struct i915_guc_client *client)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 data[2];
-
- data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
- /* WaRsDisableCoarsePowerGating:skl,bxt */
- if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
- data[1] = 0;
- else
- /* bit 0 and 1 are for Render and Media domain separately */
- data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
-
- return host2guc_action(guc, data, ARRAY_SIZE(data));
-}
-
-static int host2guc_logbuffer_flush_complete(struct intel_guc *guc)
-{
- u32 data[1];
-
- data[0] = HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE;
-
- return host2guc_action(guc, data, 1);
-}
-
-static int host2guc_force_logbuffer_flush(struct intel_guc *guc)
+static int guc_allocate_doorbell(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
- u32 data[2];
+ u32 action[] = {
+ INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
+ client->ctx_index
+ };
- data[0] = HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH;
- data[1] = 0;
-
- return host2guc_action(guc, data, 2);
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static int host2guc_logging_control(struct intel_guc *guc, u32 control_val)
+static int guc_release_doorbell(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
- u32 data[2];
-
- data[0] = HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING;
- data[1] = control_val;
+ u32 action[] = {
+ INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
+ client->ctx_index
+ };
- return host2guc_action(guc, data, 2);
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
/*
@@ -226,7 +111,7 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
test_bit(client->doorbell_id, doorbell_bitmap)) {
/* Deactivate the old doorbell */
doorbell->db_status = GUC_DOORBELL_DISABLED;
- (void)host2guc_release_doorbell(guc, client);
+ (void)guc_release_doorbell(guc, client);
__clear_bit(client->doorbell_id, doorbell_bitmap);
}
@@ -247,16 +132,9 @@ static int guc_update_doorbell_id(struct intel_guc *guc,
/* Activate the new doorbell */
__set_bit(new_id, doorbell_bitmap);
- doorbell->cookie = 0;
doorbell->db_status = GUC_DOORBELL_ENABLED;
- return host2guc_allocate_doorbell(guc, client);
-}
-
-static int guc_init_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client,
- uint16_t db_id)
-{
- return guc_update_doorbell_id(guc, client, db_id);
+ doorbell->cookie = client->doorbell_cookie;
+ return guc_allocate_doorbell(guc, client);
}
static void guc_disable_doorbell(struct intel_guc *guc,
@@ -298,7 +176,7 @@ select_doorbell_register(struct intel_guc *guc, uint32_t priority)
 * Select, assign and release doorbell cachelines
*
* These functions track which doorbell cachelines are in use.
- * The data they manipulate is protected by the host2guc lock.
+ * The data they manipulate is protected by the intel_guc_send lock.
*/
static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
@@ -390,11 +268,11 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
/* The state page is after PPHWSP */
lrc->ring_lcra =
- i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+ guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
- lrc->ring_begin = i915_ggtt_offset(ce->ring->vma);
+ lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
@@ -410,7 +288,7 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
- gfx_addr = i915_ggtt_offset(client->vma);
+ gfx_addr = guc_ggtt_offset(client->vma);
desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
desc.db_trigger_cpu =
@@ -464,22 +342,23 @@ static void guc_ctx_desc_fini(struct intel_guc *guc,
int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
- struct i915_guc_client *gc = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = gc->vaddr + gc->proc_desc_offset;
+ struct i915_guc_client *client = request->i915->guc.execbuf_client;
+ struct guc_process_desc *desc = client->vaddr +
+ client->proc_desc_offset;
u32 freespace;
int ret;
- spin_lock(&gc->wq_lock);
- freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
- freespace -= gc->wq_rsvd;
+ spin_lock(&client->wq_lock);
+ freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
+ freespace -= client->wq_rsvd;
if (likely(freespace >= wqi_size)) {
- gc->wq_rsvd += wqi_size;
+ client->wq_rsvd += wqi_size;
ret = 0;
} else {
- gc->no_wq_space++;
+ client->no_wq_space++;
ret = -EAGAIN;
}
- spin_unlock(&gc->wq_lock);
+ spin_unlock(&client->wq_lock);
return ret;
}
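
A minimal, standalone sketch of the CIRC_SPACE()-style accounting that i915_guc_wq_reserve() and i915_guc_wq_unreserve() above perform; this is not driver code, the demo_* names are hypothetical, and it assumes the work queue size is a power of two, as GUC_WQ_SIZE is.

#include <errno.h>
#include <stdint.h>

#define DEMO_WQ_SIZE	4096u	/* assumed power of two, like GUC_WQ_SIZE */

/* Free bytes between producer tail and consumer head, minus one guard slot. */
#define DEMO_CIRC_SPACE(tail, head, size) \
	(((head) - (tail) - 1) & ((size) - 1))

struct demo_wq {
	uint32_t head;	/* consumer read offset (advanced by the firmware) */
	uint32_t tail;	/* producer write offset (advanced by the host) */
	uint32_t rsvd;	/* bytes already promised to in-flight requests */
};

/* Reserve room for one work item, or report -EAGAIN as the driver does. */
static int demo_wq_reserve(struct demo_wq *wq, uint32_t wqi_size)
{
	uint32_t space = DEMO_CIRC_SPACE(wq->tail, wq->head, DEMO_WQ_SIZE);

	if (space < wq->rsvd + wqi_size)
		return -EAGAIN;
	wq->rsvd += wqi_size;
	return 0;
}

/* Give the reservation back, e.g. when a request is unwound. */
static void demo_wq_unreserve(struct demo_wq *wq, uint32_t wqi_size)
{
	wq->rsvd -= wqi_size;
}

int main(void)
{
	struct demo_wq wq = { 0 };

	if (demo_wq_reserve(&wq, 16))
		return 1;
	demo_wq_unreserve(&wq, 16);
	return 0;
}

The real code additionally serializes the reservation counters and the tail update under client->wq_lock, as visible in the hunks above and below.
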
@@ -487,17 +366,17 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
- struct i915_guc_client *gc = request->i915->guc.execbuf_client;
+ struct i915_guc_client *client = request->i915->guc.execbuf_client;
- GEM_BUG_ON(READ_ONCE(gc->wq_rsvd) < wqi_size);
+ GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
- spin_lock(&gc->wq_lock);
- gc->wq_rsvd -= wqi_size;
- spin_unlock(&gc->wq_lock);
+ spin_lock(&client->wq_lock);
+ client->wq_rsvd -= wqi_size;
+ spin_unlock(&client->wq_lock);
}
/* Construct a Work Item and append it to the GuC's Work Queue */
-static void guc_wq_item_append(struct i915_guc_client *gc,
+static void guc_wq_item_append(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
/* wqi_len is in DWords, and does not include the one-word header */
@@ -508,10 +387,10 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
struct guc_wq_item *wqi;
u32 freespace, tail, wq_off;
- desc = gc->vaddr + gc->proc_desc_offset;
+ desc = client->vaddr + client->proc_desc_offset;
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
- freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
+ freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */
@@ -528,17 +407,17 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
* workqueue buffer dw by dw.
*/
BUILD_BUG_ON(wqi_size != 16);
- GEM_BUG_ON(gc->wq_rsvd < wqi_size);
+ GEM_BUG_ON(client->wq_rsvd < wqi_size);
/* postincrement WQ tail for next time */
- wq_off = gc->wq_tail;
+ wq_off = client->wq_tail;
GEM_BUG_ON(wq_off & (wqi_size - 1));
- gc->wq_tail += wqi_size;
- gc->wq_tail &= gc->wq_size - 1;
- gc->wq_rsvd -= wqi_size;
+ client->wq_tail += wqi_size;
+ client->wq_tail &= client->wq_size - 1;
+ client->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
- wqi = gc->vaddr + wq_off + GUC_DB_SIZE;
+ wqi = client->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
@@ -553,30 +432,30 @@ static void guc_wq_item_append(struct i915_guc_client *gc,
wqi->fence_id = rq->global_seqno;
}
-static int guc_ring_doorbell(struct i915_guc_client *gc)
+static int guc_ring_doorbell(struct i915_guc_client *client)
{
struct guc_process_desc *desc;
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
- desc = gc->vaddr + gc->proc_desc_offset;
+ desc = client->vaddr + client->proc_desc_offset;
/* Update the tail so it is visible to GuC */
- desc->tail = gc->wq_tail;
+ desc->tail = client->wq_tail;
/* current cookie */
db_cmp.db_status = GUC_DOORBELL_ENABLED;
- db_cmp.cookie = gc->cookie;
+ db_cmp.cookie = client->doorbell_cookie;
/* cookie to be updated */
db_exc.db_status = GUC_DOORBELL_ENABLED;
- db_exc.cookie = gc->cookie + 1;
+ db_exc.cookie = client->doorbell_cookie + 1;
if (db_exc.cookie == 0)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = gc->vaddr + gc->doorbell_offset;
+ db = client->vaddr + client->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
@@ -586,7 +465,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
/* if the exchange was successfully executed */
if (db_ret.value_qw == db_cmp.value_qw) {
/* db was successfully rung */
- gc->cookie = db_exc.cookie;
+ client->doorbell_cookie = db_exc.cookie;
ret = 0;
break;
}
@@ -609,12 +488,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
}
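
A standalone sketch of the cookie-bumping protocol guc_ring_doorbell() follows, written with C11 atomics under the assumption that the exchange elided by the hunk header above is a 64-bit compare-and-swap on the shared doorbell cacheline; the demo_* names and the field layout of the union are illustrative assumptions only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_DB_ENABLED	1u

union demo_doorbell_qw {
	uint64_t value_qw;
	struct {
		uint32_t db_status;	/* enabled/disabled */
		uint32_t cookie;	/* bumped on every successful ring */
	};
};

/*
 * Ring the doorbell: publish a new cookie only if the doorbell is still
 * enabled and nobody has rung it since *cookie was last sampled.
 */
static bool demo_ring_doorbell(_Atomic uint64_t *db, uint32_t *cookie)
{
	union demo_doorbell_qw cmp, exc;

	cmp.db_status = DEMO_DB_ENABLED;
	cmp.cookie = *cookie;

	exc.db_status = DEMO_DB_ENABLED;
	exc.cookie = *cookie + 1;
	if (exc.cookie == 0)		/* skip 0 on wraparound, as above */
		exc.cookie = 1;

	if (atomic_compare_exchange_strong(db, &cmp.value_qw, exc.value_qw)) {
		*cookie = exc.cookie;	/* remember what we published */
		return true;
	}
	return false;	/* lost the race or doorbell disabled; caller retries */
}

int main(void)
{
	_Atomic uint64_t db;
	union demo_doorbell_qw init = { .db_status = DEMO_DB_ENABLED, .cookie = 5 };
	uint32_t cookie = 5;

	atomic_store(&db, init.value_qw);
	return demo_ring_doorbell(&db, &cookie) ? 0 : 1;
}

This mirrors the attempt/-EAGAIN retry structure visible at the top of guc_ring_doorbell() in the hunks above.
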
/**
- * i915_guc_submit() - Submit commands through GuC
+ * __i915_guc_submit() - Submit commands through GuC
* @rq: request associated with the commands
*
- * Return: 0 on success, otherwise an errno.
- * (Note: nonzero really shouldn't happen!)
- *
* The caller must have already called i915_guc_wq_reserve() above with
* a result of 0 (success), guaranteeing that there is space in the work
* queue for the new request, so enqueuing the item cannot fail.
@@ -626,7 +502,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
* The only error here arises if the doorbell hardware isn't functioning
 * as expected, which really shouldn't happen.
*/
-static void i915_guc_submit(struct drm_i915_gem_request *rq)
+static void __i915_guc_submit(struct drm_i915_gem_request *rq)
{
struct drm_i915_private *dev_priv = rq->i915;
struct intel_engine_cs *engine = rq->engine;
@@ -635,17 +511,6 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
struct i915_guc_client *client = guc->execbuf_client;
int b_ret;
- /* We keep the previous context alive until we retire the following
- * request. This ensures that any the context object is still pinned
- * for any residual writes the HW makes into it on the context switch
- * into the next object following the breadcrumb. Otherwise, we may
- * retire the context too early.
- */
- rq->previous_context = engine->last_context;
- engine->last_context = rq->ctx;
-
- i915_gem_request_submit(rq);
-
spin_lock(&client->wq_lock);
guc_wq_item_append(client, rq);
@@ -665,6 +530,12 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
spin_unlock(&client->wq_lock);
}
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
+{
+ i915_gem_request_submit(rq);
+ __i915_guc_submit(rq);
+}
+
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -672,7 +543,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
*/
/**
- * guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
* @guc: the guc
* @size: size of area to allocate (both virtual space and memory)
*
@@ -684,18 +555,18 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
*
 * Return:	An i915_vma if successful, otherwise an ERR_PTR.
*/
-static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret;
- obj = i915_gem_object_create(&dev_priv->drm, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err;
@@ -706,9 +577,6 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
goto err;
}
- /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
- I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-
return vma;
err:
@@ -779,8 +647,7 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
uint16_t db_id;
int i, err;
- /* Save client's original doorbell selection */
- db_id = client->doorbell_id;
+ guc_disable_doorbell(guc, client);
for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
/* Skip if doorbell is OK */
@@ -793,7 +660,9 @@ static void guc_init_doorbell_hw(struct intel_guc *guc)
i, err);
}
- /* Restore to original value */
+ db_id = select_doorbell_register(guc, client->priority);
+ WARN_ON(db_id == GUC_INVALID_DOORBELL_ID);
+
err = guc_update_doorbell_id(guc, client, db_id);
if (err)
DRM_WARN("Failed to restore doorbell to %d, err %d\n",
@@ -847,7 +716,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
}
/* The first page is doorbell/proc_desc. Two followed pages are wq. */
- vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
+ vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
if (IS_ERR(vma))
goto err;
@@ -883,8 +752,13 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
guc_proc_desc_init(guc, client);
guc_ctx_desc_init(guc, client);
- if (guc_init_doorbell(guc, client, db_id))
- goto err;
+
+ /* For runtime client allocation we need to enable the doorbell. Not
+ * required yet for the static execbuf_client as this special kernel
+ * client is enabled from i915_guc_submission_enable().
+ *
+ * guc_update_doorbell_id(guc, client, db_id);
+ */
DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
priority, client, client->engines, client->ctx_index);
@@ -898,488 +772,7 @@ err:
return NULL;
}
-/*
- * Sub buffer switch callback. Called whenever relay has to switch to a new
- * sub buffer, relay stays on the same sub buffer if 0 is returned.
- */
-static int subbuf_start_callback(struct rchan_buf *buf,
- void *subbuf,
- void *prev_subbuf,
- size_t prev_padding)
-{
- /* Use no-overwrite mode by default, where relay will stop accepting
- * new data if there are no empty sub buffers left.
- * There is no strict synchronization enforced by relay between Consumer
- * and Producer. In overwrite mode, there is a possibility of getting
- * inconsistent/garbled data, the producer could be writing on to the
- * same sub buffer from which Consumer is reading. This can't be avoided
- * unless Consumer is fast enough and can always run in tandem with
- * Producer.
- */
- if (relay_buf_full(buf))
- return 0;
-
- return 1;
-}
-
-/*
- * file_create() callback. Creates relay file in debugfs.
- */
-static struct dentry *create_buf_file_callback(const char *filename,
- struct dentry *parent,
- umode_t mode,
- struct rchan_buf *buf,
- int *is_global)
-{
- struct dentry *buf_file;
-
- /* This to enable the use of a single buffer for the relay channel and
- * correspondingly have a single file exposed to User, through which
- * it can collect the logs in order without any post-processing.
- * Need to set 'is_global' even if parent is NULL for early logging.
- */
- *is_global = 1;
-
- if (!parent)
- return NULL;
-
- /* Not using the channel filename passed as an argument, since for each
- * channel relay appends the corresponding CPU number to the filename
- * passed in relay_open(). This should be fine as relay just needs a
- * dentry of the file associated with the channel buffer and that file's
- * name need not be same as the filename passed as an argument.
- */
- buf_file = debugfs_create_file("guc_log", mode,
- parent, buf, &relay_file_operations);
- return buf_file;
-}
-
-/*
- * file_remove() default callback. Removes relay file in debugfs.
- */
-static int remove_buf_file_callback(struct dentry *dentry)
-{
- debugfs_remove(dentry);
- return 0;
-}
-
-/* relay channel callbacks */
-static struct rchan_callbacks relay_callbacks = {
- .subbuf_start = subbuf_start_callback,
- .create_buf_file = create_buf_file_callback,
- .remove_buf_file = remove_buf_file_callback,
-};
-
-static void guc_log_remove_relay_file(struct intel_guc *guc)
-{
- relay_close(guc->log.relay_chan);
-}
-
-static int guc_log_create_relay_channel(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct rchan *guc_log_relay_chan;
- size_t n_subbufs, subbuf_size;
-
- /* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = guc->log.vma->obj->base.size;
-
- /* Store up to 8 snapshots, which is large enough to buffer sufficient
- * boot time logs and provides enough leeway to User, in terms of
- * latency, for consuming the logs from relay. Also doesn't take
- * up too much memory.
- */
- n_subbufs = 8;
-
- guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
- n_subbufs, &relay_callbacks, dev_priv);
- if (!guc_log_relay_chan) {
- DRM_ERROR("Couldn't create relay chan for GuC logging\n");
- return -ENOMEM;
- }
-
- GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
- guc->log.relay_chan = guc_log_relay_chan;
- return 0;
-}
-
-static int guc_log_create_relay_file(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct dentry *log_dir;
- int ret;
-
- /* For now create the log file in /sys/kernel/debug/dri/0 dir */
- log_dir = dev_priv->drm.primary->debugfs_root;
-
- /* If /sys/kernel/debug/dri/0 location do not exist, then debugfs is
- * not mounted and so can't create the relay file.
- * The relay API seems to fit well with debugfs only, for availing relay
- * there are 3 requirements which can be met for debugfs file only in a
- * straightforward/clean manner :-
- * i) Need the associated dentry pointer of the file, while opening the
- * relay channel.
- * ii) Should be able to use 'relay_file_operations' fops for the file.
- * iii) Set the 'i_private' field of file's inode to the pointer of
- * relay channel buffer.
- */
- if (!log_dir) {
- DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
- return -ENODEV;
- }
-
- ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
- if (ret) {
- DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void guc_move_to_next_buf(struct intel_guc *guc)
-{
- /* Make sure the updates made in the sub buffer are visible when
- * Consumer sees the following update to offset inside the sub buffer.
- */
- smp_wmb();
-
- /* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
-
- /* Switch to the next sub buffer */
- relay_flush(guc->log.relay_chan);
-}
-
-static void *guc_get_write_buffer(struct intel_guc *guc)
-{
- if (!guc->log.relay_chan)
- return NULL;
-
- /* Just get the base address of a new sub buffer and copy data into it
- * ourselves. NULL will be returned in no-overwrite mode, if all sub
- * buffers are full. Could have used the relay_write() to indirectly
- * copy the data, but that would have been bit convoluted, as we need to
- * write to only certain locations inside a sub buffer which cannot be
- * done without using relay_reserve() along with relay_write(). So its
- * better to use relay_reserve() alone.
- */
- return relay_reserve(guc->log.relay_chan, 0);
-}
-
-static bool
-guc_check_log_buf_overflow(struct intel_guc *guc,
- enum guc_log_buffer_type type, unsigned int full_cnt)
-{
- unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
- bool overflow = false;
-
- if (full_cnt != prev_full_cnt) {
- overflow = true;
-
- guc->log.prev_overflow_count[type] = full_cnt;
- guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
-
- if (full_cnt < prev_full_cnt) {
- /* buffer_full_cnt is a 4 bit counter */
- guc->log.total_overflow_count[type] += 16;
- }
- DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
- }
-
- return overflow;
-}
-
-static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
-{
- switch (type) {
- case GUC_ISR_LOG_BUFFER:
- return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
- case GUC_DPC_LOG_BUFFER:
- return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
- case GUC_CRASH_DUMP_LOG_BUFFER:
- return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
- default:
- MISSING_CASE(type);
- }
-
- return 0;
-}
-
-static void guc_read_update_log_buffer(struct intel_guc *guc)
-{
- unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
- struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
- struct guc_log_buffer_state log_buf_state_local;
- enum guc_log_buffer_type type;
- void *src_data, *dst_data;
- bool new_overflow;
-
- if (WARN_ON(!guc->log.buf_addr))
- return;
-
- /* Get the pointer to shared GuC log buffer */
- log_buf_state = src_data = guc->log.buf_addr;
-
- /* Get the pointer to local buffer to store the logs */
- log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
- /* Actual logs are present from the 2nd page */
- src_data += PAGE_SIZE;
- dst_data += PAGE_SIZE;
-
- for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
- /* Make a copy of the state structure, inside GuC log buffer
- * (which is uncached mapped), on the stack to avoid reading
- * from it multiple times.
- */
- memcpy(&log_buf_state_local, log_buf_state,
- sizeof(struct guc_log_buffer_state));
- buffer_size = guc_get_log_buffer_size(type);
- read_offset = log_buf_state_local.read_ptr;
- write_offset = log_buf_state_local.sampled_write_ptr;
- full_cnt = log_buf_state_local.buffer_full_cnt;
-
- /* Bookkeeping stuff */
- guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
- new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
-
- /* Update the state of shared log buffer */
- log_buf_state->read_ptr = write_offset;
- log_buf_state->flush_to_file = 0;
- log_buf_state++;
-
- if (unlikely(!log_buf_snapshot_state))
- continue;
-
- /* First copy the state structure in snapshot buffer */
- memcpy(log_buf_snapshot_state, &log_buf_state_local,
- sizeof(struct guc_log_buffer_state));
-
- /* The write pointer could have been updated by GuC firmware,
- * after sending the flush interrupt to Host, for consistency
- * set write pointer value to same value of sampled_write_ptr
- * in the snapshot buffer.
- */
- log_buf_snapshot_state->write_ptr = write_offset;
- log_buf_snapshot_state++;
-
- /* Now copy the actual logs. */
- if (unlikely(new_overflow)) {
- /* copy the whole buffer in case of overflow */
- read_offset = 0;
- write_offset = buffer_size;
- } else if (unlikely((read_offset > buffer_size) ||
- (write_offset > buffer_size))) {
- DRM_ERROR("invalid log buffer state\n");
- /* copy whole buffer as offsets are unreliable */
- read_offset = 0;
- write_offset = buffer_size;
- }
-
- /* Just copy the newly written data */
- if (read_offset > write_offset) {
- i915_memcpy_from_wc(dst_data, src_data, write_offset);
- bytes_to_copy = buffer_size - read_offset;
- } else {
- bytes_to_copy = write_offset - read_offset;
- }
- i915_memcpy_from_wc(dst_data + read_offset,
- src_data + read_offset, bytes_to_copy);
-
- src_data += buffer_size;
- dst_data += buffer_size;
- }
-
- if (log_buf_snapshot_state)
- guc_move_to_next_buf(guc);
- else {
- /* Used rate limited to avoid deluge of messages, logs might be
- * getting consumed by User at a slow rate.
- */
- DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
- guc->log.capture_miss_count++;
- }
-}
-
-static void guc_capture_logs_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, guc.log.flush_work);
-
- i915_guc_capture_logs(dev_priv);
-}
-
-static void guc_log_cleanup(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- /* First disable the flush interrupt */
- gen9_disable_guc_interrupts(dev_priv);
-
- if (guc->log.flush_wq)
- destroy_workqueue(guc->log.flush_wq);
-
- guc->log.flush_wq = NULL;
-
- if (guc->log.relay_chan)
- guc_log_remove_relay_file(guc);
-
- guc->log.relay_chan = NULL;
-
- if (guc->log.buf_addr)
- i915_gem_object_unpin_map(guc->log.vma->obj);
-
- guc->log.buf_addr = NULL;
-}
-
-static int guc_log_create_extras(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- void *vaddr;
- int ret;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- /* Nothing to do */
- if (i915.guc_log_level < 0)
- return 0;
-
- if (!guc->log.buf_addr) {
- /* Create a WC (Uncached for read) vmalloc mapping of log
- * buffer pages, so that we can directly get the data
- * (up-to-date) from memory.
- */
- vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
- return ret;
- }
-
- guc->log.buf_addr = vaddr;
- }
-
- if (!guc->log.relay_chan) {
- /* Create a relay channel, so that we have buffers for storing
- * the GuC firmware logs, the channel will be linked with a file
- * later on when debugfs is registered.
- */
- ret = guc_log_create_relay_channel(guc);
- if (ret)
- return ret;
- }
-
- if (!guc->log.flush_wq) {
- INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
-
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother about
- * flushing of this work item from the suspend hooks, the pending
- * work item if any will be either executed before the suspend
- * or scheduled later on resume. This way the handling of work
- * item can be kept same between system suspend & rpm suspend.
- */
- guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (guc->log.flush_wq == NULL) {
- DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static void guc_log_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- unsigned long offset;
- uint32_t size, flags;
-
- if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
- i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
-
- /* The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap */
- size = (1 + GUC_LOG_DPC_PAGES + 1 +
- GUC_LOG_ISR_PAGES + 1 +
- GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
-
- vma = guc->log.vma;
- if (!vma) {
- /* We require SSE 4.1 for fast reads from the GuC log buffer and
- * it should be present on the chipsets supporting GuC based
- * submisssions.
- */
- if (WARN_ON(!i915_memcpy_from_wc(NULL, NULL, 0))) {
- /* logging will not be enabled */
- i915.guc_log_level = -1;
- return;
- }
-
- vma = guc_allocate_vma(guc, size);
- if (IS_ERR(vma)) {
- /* logging will be off */
- i915.guc_log_level = -1;
- return;
- }
-
- guc->log.vma = vma;
-
- if (guc_log_create_extras(guc)) {
- guc_log_cleanup(guc);
- i915_vma_unpin_and_release(&guc->log.vma);
- i915.guc_log_level = -1;
- return;
- }
- }
-
- /* each allocated unit is a page */
- flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
- (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
- (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
- (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-
- offset = i915_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
-}
-
-static int guc_log_late_setup(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- if (i915.guc_log_level < 0)
- return -EINVAL;
-
- /* If log_level was set as -1 at boot time, then setup needed to
- * handle log buffer flush interrupts would not have been done yet,
- * so do that now.
- */
- ret = guc_log_create_extras(guc);
- if (ret)
- goto err;
-
- ret = guc_log_create_relay_file(guc);
- if (ret)
- goto err;
-
- return 0;
-err:
- guc_log_cleanup(guc);
- /* logging will remain off */
- i915.guc_log_level = -1;
- return ret;
-}
static void guc_policies_init(struct guc_policies *policies)
{
@@ -1422,7 +815,7 @@ static void guc_addon_create(struct intel_guc *guc)
vma = guc->ads_vma;
if (!vma) {
- vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
+ vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(size));
if (IS_ERR(vma))
return;
@@ -1450,7 +843,7 @@ static void guc_addon_create(struct intel_guc *guc)
guc_policies_init(policies);
ads->scheduler_policies =
- i915_ggtt_offset(vma) + sizeof(struct guc_ads);
+ guc_ggtt_offset(vma) + sizeof(struct guc_ads);
/* MMIO reg state */
reg_state = (void *)policies + sizeof(struct guc_policies);
@@ -1484,6 +877,9 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
+ if (!HAS_GUC_SCHED(dev_priv))
+ return 0;
+
/* Wipe bitmap & delete client in case of reinitialisation */
bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
i915_guc_submission_disable(dev_priv);
@@ -1494,52 +890,68 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
if (guc->ctx_pool_vma)
return 0; /* already allocated */
- vma = guc_allocate_vma(guc, gemsize);
+ vma = intel_guc_allocate_vma(guc, gemsize);
if (IS_ERR(vma))
return PTR_ERR(vma);
guc->ctx_pool_vma = vma;
ida_init(&guc->ctx_ids);
- mutex_init(&guc->action_lock);
- guc_log_create(guc);
+ intel_guc_log_create(guc);
guc_addon_create(guc);
+ guc->execbuf_client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CTX_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
+ if (!guc->execbuf_client) {
+ DRM_ERROR("Failed to create GuC client for execbuf!\n");
+ goto err;
+ }
+
return 0;
+
+err:
+ i915_guc_submission_fini(dev_priv);
+ return -ENOMEM;
+}
+
+static void guc_reset_wq(struct i915_guc_client *client)
+{
+ struct guc_process_desc *desc = client->vaddr +
+ client->proc_desc_offset;
+
+ desc->head = 0;
+ desc->tail = 0;
+
+ client->wq_tail = 0;
}
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
- struct drm_i915_gem_request *request;
- struct i915_guc_client *client;
+ struct i915_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- /* client for execbuf submission */
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
- GUC_CTX_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
- if (!client) {
- DRM_ERROR("Failed to create normal GuC client!\n");
- return -ENOMEM;
- }
+ if (!client)
+ return -ENODEV;
+
+ intel_guc_sample_forcewake(guc);
- guc->execbuf_client = client;
- host2guc_sample_forcewake(guc, client);
+ guc_reset_wq(client);
guc_init_doorbell_hw(guc);
/* Take over from manual control of ELSP (execlists) */
for_each_engine(engine, dev_priv, id) {
+ struct drm_i915_gem_request *rq;
+
engine->submit_request = i915_guc_submit;
engine->schedule = NULL;
/* Replay the current set of previously submitted requests */
- list_for_each_entry(request,
- &engine->timeline->requests, link) {
+ list_for_each_entry(rq, &engine->timeline->requests, link) {
client->wq_rsvd += sizeof(struct guc_wq_item);
- if (i915_sw_fence_done(&request->submit))
- i915_guc_submit(request);
+ __i915_guc_submit(rq);
}
}
@@ -1555,14 +967,18 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
/* Revert back to manual ELSP submission */
intel_execlists_enable_submission(dev_priv);
-
- guc_client_free(dev_priv, guc->execbuf_client);
- guc->execbuf_client = NULL;
}
void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ struct i915_guc_client *client;
+
+ client = fetch_and_zero(&guc->execbuf_client);
+ if (!client)
+ return;
+
+ guc_client_free(dev_priv, client);
i915_vma_unpin_and_release(&guc->ads_vma);
i915_vma_unpin_and_release(&guc->log.vma);
@@ -1574,44 +990,42 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
/**
* intel_guc_suspend() - notify GuC entering suspend state
- * @dev: drm device
+ * @dev_priv: i915 device private
*/
-int intel_guc_suspend(struct drm_device *dev)
+int intel_guc_suspend(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc *guc = &dev_priv->guc;
struct i915_gem_context *ctx;
u32 data[3];
- if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
gen9_disable_guc_interrupts(dev_priv);
ctx = dev_priv->kernel_context;
- data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
+ data[0] = INTEL_GUC_ACTION_ENTER_S_STATE;
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
/* first page is shared data with GuC */
- data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
+ data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
- return host2guc_action(guc, data, ARRAY_SIZE(data));
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
/**
* intel_guc_resume() - notify GuC resuming from suspend state
- * @dev: drm device
+ * @dev_priv: i915 device private
*/
-int intel_guc_resume(struct drm_device *dev)
+int intel_guc_resume(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_guc *guc = &dev_priv->guc;
struct i915_gem_context *ctx;
u32 data[3];
- if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
+ if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
if (i915.guc_log_level >= 0)
@@ -1619,111 +1033,12 @@ int intel_guc_resume(struct drm_device *dev)
ctx = dev_priv->kernel_context;
- data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
+ data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
/* first page is shared data with GuC */
- data[2] = i915_ggtt_offset(ctx->engine[RCS].state);
+ data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
- return host2guc_action(guc, data, ARRAY_SIZE(data));
+ return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
-void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
-{
- guc_read_update_log_buffer(&dev_priv->guc);
- /* Generally device is expected to be active only at this
- * time, so get/put should be really quick.
- */
- intel_runtime_pm_get(dev_priv);
- host2guc_logbuffer_flush_complete(&dev_priv->guc);
- intel_runtime_pm_put(dev_priv);
-}
-
-void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
-{
- if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
- return;
-
- /* First disable the interrupts, will be renabled afterwards */
- gen9_disable_guc_interrupts(dev_priv);
-
- /* Before initiating the forceful flush, wait for any pending/ongoing
- * flush to complete otherwise forceful flush may not actually happen.
- */
- flush_work(&dev_priv->guc.log.flush_work);
-
- /* Ask GuC to update the log buffer state */
- host2guc_force_logbuffer_flush(&dev_priv->guc);
-
- /* GuC would have updated log buffer by now, so capture it */
- i915_guc_capture_logs(dev_priv);
-}
-
-void i915_guc_unregister(struct drm_i915_private *dev_priv)
-{
- if (!i915.enable_guc_submission)
- return;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- guc_log_cleanup(&dev_priv->guc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-void i915_guc_register(struct drm_i915_private *dev_priv)
-{
- if (!i915.enable_guc_submission)
- return;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- guc_log_late_setup(&dev_priv->guc);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
-{
- union guc_log_control log_param;
- int ret;
-
- log_param.value = control_val;
-
- if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
- log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
- return -EINVAL;
-
- /* This combination doesn't make sense & won't have any effect */
- if (!log_param.logging_enabled && (i915.guc_log_level < 0))
- return 0;
-
- ret = host2guc_logging_control(&dev_priv->guc, log_param.value);
- if (ret < 0) {
- DRM_DEBUG_DRIVER("host2guc action failed %d\n", ret);
- return ret;
- }
-
- i915.guc_log_level = log_param.verbosity;
-
- /* If log_level was set as -1 at boot time, then the relay channel file
- * wouldn't have been created by now and interrupts also would not have
- * been enabled.
- */
- if (!dev_priv->guc.log.relay_chan) {
- ret = guc_log_late_setup(&dev_priv->guc);
- if (!ret)
- gen9_enable_guc_interrupts(dev_priv);
- } else if (!log_param.logging_enabled) {
- /* Once logging is disabled, GuC won't generate logs & send an
- * interrupt. But there could be some data in the log buffer
- * which is yet to be captured. So request GuC to update the log
- * buffer state and then collect the left over logs.
- */
- i915_guc_flush_logs(dev_priv);
-
- /* As logging is disabled, update log level to reflect that */
- i915.guc_log_level = -1;
- } else {
- /* In case interrupts were disabled, enable them now */
- gen9_enable_guc_interrupts(dev_priv);
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f0880afbb878..e6ffef2f707a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1170,6 +1170,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
+
+ if (new_delay >= dev_priv->rps.max_freq_softlimit)
+ adj = 0;
/*
* For better performance, jump directly
* to RPe if we're below it.
@@ -1191,6 +1194,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
adj *= 2;
else /* CHV needs even encode values */
adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
+
+ if (new_delay <= dev_priv->rps.min_freq_softlimit)
+ adj = 0;
} else { /* unknown event */
adj = 0;
}
@@ -1553,41 +1559,68 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_pipe_crc_entry *entry;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct drm_driver *driver = dev_priv->drm.driver;
+ uint32_t crcs[5];
int head, tail;
spin_lock(&pipe_crc->lock);
+ if (pipe_crc->source) {
+ if (!pipe_crc->entries) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_DEBUG_KMS("spurious interrupt\n");
+ return;
+ }
- if (!pipe_crc->entries) {
- spin_unlock(&pipe_crc->lock);
- DRM_DEBUG_KMS("spurious interrupt\n");
- return;
- }
-
- head = pipe_crc->head;
- tail = pipe_crc->tail;
+ head = pipe_crc->head;
+ tail = pipe_crc->tail;
- if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
- spin_unlock(&pipe_crc->lock);
- DRM_ERROR("CRC buffer overflowing\n");
- return;
- }
+ if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+ spin_unlock(&pipe_crc->lock);
+ DRM_ERROR("CRC buffer overflowing\n");
+ return;
+ }
- entry = &pipe_crc->entries[head];
+ entry = &pipe_crc->entries[head];
- entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
- pipe);
- entry->crc[0] = crc0;
- entry->crc[1] = crc1;
- entry->crc[2] = crc2;
- entry->crc[3] = crc3;
- entry->crc[4] = crc4;
+ entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
+ entry->crc[0] = crc0;
+ entry->crc[1] = crc1;
+ entry->crc[2] = crc2;
+ entry->crc[3] = crc3;
+ entry->crc[4] = crc4;
- head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
- pipe_crc->head = head;
+ head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+ pipe_crc->head = head;
- spin_unlock(&pipe_crc->lock);
+ spin_unlock(&pipe_crc->lock);
- wake_up_interruptible(&pipe_crc->wq);
+ wake_up_interruptible(&pipe_crc->wq);
+ } else {
+ /*
+ * For some not yet identified reason, the first CRC is
+ * bonkers. So let's just wait for the next vblank and read
+ * out the buggy result.
+ *
+ * On CHV sometimes the second CRC is bonkers as well, so
+ * don't trust that one either.
+ */
+ if (pipe_crc->skipped == 0 ||
+ (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
+ pipe_crc->skipped++;
+ spin_unlock(&pipe_crc->lock);
+ return;
+ }
+ spin_unlock(&pipe_crc->lock);
+ crcs[0] = crc0;
+ crcs[1] = crc1;
+ crcs[2] = crc2;
+ crcs[3] = crc3;
+ crcs[4] = crc4;
+ drm_crtc_add_crc_entry(&crtc->base, true,
+ drm_accurate_vblank_count(&crtc->base),
+ crcs);
+ }
}
#else
static inline void
@@ -1683,8 +1716,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
u32 msg, flush;
msg = I915_READ(SOFT_SCRATCH(15));
- flush = msg & (GUC2HOST_MSG_CRASH_DUMP_POSTED |
- GUC2HOST_MSG_FLUSH_LOG_BUFFER);
+ flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
+ INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
if (flush) {
/* Clear the message bits that are handled */
I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
@@ -2444,7 +2477,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
found = true;
}
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
if (tmp_mask) {
bxt_hpd_irq_handler(dev_priv, tmp_mask,
@@ -2460,7 +2493,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
}
- if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
+ if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
gmbus_irq_handler(dev_priv);
found = true;
}
@@ -2712,12 +2745,13 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
* i915_handle_error - handle a gpu error
* @dev_priv: i915 device private
* @engine_mask: mask representing engines that are hung
+ * @fmt: Error message format string
+ *
* Do some basic checking of register state at error time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
- * @fmt: Error message format string
*/
void i915_handle_error(struct drm_i915_private *dev_priv,
u32 engine_mask,
@@ -3105,19 +3139,16 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
-static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, hotplug, enabled_irqs;
-
- hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ u32 hotplug;
/* Enable digital hotplug on the PCH */
hotplug = I915_READ(PCH_PORT_HOTPLUG);
- hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
- PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
+ hotplug |= PORTA_HOTPLUG_ENABLE |
+ PORTB_HOTPLUG_ENABLE |
+ PORTC_HOTPLUG_ENABLE |
+ PORTD_HOTPLUG_ENABLE;
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
hotplug = I915_READ(PCH_PORT_HOTPLUG2);
@@ -3125,6 +3156,18 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
}
+static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ spt_hpd_detection_setup(dev_priv);
+}
+
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_irqs, hotplug, enabled_irqs;
@@ -3159,18 +3202,15 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
ibx_hpd_irq_setup(dev_priv);
}
-static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
+ u32 enabled_irqs)
{
- u32 hotplug_irqs, hotplug, enabled_irqs;
-
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
- hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
-
- bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+ u32 hotplug;
hotplug = I915_READ(PCH_PORT_HOTPLUG);
- hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
- PORTA_HOTPLUG_ENABLE;
+ hotplug |= PORTA_HOTPLUG_ENABLE |
+ PORTB_HOTPLUG_ENABLE |
+ PORTC_HOTPLUG_ENABLE;
DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
hotplug, enabled_irqs);
@@ -3180,7 +3220,6 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
* For BXT invert bit has to be set based on AOB design
* for HPD detection logic, update it based on VBT fields.
*/
-
if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
hotplug |= BXT_DDIA_HPD_INVERT;
@@ -3194,6 +3233,23 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
+static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
+}
+
+static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
+ hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
+
+ bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
+
+ __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
+}
+
static void ibx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3209,6 +3265,12 @@ static void ibx_irq_postinstall(struct drm_device *dev)
gen5_assert_iir_is_zero(dev_priv, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
+
+ if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
+ HAS_PCH_LPT(dev_priv))
+ ; /* TODO: Enable HPD detection on older PCH platforms too */
+ else
+ spt_hpd_detection_setup(dev_priv);
}
static void gen5_gt_irq_postinstall(struct drm_device *dev)
@@ -3391,7 +3453,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
GEN9_AUX_CHANNEL_D;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
de_port_masked |= BXT_DE_PORT_GMBUS;
} else {
de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
@@ -3402,7 +3464,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN8_PIPE_FIFO_UNDERRUN;
de_port_enables = de_port_masked;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
@@ -3420,6 +3482,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+
+ if (IS_GEN9_LP(dev_priv))
+ bxt_hpd_detection_setup(dev_priv);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4227,7 +4292,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = gen8_irq_uninstall;
dev->driver->enable_vblank = gen8_enable_vblank;
dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
new file mode 100644
index 000000000000..4ddf756add31
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -0,0 +1,752 @@
+/*
+ * Autogenerated file, DO NOT EDIT manually!
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "i915_oa_hsw.h"
+
+enum metric_set_id {
+ METRIC_SET_ID_RENDER_BASIC = 1,
+ METRIC_SET_ID_COMPUTE_BASIC,
+ METRIC_SET_ID_COMPUTE_EXTENDED,
+ METRIC_SET_ID_MEMORY_READS,
+ METRIC_SET_ID_MEMORY_WRITES,
+ METRIC_SET_ID_SAMPLER_BALANCE,
+};
+
+int i915_oa_n_builtin_metric_sets_hsw = 6;
+
+static const struct i915_oa_reg b_counter_config_render_basic[] = {
+ { _MMIO(0x2724), 0x00800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2714), 0x00800000 },
+ { _MMIO(0x2710), 0x00000000 },
+};
+
+static const struct i915_oa_reg mux_config_render_basic[] = {
+ { _MMIO(0x253a4), 0x01600000 },
+ { _MMIO(0x25440), 0x00100000 },
+ { _MMIO(0x25128), 0x00000000 },
+ { _MMIO(0x2691c), 0x00000800 },
+ { _MMIO(0x26aa0), 0x01500000 },
+ { _MMIO(0x26b9c), 0x00006000 },
+ { _MMIO(0x2791c), 0x00000800 },
+ { _MMIO(0x27aa0), 0x01500000 },
+ { _MMIO(0x27b9c), 0x00006000 },
+ { _MMIO(0x2641c), 0x00000400 },
+ { _MMIO(0x25380), 0x00000010 },
+ { _MMIO(0x2538c), 0x00000000 },
+ { _MMIO(0x25384), 0x0800aaaa },
+ { _MMIO(0x25400), 0x00000004 },
+ { _MMIO(0x2540c), 0x06029000 },
+ { _MMIO(0x25410), 0x00000002 },
+ { _MMIO(0x25404), 0x5c30ffff },
+ { _MMIO(0x25100), 0x00000016 },
+ { _MMIO(0x25110), 0x00000400 },
+ { _MMIO(0x25104), 0x00000000 },
+ { _MMIO(0x26804), 0x00001211 },
+ { _MMIO(0x26884), 0x00000100 },
+ { _MMIO(0x26900), 0x00000002 },
+ { _MMIO(0x26908), 0x00700000 },
+ { _MMIO(0x26904), 0x00000000 },
+ { _MMIO(0x26984), 0x00001022 },
+ { _MMIO(0x26a04), 0x00000011 },
+ { _MMIO(0x26a80), 0x00000006 },
+ { _MMIO(0x26a88), 0x00000c02 },
+ { _MMIO(0x26a84), 0x00000000 },
+ { _MMIO(0x26b04), 0x00001000 },
+ { _MMIO(0x26b80), 0x00000002 },
+ { _MMIO(0x26b8c), 0x00000007 },
+ { _MMIO(0x26b84), 0x00000000 },
+ { _MMIO(0x27804), 0x00004844 },
+ { _MMIO(0x27884), 0x00000400 },
+ { _MMIO(0x27900), 0x00000002 },
+ { _MMIO(0x27908), 0x0e000000 },
+ { _MMIO(0x27904), 0x00000000 },
+ { _MMIO(0x27984), 0x00004088 },
+ { _MMIO(0x27a04), 0x00000044 },
+ { _MMIO(0x27a80), 0x00000006 },
+ { _MMIO(0x27a88), 0x00018040 },
+ { _MMIO(0x27a84), 0x00000000 },
+ { _MMIO(0x27b04), 0x00004000 },
+ { _MMIO(0x27b80), 0x00000002 },
+ { _MMIO(0x27b8c), 0x000000e0 },
+ { _MMIO(0x27b84), 0x00000000 },
+ { _MMIO(0x26104), 0x00002222 },
+ { _MMIO(0x26184), 0x0c006666 },
+ { _MMIO(0x26284), 0x04000000 },
+ { _MMIO(0x26304), 0x04000000 },
+ { _MMIO(0x26400), 0x00000002 },
+ { _MMIO(0x26410), 0x000000a0 },
+ { _MMIO(0x26404), 0x00000000 },
+ { _MMIO(0x25420), 0x04108020 },
+ { _MMIO(0x25424), 0x1284a420 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00042049 },
+};
+
+static const struct i915_oa_reg *
+get_render_basic_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_render_basic);
+ return mux_config_render_basic;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_basic[] = {
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2714), 0x00800000 },
+ { _MMIO(0x2718), 0xaaaaaaaa },
+ { _MMIO(0x271c), 0xaaaaaaaa },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2724), 0x00800000 },
+ { _MMIO(0x2728), 0xaaaaaaaa },
+ { _MMIO(0x272c), 0xaaaaaaaa },
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2744), 0x00000000 },
+ { _MMIO(0x2748), 0x00000000 },
+ { _MMIO(0x274c), 0x00000000 },
+ { _MMIO(0x2750), 0x00000000 },
+ { _MMIO(0x2754), 0x00000000 },
+ { _MMIO(0x2758), 0x00000000 },
+ { _MMIO(0x275c), 0x00000000 },
+ { _MMIO(0x236c), 0x00000000 },
+};
+
+static const struct i915_oa_reg mux_config_compute_basic[] = {
+ { _MMIO(0x253a4), 0x00000000 },
+ { _MMIO(0x2681c), 0x01f00800 },
+ { _MMIO(0x26820), 0x00001000 },
+ { _MMIO(0x2781c), 0x01f00800 },
+ { _MMIO(0x26520), 0x00000007 },
+ { _MMIO(0x265a0), 0x00000007 },
+ { _MMIO(0x25380), 0x00000010 },
+ { _MMIO(0x2538c), 0x00300000 },
+ { _MMIO(0x25384), 0xaa8aaaaa },
+ { _MMIO(0x25404), 0xffffffff },
+ { _MMIO(0x26800), 0x00004202 },
+ { _MMIO(0x26808), 0x00605817 },
+ { _MMIO(0x2680c), 0x10001005 },
+ { _MMIO(0x26804), 0x00000000 },
+ { _MMIO(0x27800), 0x00000102 },
+ { _MMIO(0x27808), 0x0c0701e0 },
+ { _MMIO(0x2780c), 0x000200a0 },
+ { _MMIO(0x27804), 0x00000000 },
+ { _MMIO(0x26484), 0x44000000 },
+ { _MMIO(0x26704), 0x44000000 },
+ { _MMIO(0x26500), 0x00000006 },
+ { _MMIO(0x26510), 0x00000001 },
+ { _MMIO(0x26504), 0x88000000 },
+ { _MMIO(0x26580), 0x00000006 },
+ { _MMIO(0x26590), 0x00000020 },
+ { _MMIO(0x26584), 0x00000000 },
+ { _MMIO(0x26104), 0x55822222 },
+ { _MMIO(0x26184), 0xaa866666 },
+ { _MMIO(0x25420), 0x08320c83 },
+ { _MMIO(0x25424), 0x06820c83 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00000c03 },
+};
+
+static const struct i915_oa_reg *
+get_compute_basic_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_compute_basic);
+ return mux_config_compute_basic;
+}
+
+static const struct i915_oa_reg b_counter_config_compute_extended[] = {
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2770), 0x0007fe2a },
+ { _MMIO(0x2774), 0x0000ff00 },
+ { _MMIO(0x2778), 0x0007fe6a },
+ { _MMIO(0x277c), 0x0000ff00 },
+ { _MMIO(0x2780), 0x0007fe92 },
+ { _MMIO(0x2784), 0x0000ff00 },
+ { _MMIO(0x2788), 0x0007fea2 },
+ { _MMIO(0x278c), 0x0000ff00 },
+ { _MMIO(0x2790), 0x0007fe32 },
+ { _MMIO(0x2794), 0x0000ff00 },
+ { _MMIO(0x2798), 0x0007fe9a },
+ { _MMIO(0x279c), 0x0000ff00 },
+ { _MMIO(0x27a0), 0x0007ff23 },
+ { _MMIO(0x27a4), 0x0000ff00 },
+ { _MMIO(0x27a8), 0x0007fff3 },
+ { _MMIO(0x27ac), 0x0000fffe },
+};
+
+static const struct i915_oa_reg mux_config_compute_extended[] = {
+ { _MMIO(0x2681c), 0x3eb00800 },
+ { _MMIO(0x26820), 0x00900000 },
+ { _MMIO(0x25384), 0x02aaaaaa },
+ { _MMIO(0x25404), 0x03ffffff },
+ { _MMIO(0x26800), 0x00142284 },
+ { _MMIO(0x26808), 0x0e629062 },
+ { _MMIO(0x2680c), 0x3f6f55cb },
+ { _MMIO(0x26810), 0x00000014 },
+ { _MMIO(0x26804), 0x00000000 },
+ { _MMIO(0x26104), 0x02aaaaaa },
+ { _MMIO(0x26184), 0x02aaaaaa },
+ { _MMIO(0x25420), 0x00000000 },
+ { _MMIO(0x25424), 0x00000000 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00000000 },
+};
+
+static const struct i915_oa_reg *
+get_compute_extended_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_compute_extended);
+ return mux_config_compute_extended;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_reads[] = {
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x274c), 0x76543298 },
+ { _MMIO(0x2748), 0x98989898 },
+ { _MMIO(0x2744), 0x000000e4 },
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x275c), 0x98a98a98 },
+ { _MMIO(0x2758), 0x88888888 },
+ { _MMIO(0x2754), 0x000c5500 },
+ { _MMIO(0x2750), 0x00000000 },
+ { _MMIO(0x2770), 0x0007f81a },
+ { _MMIO(0x2774), 0x0000fc00 },
+ { _MMIO(0x2778), 0x0007f82a },
+ { _MMIO(0x277c), 0x0000fc00 },
+ { _MMIO(0x2780), 0x0007f872 },
+ { _MMIO(0x2784), 0x0000fc00 },
+ { _MMIO(0x2788), 0x0007f8ba },
+ { _MMIO(0x278c), 0x0000fc00 },
+ { _MMIO(0x2790), 0x0007f87a },
+ { _MMIO(0x2794), 0x0000fc00 },
+ { _MMIO(0x2798), 0x0007f8ea },
+ { _MMIO(0x279c), 0x0000fc00 },
+ { _MMIO(0x27a0), 0x0007f8e2 },
+ { _MMIO(0x27a4), 0x0000fc00 },
+ { _MMIO(0x27a8), 0x0007f8f2 },
+ { _MMIO(0x27ac), 0x0000fc00 },
+};
+
+static const struct i915_oa_reg mux_config_memory_reads[] = {
+ { _MMIO(0x253a4), 0x34300000 },
+ { _MMIO(0x25440), 0x2d800000 },
+ { _MMIO(0x25444), 0x00000008 },
+ { _MMIO(0x25128), 0x0e600000 },
+ { _MMIO(0x25380), 0x00000450 },
+ { _MMIO(0x25390), 0x00052c43 },
+ { _MMIO(0x25384), 0x00000000 },
+ { _MMIO(0x25400), 0x00006144 },
+ { _MMIO(0x25408), 0x0a418820 },
+ { _MMIO(0x2540c), 0x000820e6 },
+ { _MMIO(0x25404), 0xff500000 },
+ { _MMIO(0x25100), 0x000005d6 },
+ { _MMIO(0x2510c), 0x0ef00000 },
+ { _MMIO(0x25104), 0x00000000 },
+ { _MMIO(0x25420), 0x02108421 },
+ { _MMIO(0x25424), 0x00008421 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00000000 },
+};
+
+static const struct i915_oa_reg *
+get_memory_reads_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_memory_reads);
+ return mux_config_memory_reads;
+}
+
+static const struct i915_oa_reg b_counter_config_memory_writes[] = {
+ { _MMIO(0x2724), 0xf0800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2714), 0xf0800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x274c), 0x76543298 },
+ { _MMIO(0x2748), 0x98989898 },
+ { _MMIO(0x2744), 0x000000e4 },
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x275c), 0xbabababa },
+ { _MMIO(0x2758), 0x88888888 },
+ { _MMIO(0x2754), 0x000c5500 },
+ { _MMIO(0x2750), 0x00000000 },
+ { _MMIO(0x2770), 0x0007f81a },
+ { _MMIO(0x2774), 0x0000fc00 },
+ { _MMIO(0x2778), 0x0007f82a },
+ { _MMIO(0x277c), 0x0000fc00 },
+ { _MMIO(0x2780), 0x0007f822 },
+ { _MMIO(0x2784), 0x0000fc00 },
+ { _MMIO(0x2788), 0x0007f8ba },
+ { _MMIO(0x278c), 0x0000fc00 },
+ { _MMIO(0x2790), 0x0007f87a },
+ { _MMIO(0x2794), 0x0000fc00 },
+ { _MMIO(0x2798), 0x0007f8ea },
+ { _MMIO(0x279c), 0x0000fc00 },
+ { _MMIO(0x27a0), 0x0007f8e2 },
+ { _MMIO(0x27a4), 0x0000fc00 },
+ { _MMIO(0x27a8), 0x0007f8f2 },
+ { _MMIO(0x27ac), 0x0000fc00 },
+};
+
+static const struct i915_oa_reg mux_config_memory_writes[] = {
+ { _MMIO(0x253a4), 0x34300000 },
+ { _MMIO(0x25440), 0x01500000 },
+ { _MMIO(0x25444), 0x00000120 },
+ { _MMIO(0x25128), 0x0c200000 },
+ { _MMIO(0x25380), 0x00000450 },
+ { _MMIO(0x25390), 0x00052c43 },
+ { _MMIO(0x25384), 0x00000000 },
+ { _MMIO(0x25400), 0x00007184 },
+ { _MMIO(0x25408), 0x0a418820 },
+ { _MMIO(0x2540c), 0x000820e6 },
+ { _MMIO(0x25404), 0xff500000 },
+ { _MMIO(0x25100), 0x000005d6 },
+ { _MMIO(0x2510c), 0x1e700000 },
+ { _MMIO(0x25104), 0x00000000 },
+ { _MMIO(0x25420), 0x02108421 },
+ { _MMIO(0x25424), 0x00008421 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x00000000 },
+};
+
+static const struct i915_oa_reg *
+get_memory_writes_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_memory_writes);
+ return mux_config_memory_writes;
+}
+
+static const struct i915_oa_reg b_counter_config_sampler_balance[] = {
+ { _MMIO(0x2740), 0x00000000 },
+ { _MMIO(0x2744), 0x00800000 },
+ { _MMIO(0x2710), 0x00000000 },
+ { _MMIO(0x2714), 0x00800000 },
+ { _MMIO(0x2720), 0x00000000 },
+ { _MMIO(0x2724), 0x00800000 },
+};
+
+static const struct i915_oa_reg mux_config_sampler_balance[] = {
+ { _MMIO(0x2eb9c), 0x01906400 },
+ { _MMIO(0x2fb9c), 0x01906400 },
+ { _MMIO(0x253a4), 0x00000000 },
+ { _MMIO(0x26b9c), 0x01906400 },
+ { _MMIO(0x27b9c), 0x01906400 },
+ { _MMIO(0x27104), 0x00a00000 },
+ { _MMIO(0x27184), 0x00a50000 },
+ { _MMIO(0x2e804), 0x00500000 },
+ { _MMIO(0x2e984), 0x00500000 },
+ { _MMIO(0x2eb04), 0x00500000 },
+ { _MMIO(0x2eb80), 0x00000084 },
+ { _MMIO(0x2eb8c), 0x14200000 },
+ { _MMIO(0x2eb84), 0x00000000 },
+ { _MMIO(0x2f804), 0x00050000 },
+ { _MMIO(0x2f984), 0x00050000 },
+ { _MMIO(0x2fb04), 0x00050000 },
+ { _MMIO(0x2fb80), 0x00000084 },
+ { _MMIO(0x2fb8c), 0x00050800 },
+ { _MMIO(0x2fb84), 0x00000000 },
+ { _MMIO(0x25380), 0x00000010 },
+ { _MMIO(0x2538c), 0x000000c0 },
+ { _MMIO(0x25384), 0xaa550000 },
+ { _MMIO(0x25404), 0xffffc000 },
+ { _MMIO(0x26804), 0x50000000 },
+ { _MMIO(0x26984), 0x50000000 },
+ { _MMIO(0x26b04), 0x50000000 },
+ { _MMIO(0x26b80), 0x00000084 },
+ { _MMIO(0x26b90), 0x00050800 },
+ { _MMIO(0x26b84), 0x00000000 },
+ { _MMIO(0x27804), 0x05000000 },
+ { _MMIO(0x27984), 0x05000000 },
+ { _MMIO(0x27b04), 0x05000000 },
+ { _MMIO(0x27b80), 0x00000084 },
+ { _MMIO(0x27b90), 0x00000142 },
+ { _MMIO(0x27b84), 0x00000000 },
+ { _MMIO(0x26104), 0xa0000000 },
+ { _MMIO(0x26184), 0xa5000000 },
+ { _MMIO(0x25424), 0x00008620 },
+ { _MMIO(0x2541c), 0x00000000 },
+ { _MMIO(0x25428), 0x0004a54a },
+};
+
+static const struct i915_oa_reg *
+get_sampler_balance_mux_config(struct drm_i915_private *dev_priv,
+ int *len)
+{
+ *len = ARRAY_SIZE(mux_config_sampler_balance);
+ return mux_config_sampler_balance;
+}
+
+int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv)
+{
+ dev_priv->perf.oa.mux_regs = NULL;
+ dev_priv->perf.oa.mux_regs_len = 0;
+ dev_priv->perf.oa.b_counter_regs = NULL;
+ dev_priv->perf.oa.b_counter_regs_len = 0;
+
+ switch (dev_priv->perf.oa.metrics_set) {
+ case METRIC_SET_ID_RENDER_BASIC:
+ dev_priv->perf.oa.mux_regs =
+ get_render_basic_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"RENDER_BASIC\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_render_basic;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_render_basic);
+
+ return 0;
+ case METRIC_SET_ID_COMPUTE_BASIC:
+ dev_priv->perf.oa.mux_regs =
+ get_compute_basic_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_BASIC\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_compute_basic;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_compute_basic);
+
+ return 0;
+ case METRIC_SET_ID_COMPUTE_EXTENDED:
+ dev_priv->perf.oa.mux_regs =
+ get_compute_extended_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"COMPUTE_EXTENDED\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_compute_extended;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_compute_extended);
+
+ return 0;
+ case METRIC_SET_ID_MEMORY_READS:
+ dev_priv->perf.oa.mux_regs =
+ get_memory_reads_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_READS\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_memory_reads;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_memory_reads);
+
+ return 0;
+ case METRIC_SET_ID_MEMORY_WRITES:
+ dev_priv->perf.oa.mux_regs =
+ get_memory_writes_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"MEMORY_WRITES\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_memory_writes;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_memory_writes);
+
+ return 0;
+ case METRIC_SET_ID_SAMPLER_BALANCE:
+ dev_priv->perf.oa.mux_regs =
+ get_sampler_balance_mux_config(dev_priv,
+ &dev_priv->perf.oa.mux_regs_len);
+ if (!dev_priv->perf.oa.mux_regs) {
+ DRM_DEBUG_DRIVER("No suitable MUX config for \"SAMPLER_BALANCE\" metric set");
+
+ /* EINVAL because *_register_sysfs already checked this
+ * and so it wouldn't have been advertised to userspace and
+ * so shouldn't have been requested
+ */
+ return -EINVAL;
+ }
+
+ dev_priv->perf.oa.b_counter_regs =
+ b_counter_config_sampler_balance;
+ dev_priv->perf.oa.b_counter_regs_len =
+ ARRAY_SIZE(b_counter_config_sampler_balance);
+
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static ssize_t
+show_render_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_RENDER_BASIC);
+}
+
+static struct device_attribute dev_attr_render_basic_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_render_basic_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_render_basic[] = {
+ &dev_attr_render_basic_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_render_basic = {
+ .name = "403d8832-1a27-4aa6-a64e-f5389ce7b212",
+ .attrs = attrs_render_basic,
+};
+
+static ssize_t
+show_compute_basic_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_BASIC);
+}
+
+static struct device_attribute dev_attr_compute_basic_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_compute_basic_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_compute_basic[] = {
+ &dev_attr_compute_basic_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_compute_basic = {
+ .name = "39ad14bc-2380-45c4-91eb-fbcb3aa7ae7b",
+ .attrs = attrs_compute_basic,
+};
+
+static ssize_t
+show_compute_extended_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_COMPUTE_EXTENDED);
+}
+
+static struct device_attribute dev_attr_compute_extended_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_compute_extended_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_compute_extended[] = {
+ &dev_attr_compute_extended_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_compute_extended = {
+ .name = "3865be28-6982-49fe-9494-e4d1b4795413",
+ .attrs = attrs_compute_extended,
+};
+
+static ssize_t
+show_memory_reads_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_READS);
+}
+
+static struct device_attribute dev_attr_memory_reads_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_memory_reads_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_memory_reads[] = {
+ &dev_attr_memory_reads_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_memory_reads = {
+ .name = "bb5ed49b-2497-4095-94f6-26ba294db88a",
+ .attrs = attrs_memory_reads,
+};
+
+static ssize_t
+show_memory_writes_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_MEMORY_WRITES);
+}
+
+static struct device_attribute dev_attr_memory_writes_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_memory_writes_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_memory_writes[] = {
+ &dev_attr_memory_writes_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_memory_writes = {
+ .name = "3358d639-9b5f-45ab-976d-9b08cbfc6240",
+ .attrs = attrs_memory_writes,
+};
+
+static ssize_t
+show_sampler_balance_id(struct device *kdev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", METRIC_SET_ID_SAMPLER_BALANCE);
+}
+
+static struct device_attribute dev_attr_sampler_balance_id = {
+ .attr = { .name = "id", .mode = 0444 },
+ .show = show_sampler_balance_id,
+ .store = NULL,
+};
+
+static struct attribute *attrs_sampler_balance[] = {
+ &dev_attr_sampler_balance_id.attr,
+ NULL,
+};
+
+static struct attribute_group group_sampler_balance = {
+ .name = "bc274488-b4b6-40c7-90da-b77d7ad16189",
+ .attrs = attrs_sampler_balance,
+};
+
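+/* Note (illustrative, not part of the generated tables): each group registered
+ * below shows up as a GUID-named directory containing a single "id" file under
+ * the perf metrics kobject (assuming that kobject lives under the DRM card
+ * device), something like:
+ *
+ *   /sys/class/drm/card0/metrics/403d8832-1a27-4aa6-a64e-f5389ce7b212/id
+ *
+ * Userspace reads that "id" value to select a metric set when opening an i915
+ * perf stream.
+ */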
+int
+i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv)
+{
+ int mux_len;
+ int ret = 0;
+
+ if (get_render_basic_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+ if (ret)
+ goto error_render_basic;
+ }
+ if (get_compute_basic_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+ if (ret)
+ goto error_compute_basic;
+ }
+ if (get_compute_extended_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+ if (ret)
+ goto error_compute_extended;
+ }
+ if (get_memory_reads_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+ if (ret)
+ goto error_memory_reads;
+ }
+ if (get_memory_writes_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+ if (ret)
+ goto error_memory_writes;
+ }
+ if (get_sampler_balance_mux_config(dev_priv, &mux_len)) {
+ ret = sysfs_create_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
+ if (ret)
+ goto error_sampler_balance;
+ }
+
+ return 0;
+
+error_sampler_balance:
+ if (get_memory_writes_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+error_memory_writes:
+ if (get_memory_reads_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+error_memory_reads:
+ if (get_compute_extended_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+error_compute_extended:
+ if (get_compute_basic_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+error_compute_basic:
+ if (get_render_basic_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+error_render_basic:
+ return ret;
+}
+
+void
+i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv)
+{
+ int mux_len;
+
+ if (get_render_basic_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_render_basic);
+ if (get_compute_basic_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_basic);
+ if (get_compute_extended_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_compute_extended);
+ if (get_memory_reads_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_reads);
+ if (get_memory_writes_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_memory_writes);
+ if (get_sampler_balance_mux_config(dev_priv, &mux_len))
+ sysfs_remove_group(dev_priv->perf.metrics_kobj, &group_sampler_balance);
+}
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
new file mode 100644
index 000000000000..429a229b5158
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -0,0 +1,38 @@
+/*
+ * Autogenerated file, DO NOT EDIT manually!
+ *
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_OA_HSW_H__
+#define __I915_OA_HSW_H__
+
+extern int i915_oa_n_builtin_metric_sets_hsw;
+
+extern int i915_oa_select_metric_set_hsw(struct drm_i915_private *dev_priv);
+
+extern int i915_perf_register_sysfs_hsw(struct drm_i915_private *dev_priv);
+
+extern void i915_perf_unregister_sysfs_hsw(struct drm_i915_private *dev_priv);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d46ffe7086bc..0e280fbd52f1 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -50,7 +50,7 @@ struct i915_params i915 __read_mostly = {
.error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
- .enable_cmd_parser = 1,
+ .enable_cmd_parser = true,
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
@@ -188,9 +188,9 @@ MODULE_PARM_DESC(invert_brightness,
module_param_named(disable_display, i915.disable_display, bool, 0400);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
-module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
+module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400);
MODULE_PARM_DESC(enable_cmd_parser,
- "Enable command parsing (1=enabled [default], 0=disabled)");
+ "Enable command parsing (true=enabled [default], false=disabled)");
module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 817ad959941e..8e433de04679 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -44,7 +44,6 @@ struct i915_params {
int disable_power_well;
int enable_ips;
int invert_brightness;
- int enable_cmd_parser;
int enable_guc_loading;
int enable_guc_submission;
int guc_log_level;
@@ -53,6 +52,7 @@ struct i915_params {
int edp_vswing;
unsigned int inject_load_failure;
/* leave bools at the end to not create holes */
+ bool enable_cmd_parser;
bool enable_hangcheck;
bool fastboot;
bool prefault_disable;
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index fce8e198bc76..ecb487b5356f 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -54,6 +54,7 @@
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
+/* Keep in gen based order, and chronological order within a gen */
#define GEN2_FEATURES \
.gen = 2, .num_pipes = 1, \
.has_overlay = 1, .overlay_needs_physical = 1, \
@@ -65,17 +66,19 @@
static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
+ .platform = INTEL_I830,
.is_mobile = 1, .cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
-static const struct intel_device_info intel_845g_info = {
+static const struct intel_device_info intel_i845g_info = {
GEN2_FEATURES,
+ .platform = INTEL_I845G,
};
static const struct intel_device_info intel_i85x_info = {
GEN2_FEATURES,
- .is_i85x = 1, .is_mobile = 1,
+ .platform = INTEL_I85X, .is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
.cursor_needs_physical = 1,
.has_fbc = 1,
@@ -83,6 +86,7 @@ static const struct intel_device_info intel_i85x_info = {
static const struct intel_device_info intel_i865g_info = {
GEN2_FEATURES,
+ .platform = INTEL_I865G,
};
#define GEN3_FEATURES \
@@ -94,12 +98,14 @@ static const struct intel_device_info intel_i865g_info = {
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
- .is_i915g = 1, .cursor_needs_physical = 1,
+ .platform = INTEL_I915G, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
};
+
static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
+ .platform = INTEL_I915GM,
.is_mobile = 1,
.cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -107,15 +113,18 @@ static const struct intel_device_info intel_i915gm_info = {
.has_fbc = 1,
.hws_needs_physical = 1,
};
+
static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
+ .platform = INTEL_I945G,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
};
+
static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
- .is_i945gm = 1, .is_mobile = 1,
+ .platform = INTEL_I945GM, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.supports_tv = 1,
@@ -123,6 +132,20 @@ static const struct intel_device_info intel_i945gm_info = {
.hws_needs_physical = 1,
};
+static const struct intel_device_info intel_g33_info = {
+ GEN3_FEATURES,
+ .platform = INTEL_G33,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
+static const struct intel_device_info intel_pineview_info = {
+ GEN3_FEATURES,
+ .platform = INTEL_PINEVIEW, .is_mobile = 1,
+ .has_hotplug = 1,
+ .has_overlay = 1,
+};
+
#define GEN4_FEATURES \
.gen = 4, .num_pipes = 2, \
.has_hotplug = 1, \
@@ -133,50 +156,36 @@ static const struct intel_device_info intel_i945gm_info = {
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
- .is_broadwater = 1,
+ .platform = INTEL_I965G,
.has_overlay = 1,
.hws_needs_physical = 1,
};
static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
- .is_crestline = 1,
+ .platform = INTEL_I965GM,
.is_mobile = 1, .has_fbc = 1,
.has_overlay = 1,
.supports_tv = 1,
.hws_needs_physical = 1,
};
-static const struct intel_device_info intel_g33_info = {
- GEN3_FEATURES,
- .is_g33 = 1,
- .has_hotplug = 1,
- .has_overlay = 1,
-};
-
static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
- .is_g4x = 1,
+ .platform = INTEL_G45,
.has_pipe_cxsr = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
- .is_g4x = 1,
+ .platform = INTEL_GM45,
.is_mobile = 1, .has_fbc = 1,
.has_pipe_cxsr = 1,
.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
-static const struct intel_device_info intel_pineview_info = {
- GEN3_FEATURES,
- .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
- .has_hotplug = 1,
- .has_overlay = 1,
-};
-
#define GEN5_FEATURES \
.gen = 5, .num_pipes = 2, \
.has_hotplug = 1, \
@@ -187,10 +196,12 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
+ .platform = INTEL_IRONLAKE,
};
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
+ .platform = INTEL_IRONLAKE,
.is_mobile = 1,
};
@@ -204,15 +215,18 @@ static const struct intel_device_info intel_ironlake_m_info = {
.has_rc6p = 1, \
.has_gmbus_irq = 1, \
.has_hw_contexts = 1, \
+ .has_aliasing_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
static const struct intel_device_info intel_sandybridge_d_info = {
GEN6_FEATURES,
+ .platform = INTEL_SANDYBRIDGE,
};
static const struct intel_device_info intel_sandybridge_m_info = {
GEN6_FEATURES,
+ .platform = INTEL_SANDYBRIDGE,
.is_mobile = 1,
};
@@ -226,46 +240,49 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_rc6p = 1, \
.has_gmbus_irq = 1, \
.has_hw_contexts = 1, \
+ .has_aliasing_ppgtt = 1, \
+ .has_full_ppgtt = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
- .is_ivybridge = 1,
+ .platform = INTEL_IVYBRIDGE,
.has_l3_dpf = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
- .is_ivybridge = 1,
+ .platform = INTEL_IVYBRIDGE,
.is_mobile = 1,
.has_l3_dpf = 1,
};
static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
- .is_ivybridge = 1,
+ .platform = INTEL_IVYBRIDGE,
.num_pipes = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
-#define VLV_FEATURES \
- .gen = 7, .num_pipes = 2, \
- .has_psr = 1, \
- .has_runtime_pm = 1, \
- .has_rc6 = 1, \
- .has_gmbus_irq = 1, \
- .has_hw_contexts = 1, \
- .has_gmch_display = 1, \
- .has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
- .display_mmio_offset = VLV_DISPLAY_BASE, \
- GEN_DEFAULT_PIPEOFFSETS, \
- CURSOR_OFFSETS
-
static const struct intel_device_info intel_valleyview_info = {
- VLV_FEATURES,
- .is_valleyview = 1,
+ .platform = INTEL_VALLEYVIEW,
+ .gen = 7,
+ .is_lp = 1,
+ .num_pipes = 2,
+ .has_psr = 1,
+ .has_runtime_pm = 1,
+ .has_rc6 = 1,
+ .has_gmbus_irq = 1,
+ .has_hw_contexts = 1,
+ .has_gmch_display = 1,
+ .has_hotplug = 1,
+ .has_aliasing_ppgtt = 1,
+ .has_full_ppgtt = 1,
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .display_mmio_offset = VLV_DISPLAY_BASE,
+ GEN_DEFAULT_PIPEOFFSETS,
+ CURSOR_OFFSETS
};
#define HSW_FEATURES \
@@ -281,7 +298,7 @@ static const struct intel_device_info intel_valleyview_info = {
static const struct intel_device_info intel_haswell_info = {
HSW_FEATURES,
- .is_haswell = 1,
+ .platform = INTEL_HASWELL,
.has_l3_dpf = 1,
};
@@ -289,26 +306,28 @@ static const struct intel_device_info intel_haswell_info = {
HSW_FEATURES, \
BDW_COLORS, \
.has_logical_ring_contexts = 1, \
+ .has_full_48bit_ppgtt = 1, \
.has_64bit_reloc = 1
static const struct intel_device_info intel_broadwell_info = {
BDW_FEATURES,
.gen = 8,
- .is_broadwell = 1,
+ .platform = INTEL_BROADWELL,
};
static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_FEATURES,
.gen = 8,
- .is_broadwell = 1,
+ .platform = INTEL_BROADWELL,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_cherryview_info = {
.gen = 8, .num_pipes = 3,
.has_hotplug = 1,
+ .is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .is_cherryview = 1,
+ .platform = INTEL_CHERRYVIEW,
.has_64bit_reloc = 1,
.has_psr = 1,
.has_runtime_pm = 1,
@@ -318,6 +337,8 @@ static const struct intel_device_info intel_cherryview_info = {
.has_hw_contexts = 1,
.has_logical_ring_contexts = 1,
.has_gmch_display = 1,
+ .has_aliasing_ppgtt = 1,
+ .has_full_ppgtt = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
@@ -326,7 +347,7 @@ static const struct intel_device_info intel_cherryview_info = {
static const struct intel_device_info intel_skylake_info = {
BDW_FEATURES,
- .is_skylake = 1,
+ .platform = INTEL_SKYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
@@ -335,7 +356,7 @@ static const struct intel_device_info intel_skylake_info = {
static const struct intel_device_info intel_skylake_gt3_info = {
BDW_FEATURES,
- .is_skylake = 1,
+ .platform = INTEL_SKYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
@@ -343,36 +364,50 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
+#define GEN9_LP_FEATURES \
+ .gen = 9, \
+ .is_lp = 1, \
+ .has_hotplug = 1, \
+ .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .num_pipes = 3, \
+ .has_64bit_reloc = 1, \
+ .has_ddi = 1, \
+ .has_fpga_dbg = 1, \
+ .has_fbc = 1, \
+ .has_runtime_pm = 1, \
+ .has_pooled_eu = 0, \
+ .has_csr = 1, \
+ .has_resource_streamer = 1, \
+ .has_rc6 = 1, \
+ .has_dp_mst = 1, \
+ .has_gmbus_irq = 1, \
+ .has_hw_contexts = 1, \
+ .has_logical_ring_contexts = 1, \
+ .has_guc = 1, \
+ .has_decoupled_mmio = 1, \
+ .has_aliasing_ppgtt = 1, \
+ .has_full_ppgtt = 1, \
+ .has_full_48bit_ppgtt = 1, \
+ GEN_DEFAULT_PIPEOFFSETS, \
+ IVB_CURSOR_OFFSETS, \
+ BDW_COLORS
+
static const struct intel_device_info intel_broxton_info = {
- .is_broxton = 1,
- .gen = 9,
- .has_hotplug = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
- .num_pipes = 3,
- .has_64bit_reloc = 1,
- .has_ddi = 1,
- .has_fpga_dbg = 1,
- .has_fbc = 1,
- .has_runtime_pm = 1,
- .has_pooled_eu = 0,
- .has_csr = 1,
- .has_resource_streamer = 1,
- .has_rc6 = 1,
- .has_dp_mst = 1,
- .has_gmbus_irq = 1,
- .has_hw_contexts = 1,
- .has_logical_ring_contexts = 1,
- .has_guc = 1,
- .has_decoupled_mmio = 1,
+ GEN9_LP_FEATURES,
+ .platform = INTEL_BROXTON,
.ddb_size = 512,
- GEN_DEFAULT_PIPEOFFSETS,
- IVB_CURSOR_OFFSETS,
- BDW_COLORS,
+};
+
+static const struct intel_device_info intel_geminilake_info = {
+ GEN9_LP_FEATURES,
+ .platform = INTEL_GEMINILAKE,
+ .is_alpha_support = 1,
+ .ddb_size = 1024,
};
static const struct intel_device_info intel_kabylake_info = {
BDW_FEATURES,
- .is_kabylake = 1,
+ .platform = INTEL_KABYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
@@ -381,7 +416,7 @@ static const struct intel_device_info intel_kabylake_info = {
static const struct intel_device_info intel_kabylake_gt3_info = {
BDW_FEATURES,
- .is_kabylake = 1,
+ .platform = INTEL_KABYLAKE,
.gen = 9,
.has_csr = 1,
.has_guc = 1,
@@ -397,7 +432,7 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
*/
static const struct pci_device_id pciidlist[] = {
INTEL_I830_IDS(&intel_i830_info),
- INTEL_I845G_IDS(&intel_845g_info),
+ INTEL_I845G_IDS(&intel_i845g_info),
INTEL_I85X_IDS(&intel_i85x_info),
INTEL_I865G_IDS(&intel_i865g_info),
INTEL_I915G_IDS(&intel_i915g_info),
@@ -421,12 +456,14 @@ static const struct pci_device_id pciidlist[] = {
INTEL_VLV_IDS(&intel_valleyview_info),
INTEL_BDW_GT12_IDS(&intel_broadwell_info),
INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
+ INTEL_BDW_RSVD_IDS(&intel_broadwell_info),
INTEL_CHV_IDS(&intel_cherryview_info),
INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
INTEL_BXT_IDS(&intel_broxton_info),
+ INTEL_GLK_IDS(&intel_geminilake_info),
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
new file mode 100644
index 000000000000..a1b7eec58be2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -0,0 +1,2096 @@
+/*
+ * Copyright © 2015-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Robert Bragg <robert@sixbynine.org>
+ */
+
+
+/**
+ * DOC: i915 Perf Overview
+ *
+ * Gen graphics supports a large number of performance counters that can help
+ * driver and application developers understand and optimize their use of the
+ * GPU.
+ *
+ * This i915 perf interface enables userspace to configure and open a file
+ * descriptor representing a stream of GPU metrics which can then be read() as
+ * a stream of sample records.
+ *
+ * The interface is particularly suited to exposing buffered metrics that are
+ * captured by DMA from the GPU, unsynchronized with and unrelated to the CPU.
+ *
+ * Streams representing a single context are accessible to applications with a
+ * corresponding drm file descriptor, such that OpenGL can use the interface
+ * without special privileges. Access to system-wide metrics requires root
+ * privileges by default, unless changed via the dev.i915.perf_event_paranoid
+ * sysctl option.
+ *
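+ * As a rough, non-normative sketch of the flow (the property, flag and record
+ * names here are the uapi additions that accompany this interface; the values,
+ * the metric set ID read back from sysfs, and all error handling are
+ * illustrative only, and num_properties counts (key, value) pairs), opening
+ * and then reading an OA stream from userspace looks something like::
+ *
+ *     __u64 props[] = {
+ *             DRM_I915_PERF_PROP_SAMPLE_OA, 1,
+ *             DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
+ *             DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
+ *             DRM_I915_PERF_PROP_OA_EXPONENT, 16,
+ *     };
+ *     struct drm_i915_perf_open_param param = {
+ *             .flags = I915_PERF_FLAG_FD_CLOEXEC,
+ *             .num_properties = 4,
+ *             .properties_ptr = (__u64)(unsigned long)props,
+ *     };
+ *     int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
+ *
+ *     read(stream_fd, buf, sizeof(buf));
+ *
+ * where the bytes read back are a sequence of drm_i915_perf_record_header
+ * framed records, such as DRM_I915_PERF_RECORD_SAMPLE records carrying raw
+ * OA reports.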
+ */
+
+/**
+ * DOC: i915 Perf History and Comparison with Core Perf
+ *
+ * The interface was initially inspired by the core Perf infrastructure but
+ * some notable differences are:
+ *
+ * i915 perf file descriptors represent a "stream" instead of an "event"; where
+ * a perf event primarily corresponds to a single 64bit value, a stream might
+ * sample sets of tightly-coupled counters, depending on the
+ * configuration. For example the Gen OA unit isn't designed to support
+ * orthogonal configurations of individual counters; it's configured for a set
+ * of related counters. Samples for an i915 perf stream capturing OA metrics
+ * will include a set of counter values packed in a compact HW specific format.
+ * The OA unit supports a number of different packing formats which can be
+ * selected by the user opening the stream. Perf has support for grouping
+ * events, but each event in the group is configured, validated and
+ * authenticated individually with separate system calls.
+ *
+ * i915 perf stream configurations are provided as an array of u64 (key,value)
+ * pairs, instead of a fixed struct with multiple miscellaneous config members,
+ * interleaved with event-type specific members.
+ *
+ * i915 perf doesn't support exposing metrics via an mmap'd circular buffer.
+ * The supported metrics are being written to memory by the GPU unsynchronized
+ * with the CPU, using HW specific packing formats for counter sets. Sometimes
+ * the constraints on HW configuration require reports to be filtered before it
+ * would be acceptable to expose them to unprivileged applications - to hide
+ * the metrics of other processes/contexts. For these use cases a read() based
+ * interface is a good fit, and provides an opportunity to filter data as it
+ * gets copied from the GPU mapped buffers to userspace buffers.
+ *
+ *
+ * Issues hit with first prototype based on Core Perf
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The first prototype of this driver was based on the core perf
+ * infrastructure, and while we did make that mostly work, with some changes to
+ * perf, we found we were breaking or working around too many assumptions baked
+ * into perf's currently cpu centric design.
+ *
+ * In the end we didn't see a clear benefit to making perf's implementation and
+ * interface more complex by changing design assumptions while we knew we still
+ * wouldn't be able to use any existing perf based userspace tools.
+ *
+ * Also considering the Gen specific nature of the Observability hardware and
+ * how userspace will sometimes need to combine i915 perf OA metrics with
+ * side-band OA data captured via MI_REPORT_PERF_COUNT commands, we're
+ * expecting the interface to be used by a platform specific userspace such as
+ * OpenGL or tools. This is to say, we aren't inherently missing out on having
+ * a standard vendor/architecture agnostic interface by not using perf.
+ *
+ *
+ * For posterity, in case we might re-visit trying to adapt core perf to be
+ * better suited to exposing i915 metrics these were the main pain points we
+ * hit:
+ *
+ * - The perf based OA PMU driver broke some significant design assumptions:
+ *
+ * Existing perf pmus are used for profiling work on a cpu and we were
+ * introducing the idea of _IS_DEVICE pmus with different security
+ * implications, the need to fake cpu-related data (such as user/kernel
+ * registers) to fit with perf's current design, and adding _DEVICE records
+ * as a way to forward device-specific status records.
+ *
+ * The OA unit writes reports of counters into a circular buffer, without
+ * involvement from the CPU, making our PMU driver the first of its kind.
+ *
+ * Given the way we were periodically forwarding data from the GPU-mapped OA
+ * buffer to perf's buffer, those bursts of sample writes looked to perf like
+ * we were sampling too fast and so we had to subvert its throttling checks.
+ *
+ * Perf supports groups of counters and allows those to be read via
+ * transactions internally but transactions currently seem designed to be
+ * explicitly initiated from the cpu (say in response to a userspace read())
+ * and while we could pull a report out of the OA buffer we can't
+ * trigger a report from the cpu on demand.
+ *
+ * Related to being report based, the OA counters are configured in HW as a
+ * set while perf generally expects counter configurations to be orthogonal.
+ * Although counters can be associated with a group leader as they are
+ * opened, there's no clear precedent for being able to provide group-wide
+ * configuration attributes (for example we want to let userspace choose the
+ * OA unit report format used to capture all counters in a set, or specify a
+ * GPU context to filter metrics on). We avoided using perf's grouping
+ * feature and forwarded OA reports to userspace via perf's 'raw' sample
+ * field. This suited our userspace well considering how coupled the counters
+ * are when dealing with normalizing. It would be inconvenient to split
+ * counters up into separate events, only to require userspace to recombine
+ * them. For Mesa it's also convenient to be forwarded raw, periodic reports
+ * for combining with the side-band raw reports it captures using
+ * MI_REPORT_PERF_COUNT commands.
+ *
+ * - As a side note on perf's grouping feature, there was also some concern
+ * that using PERF_FORMAT_GROUP as a way to pack together counter values
+ * would quite drastically inflate our sample sizes, which would likely
+ * lower the effective sampling resolutions we could use when the available
+ * memory bandwidth is limited.
+ *
+ * With the OA unit's report formats, counters are packed together as 32
+ * or 40bit values, with the largest report size being 256 bytes.
+ *
+ * PERF_FORMAT_GROUP values are 64bit, but there doesn't appear to be a
+ * documented ordering to the values, implying PERF_FORMAT_ID must also be
+ * used to add a 64bit ID before each value; giving 16 bytes per counter.
+ *
+ * Related to counter orthogonality; we can't time share the OA unit, while
+ * event scheduling is a central design idea within perf for allowing
+ * userspace to open + enable more events than can be configured in HW at any
+ * one time. The OA unit is not designed to allow re-configuration while in
+ * use. We can't reconfigure the OA unit without losing internal OA unit
+ * state which we can't access explicitly to save and restore. Reconfiguring
+ * the OA unit is also relatively slow, involving ~100 register writes. From
+ * userspace Mesa also depends on a stable OA configuration when emitting
+ * MI_REPORT_PERF_COUNT commands and importantly the OA unit can't be
+ * disabled while there are outstanding MI_RPC commands lest we hang the
+ * command streamer.
+ *
+ * The contents of sample records aren't extensible by device drivers (i.e.
+ * the sample_type bits). As an example; Sourab Gupta had been looking to
+ * attach GPU timestamps to our OA samples. We were shoehorning OA reports
+ * into sample records by using the 'raw' field, but it's tricky to pack more
+ * than one thing into this field because events/core.c currently only lets a
+ * pmu give a single raw data pointer plus len which will be copied into the
+ * ring buffer. To include more than the OA report we'd have to copy the
+ * report into an intermediate larger buffer. I'd been considering allowing a
+ * vector of data+len values to be specified for copying the raw data, but
+ * it felt like a kludge to be using the raw field for this purpose.
+ *
+ * - It felt like our perf based PMU was making some technical compromises
+ * just for the sake of using perf:
+ *
+ * perf_event_open() requires events to either relate to a pid or a specific
+ * cpu core, while our device pmu related to neither. Events opened with a
+ * pid will be automatically enabled/disabled according to the scheduling of
+ * that process - so not appropriate for us. When an event is related to a
+ * cpu id, perf ensures pmu methods will be invoked via an inter process
+ * interrupt on that core. To avoid invasive changes our userspace opened OA
+ * perf events for a specific cpu. This was workable but it meant the
+ * majority of the OA driver ran in atomic context, including all OA report
+ * forwarding, which wasn't really necessary in our case and seems to make
+ * our locking requirements somewhat complex as we handled the interaction
+ * with the rest of the i915 driver.
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/sizes.h>
+
+#include "i915_drv.h"
+#include "i915_oa_hsw.h"
+
+/* HW requires this to be a power of two, between 128k and 16M, though the
+ * driver is currently designed assuming the largest 16M size is used such
+ * that the overflow cases are unlikely in normal operation.
+ */
+#define OA_BUFFER_SIZE SZ_16M
+
+#define OA_TAKEN(tail, head) ((tail - head) & (OA_BUFFER_SIZE - 1))
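+
+/* Illustration: with the 16M buffer above, once the tail has wrapped, e.g.
+ * head == OA_BUFFER_SIZE - 64 and tail == 64, OA_TAKEN(64, OA_BUFFER_SIZE - 64)
+ * still correctly yields 128 bytes of unread data.
+ */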
+
+/* There's a HW race condition between OA unit tail pointer register updates and
+ * writes to memory whereby the tail pointer can sometimes get ahead of what's
+ * been written out to the OA buffer so far.
+ *
+ * Although this can be observed explicitly by checking for a zeroed report-id
+ * field in tail reports, it seems preferable to account for this earlier e.g.
+ * as part of the _oa_buffer_is_empty checks to minimize -EAGAIN polling cycles
+ * in this situation.
+ *
+ * To give time for the most recent reports to land before they may be copied to
+ * userspace, the driver operates as if the tail pointer effectively lags behind
+ * the HW tail pointer by 'tail_margin' bytes. The margin in bytes is calculated
+ * based on this constant in nanoseconds, the current OA sampling exponent
+ * and current report size.
+ *
+ * There is also a fallback check while reading to simply skip over reports with
+ * a zeroed report-id.
+ */
+#define OA_TAIL_MARGIN_NSEC 100000ULL
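+
+/* As a sketch of how this might translate into bytes (the helper name here is
+ * purely illustrative, not something defined in this file):
+ *
+ *   tail_margin = (OA_TAIL_MARGIN_NSEC / oa_period_ns(exponent) + 1) * report_size;
+ *
+ * i.e. enough whole reports to cover the margin interval at the current
+ * sampling period, rounded up by one report.
+ */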
+
+/* frequency for checking whether the OA unit has written new reports to the
+ * circular OA buffer...
+ */
+#define POLL_FREQUENCY 200
+#define POLL_PERIOD (NSEC_PER_SEC / POLL_FREQUENCY)
+
+/* for sysctl proc_dointvec_minmax of dev.i915.perf_stream_paranoid */
+static int zero;
+static int one = 1;
+static u32 i915_perf_stream_paranoid = true;
+
+/* The maximum exponent the hardware accepts is 63 (essentially it selects one
+ * of the 64bit timestamp bits to trigger reports from) but there's currently
+ * no known use case for sampling as infrequently as once per 47 thousand years.
+ *
+ * Since the timestamps included in OA reports are only 32bits it seems
+ * reasonable to limit the OA exponent where it's still possible to account for
+ * overflow in OA report timestamps.
+ */
+#define OA_EXPONENT_MAX 31
+
+#define INVALID_CTX_ID 0xffffffff
+
+
+/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
+ *
+ * 160ns is the smallest sampling period we can theoretically program the OA
+ * unit with on Haswell, corresponding to 6.25MHz.
+ */
+static int oa_sample_rate_hard_limit = 6250000;
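+
+/* For a rough sense of scale (assuming the ~12.5MHz / 80ns OA timestamp period
+ * on Haswell), the sampling period for a given exponent works out as
+ * approximately:
+ *
+ *   period_ns = (2 << exponent) * 80
+ *
+ * so exponent 0 corresponds to the 160ns / 6.25MHz hard limit above, while
+ * OA_EXPONENT_MAX (31) corresponds to a period of roughly 5.7 minutes.
+ */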
+
+/* Theoretically we can program the OA unit to sample every 160ns but don't
+ * allow that by default unless root...
+ *
+ * The default threshold of 100000Hz is based on perf's similar
+ * kernel.perf_event_max_sample_rate sysctl parameter.
+ */
+static u32 i915_oa_max_sample_rate = 100000;
+
+/* XXX: beware if future OA HW adds new report formats that the current
+ * code assumes all reports have a power-of-two size and ~(size - 1) can
+ * be used as a mask to align the OA tail pointer.
+ */
+static struct i915_oa_format hsw_oa_formats[I915_OA_FORMAT_MAX] = {
+ [I915_OA_FORMAT_A13] = { 0, 64 },
+ [I915_OA_FORMAT_A29] = { 1, 128 },
+ [I915_OA_FORMAT_A13_B8_C8] = { 2, 128 },
+ /* A29_B8_C8 Disallowed as 192 bytes doesn't factor into buffer size */
+ [I915_OA_FORMAT_B4_C8] = { 4, 64 },
+ [I915_OA_FORMAT_A45_B8_C8] = { 5, 256 },
+ [I915_OA_FORMAT_B4_C8_A16] = { 6, 128 },
+ [I915_OA_FORMAT_C4_B8] = { 7, 64 },
+};
+
+#define SAMPLE_OA_REPORT (1<<0)
+
+/**
+ * struct perf_open_properties - for validated properties given to open a stream
+ * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags
+ * @single_context: Whether a single or all gpu contexts should be monitored
+ * @ctx_handle: A gem ctx handle for use with @single_context
+ * @metrics_set: An ID for an OA unit metric set advertised via sysfs
+ * @oa_format: An OA unit HW report format
+ * @oa_periodic: Whether to enable periodic OA unit sampling
+ * @oa_period_exponent: The OA unit sampling period is derived from this
+ *
+ * As read_properties_unlocked() enumerates and validates the properties given
+ * to open a stream of metrics the configuration is built up in the structure
+ * which starts out zero initialized.
+ */
+struct perf_open_properties {
+ u32 sample_flags;
+
+ u64 single_context:1;
+ u64 ctx_handle;
+
+ /* OA sampling state */
+ int metrics_set;
+ int oa_format;
+ bool oa_periodic;
+ int oa_period_exponent;
+};
+
+/* NB: This is either called via fops or the poll check hrtimer (atomic ctx)
+ *
+ * It's safe to read OA config state here unlocked, assuming that this is only
+ * called while the stream is enabled, while the global OA configuration can't
+ * be modified.
+ *
+ * Note: we don't lock around the head/tail reads even though there's the slim
+ * possibility of read() fop errors forcing a re-init of the OA buffer
+ * pointers. A race here could result in a false positive !empty status which
+ * is acceptable.
+ */
+static bool gen7_oa_buffer_is_empty_fop_unlocked(struct drm_i915_private *dev_priv)
+{
+ int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ u32 oastatus2 = I915_READ(GEN7_OASTATUS2);
+ u32 oastatus1 = I915_READ(GEN7_OASTATUS1);
+ u32 head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
+ u32 tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+
+ return OA_TAKEN(tail, head) <
+ dev_priv->perf.oa.tail_margin + report_size;
+}
+
+/**
+ * append_oa_status - Appends a status record to a userspace read() buffer.
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ * @type: The kind of status to report to userspace
+ *
+ * Writes a status record (such as `DRM_I915_PERF_RECORD_OA_REPORT_LOST`)
+ * into the userspace read() buffer.
+ *
+ * The @buf @offset will only be updated on success.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static int append_oa_status(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset,
+ enum drm_i915_perf_record_type type)
+{
+ struct drm_i915_perf_record_header header = { type, 0, sizeof(header) };
+
+ if ((count - *offset) < header.size)
+ return -ENOSPC;
+
+ if (copy_to_user(buf + *offset, &header, sizeof(header)))
+ return -EFAULT;
+
+ (*offset) += header.size;
+
+ return 0;
+}
+
+/**
+ * append_oa_sample - Copies a single OA report into userspace read() buffer.
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ * @report: A single OA report to (optionally) include as part of the sample
+ *
+ * The contents of a sample are configured through `DRM_I915_PERF_PROP_SAMPLE_*`
+ * properties when opening a stream, tracked as `stream->sample_flags`. This
+ * function copies the requested components of a single sample to the given
+ * read() @buf.
+ *
+ * The @buf @offset will only be updated on success.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static int append_oa_sample(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset,
+ const u8 *report)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ struct drm_i915_perf_record_header header;
+ u32 sample_flags = stream->sample_flags;
+
+ header.type = DRM_I915_PERF_RECORD_SAMPLE;
+ header.pad = 0;
+ header.size = stream->sample_size;
+
+ if ((count - *offset) < header.size)
+ return -ENOSPC;
+
+ buf += *offset;
+ if (copy_to_user(buf, &header, sizeof(header)))
+ return -EFAULT;
+ buf += sizeof(header);
+
+ if (sample_flags & SAMPLE_OA_REPORT) {
+ if (copy_to_user(buf, report, report_size))
+ return -EFAULT;
+ }
+
+ (*offset) += header.size;
+
+ return 0;
+}
+
+/**
+ * gen7_append_oa_reports - Copies all buffered OA reports into userspace read() buffer.
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ * @head_ptr: (inout): the current oa buffer cpu read position
+ * @tail: the current oa buffer gpu write position
+ *
+ * Notably any error condition resulting in a short read (-%ENOSPC or
+ * -%EFAULT) will be returned even though one or more records may
+ * have been successfully copied. In this case it's up to the caller
+ * to decide if the error should be squashed before returning to
+ * userspace.
+ *
+ * Note: reports are consumed from the head, and appended to the
+ * tail, so the head chases the tail?... If you think that's mad
+ * and back-to-front you're not alone, but this follows the
+ * Gen PRM naming convention.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+static int gen7_append_oa_reports(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset,
+ u32 *head_ptr,
+ u32 tail)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ u8 *oa_buf_base = dev_priv->perf.oa.oa_buffer.vaddr;
+ int tail_margin = dev_priv->perf.oa.tail_margin;
+ u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+ u32 mask = (OA_BUFFER_SIZE - 1);
+ u32 head;
+ u32 taken;
+ int ret = 0;
+
+ if (WARN_ON(!stream->enabled))
+ return -EIO;
+
+ head = *head_ptr - gtt_offset;
+ tail -= gtt_offset;
+
+ /* The OA unit is expected to wrap the tail pointer according to the OA
+ * buffer size and since we should never write a misaligned head
+ * pointer we don't expect to read one back either...
+ */
+ if (tail > OA_BUFFER_SIZE || head > OA_BUFFER_SIZE ||
+ head % report_size) {
+ DRM_ERROR("Inconsistent OA buffer pointer (head = %u, tail = %u): force restart\n",
+ head, tail);
+ dev_priv->perf.oa.ops.oa_disable(dev_priv);
+ dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ *head_ptr = I915_READ(GEN7_OASTATUS2) &
+ GEN7_OASTATUS2_HEAD_MASK;
+ return -EIO;
+ }
+
+
+ /* The tail pointer increases in 64 byte increments, not in report_size
+ * steps...
+ */
+ tail &= ~(report_size - 1);
+
+ /* Move the tail pointer back by the current tail_margin to account for
+ * the possibility that the latest reports may not have really landed
+ * in memory yet...
+ */
+
+ if (OA_TAKEN(tail, head) < report_size + tail_margin)
+ return -EAGAIN;
+
+ tail -= tail_margin;
+ tail &= mask;
+
+ for (/* none */;
+ (taken = OA_TAKEN(tail, head));
+ head = (head + report_size) & mask) {
+ u8 *report = oa_buf_base + head;
+ u32 *report32 = (void *)report;
+
+ /* All the report sizes factor neatly into the buffer
+ * size so we never expect to see a report split
+ * between the beginning and end of the buffer.
+ *
+ * Given the initial alignment check a misalignment
+ * here would imply a driver bug that would result
+ * in an overrun.
+ */
+ if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
+ DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ break;
+ }
+
+ /* The report-ID field for periodic samples includes
+ * some undocumented flags related to what triggered
+ * the report and is never expected to be zero so we
+ * can check that the report isn't invalid before
+ * copying it to userspace...
+ */
+ if (report32[0] == 0) {
+ DRM_NOTE("Skipping spurious, invalid OA report\n");
+ continue;
+ }
+
+ ret = append_oa_sample(stream, buf, count, offset, report);
+ if (ret)
+ break;
+
+ /* The above report-id field sanity check is based on
+ * the assumption that the OA buffer is initially
+ * zeroed and we reset the field after copying so the
+ * check is still meaningful once old reports start
+ * being overwritten.
+ */
+ report32[0] = 0;
+ }
+
+ *head_ptr = gtt_offset + head;
+
+ return ret;
+}
+
+/**
+ * gen7_oa_read - copy status records then buffered OA reports
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ *
+ * Checks Gen 7 specific OA unit status registers and if necessary appends
+ * corresponding status records for userspace (such as for a buffer full
+ * condition) and then initiates appending any buffered OA reports.
+ *
+ * Updates @offset according to the number of bytes successfully copied into
+ * the userspace buffer.
+ *
+ * Returns: zero on success or a negative error code
+ */
+static int gen7_oa_read(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int report_size = dev_priv->perf.oa.oa_buffer.format_size;
+ u32 oastatus2;
+ u32 oastatus1;
+ u32 head;
+ u32 tail;
+ int ret;
+
+ if (WARN_ON(!dev_priv->perf.oa.oa_buffer.vaddr))
+ return -EIO;
+
+ oastatus2 = I915_READ(GEN7_OASTATUS2);
+ oastatus1 = I915_READ(GEN7_OASTATUS1);
+
+ head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
+ tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+
+ /* XXX: On Haswell we don't have a safe way to clear oastatus1
+ * bits while the OA unit is enabled (while the tail pointer
+ * may be updated asynchronously) so we ignore status bits
+ * that have already been reported to userspace.
+ */
+ oastatus1 &= ~dev_priv->perf.oa.gen7_latched_oastatus1;
+
+ /* We treat OABUFFER_OVERFLOW as a significant error:
+ *
+ * - The status can be interpreted to mean that the buffer is
+ * currently full (with a higher precedence than OA_TAKEN()
+ * which will start to report a near-empty buffer after an
+ * overflow) but it's awkward that we can't clear the status
+ * on Haswell, so without a reset we won't be able to catch
+ * the state again.
+ *
+ * - Since it also implies the HW has started overwriting old
+ * reports it may also affect our sanity checks for invalid
+ * reports when copying to userspace that assume new reports
+ * are being written to cleared memory.
+ *
+ * - In the future we may want to introduce a flight recorder
+ * mode where the driver will automatically maintain a safe
+ * guard band between head/tail, avoiding this overflow
+ * condition, but we avoid the added driver complexity for
+ * now.
+ */
+ if (unlikely(oastatus1 & GEN7_OASTATUS1_OABUFFER_OVERFLOW)) {
+ ret = append_oa_status(stream, buf, count, offset,
+ DRM_I915_PERF_RECORD_OA_BUFFER_LOST);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG("OA buffer overflow: force restart\n");
+
+ dev_priv->perf.oa.ops.oa_disable(dev_priv);
+ dev_priv->perf.oa.ops.oa_enable(dev_priv);
+
+ oastatus2 = I915_READ(GEN7_OASTATUS2);
+ oastatus1 = I915_READ(GEN7_OASTATUS1);
+
+ head = oastatus2 & GEN7_OASTATUS2_HEAD_MASK;
+ tail = oastatus1 & GEN7_OASTATUS1_TAIL_MASK;
+ }
+
+ if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) {
+ ret = append_oa_status(stream, buf, count, offset,
+ DRM_I915_PERF_RECORD_OA_REPORT_LOST);
+ if (ret)
+ return ret;
+ dev_priv->perf.oa.gen7_latched_oastatus1 |=
+ GEN7_OASTATUS1_REPORT_LOST;
+ }
+
+ ret = gen7_append_oa_reports(stream, buf, count, offset,
+ &head, tail);
+
+ /* All the report sizes are a power of two and the
+ * head should always be incremented by some multiple
+ * of the report size.
+ *
+ * A warning here, but notably if we later read back a
+ * misaligned pointer we will treat that as a bug since
+ * it could lead to a buffer overrun.
+ */
+ WARN_ONCE(head & (report_size - 1),
+ "i915: Writing misaligned OA head pointer");
+
+ /* Note: we update the head pointer here even if an error
+ * was returned since the error may represent a short read
+ * where some reports were successfully copied.
+ */
+ I915_WRITE(GEN7_OASTATUS2,
+ ((head & GEN7_OASTATUS2_HEAD_MASK) |
+ OA_MEM_SELECT_GGTT));
+
+ return ret;
+}
+
+/**
+ * i915_oa_wait_unlocked - handles blocking IO until OA data available
+ * @stream: An i915-perf stream opened for OA metrics
+ *
+ * Called when userspace tries to read() from a blocking stream FD opened
+ * for OA metrics. It waits until the hrtimer callback finds a non-empty
+ * OA buffer and wakes us.
+ *
+ * Note: it's acceptable to have this return with some false positives
+ * since any subsequent read handling will return -EAGAIN if there isn't
+ * really data ready for userspace yet.
+ *
+ * Returns: zero on success or a negative error code
+ */
+static int i915_oa_wait_unlocked(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ /* We would wait indefinitely if periodic sampling is not enabled */
+ if (!dev_priv->perf.oa.periodic)
+ return -EIO;
+
+ /* Note: the oa_buffer_is_empty() condition is ok to run unlocked as it
+ * just performs mmio reads of the OA buffer head + tail pointers and
+ * it's assumed we're handling some operation that implies the stream
+ * can't be destroyed until completion (such as a read()) that ensures
+ * the device + OA buffer can't disappear
+ */
+ return wait_event_interruptible(dev_priv->perf.oa.poll_wq,
+ !dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv));
+}
+
+/**
+ * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
+ * @stream: An i915-perf stream opened for OA metrics
+ * @file: An i915 perf stream file
+ * @wait: poll() state table
+ *
+ * For handling userspace polling on an i915 perf stream opened for OA metrics,
+ * this starts a poll_wait with the wait queue that our hrtimer callback wakes
+ * when it sees data ready to read in the circular OA buffer.
+ */
+static void i915_oa_poll_wait(struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ poll_wait(file, &dev_priv->perf.oa.poll_wq, wait);
+}
+
+/**
+ * i915_oa_read - just calls through to &i915_oa_ops->read
+ * @stream: An i915-perf stream opened for OA metrics
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @offset: (inout): the current position for writing into @buf
+ *
+ * Updates @offset according to the number of bytes successfully copied into
+ * the userspace buffer.
+ *
+ * Returns: zero on success or a negative error code
+ */
+static int i915_oa_read(struct i915_perf_stream *stream,
+ char __user *buf,
+ size_t count,
+ size_t *offset)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ return dev_priv->perf.oa.ops.read(stream, buf, count, offset);
+}
+
+/**
+ * oa_get_render_ctx_id - determine and hold ctx hw id
+ * @stream: An i915-perf stream opened for OA metrics
+ *
+ * Determine the render context hw id, and ensure it remains fixed for the
+ * lifetime of the stream. This ensures that we don't have to worry about
+ * updating the context ID in OACONTROL on the fly.
+ *
+ * Returns: zero on success or a negative error code
+ */
+static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ return ret;
+
+ /* As the ID is the gtt offset of the context's vma we pin
+ * the vma to ensure the ID remains fixed.
+ *
+ * NB: implied RCS engine...
+ */
+ ret = engine->context_pin(engine, stream->ctx);
+ if (ret)
+ goto unlock;
+
+ /* Explicitly track the ID (instead of calling i915_ggtt_offset()
+ * on the fly) considering the difference with gen8+ and
+ * execlists
+ */
+ dev_priv->perf.oa.specific_ctx_id =
+ i915_ggtt_offset(stream->ctx->engine[engine->id].state);
+
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ return ret;
+}
+
+/**
+ * oa_put_render_ctx_id - counterpart to oa_get_render_ctx_id releases hold
+ * @stream: An i915-perf stream opened for OA metrics
+ *
+ * In case anything needed doing to ensure the context HW ID would remain valid
+ * for the lifetime of the stream, then that can be undone here.
+ */
+static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[RCS];
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
+ engine->context_unpin(engine, stream->ctx);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+static void
+free_oa_buffer(struct drm_i915_private *i915)
+{
+ mutex_lock(&i915->drm.struct_mutex);
+
+ i915_gem_object_unpin_map(i915->perf.oa.oa_buffer.vma->obj);
+ i915_vma_unpin(i915->perf.oa.oa_buffer.vma);
+ i915_gem_object_put(i915->perf.oa.oa_buffer.vma->obj);
+
+ i915->perf.oa.oa_buffer.vma = NULL;
+ i915->perf.oa.oa_buffer.vaddr = NULL;
+
+ mutex_unlock(&i915->drm.struct_mutex);
+}
+
+static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ BUG_ON(stream != dev_priv->perf.oa.exclusive_stream);
+
+ dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
+
+ free_oa_buffer(dev_priv);
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
+
+ if (stream->ctx)
+ oa_put_render_ctx_id(stream);
+
+ dev_priv->perf.oa.exclusive_stream = NULL;
+}
+
+static void gen7_init_oa_buffer(struct drm_i915_private *dev_priv)
+{
+ u32 gtt_offset = i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma);
+
+ /* Pre-DevBDW: OABUFFER must be set with counters off,
+ * before OASTATUS1, but after OASTATUS2
+ */
+ I915_WRITE(GEN7_OASTATUS2, gtt_offset | OA_MEM_SELECT_GGTT); /* head */
+ I915_WRITE(GEN7_OABUFFER, gtt_offset);
+ I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */
+
+ /* On Haswell we have to track which OASTATUS1 flags we've
+ * already seen since they can't be cleared while periodic
+ * sampling is enabled.
+ */
+ dev_priv->perf.oa.gen7_latched_oastatus1 = 0;
+
+ /* NB: although the OA buffer will initially be allocated
+ * zeroed via shmfs (and so this memset is redundant when
+ * first allocating), we may re-init the OA buffer, either
+ * when re-enabling a stream or in error/reset paths.
+ *
+ * The reason we clear the buffer for each re-init is for the
+ * sanity check in gen7_append_oa_reports() that looks at the
+ * report-id field to make sure it's non-zero which relies on
+ * the assumption that new reports are being written to zeroed
+ * memory...
+ */
+ memset(dev_priv->perf.oa.oa_buffer.vaddr, 0, OA_BUFFER_SIZE);
+
+ /* Maybe make ->pollin per-stream state if we support multiple
+ * concurrent streams in the future.
+ */
+ dev_priv->perf.oa.pollin = false;
+}
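+
+/* For reference, a minimal sketch of how the head/tail pointers programmed
+ * above are expected to be consumed on the read side (the OASTATUS masks are
+ * added to i915_reg.h as part of this series; the modulo arithmetic here is
+ * illustrative rather than a copy of the actual read code):
+ *
+ *	u32 head = I915_READ(GEN7_OASTATUS2) & GEN7_OASTATUS2_HEAD_MASK;
+ *	u32 tail = I915_READ(GEN7_OASTATUS1) & GEN7_OASTATUS1_TAIL_MASK;
+ *	u32 taken = (tail - head) & (OA_BUFFER_SIZE - 1); // bytes available
+ *
+ * which is why OA_BUFFER_SIZE being a power of two is asserted in
+ * alloc_oa_buffer() below.
+ */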
+
+static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *bo;
+ struct i915_vma *vma;
+ int ret;
+
+ if (WARN_ON(dev_priv->perf.oa.oa_buffer.vma))
+ return -ENODEV;
+
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ return ret;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
+ BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M);
+
+ bo = i915_gem_object_create(dev_priv, OA_BUFFER_SIZE);
+ if (IS_ERR(bo)) {
+ DRM_ERROR("Failed to allocate OA buffer\n");
+ ret = PTR_ERR(bo);
+ goto unlock;
+ }
+
+ ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
+ if (ret)
+ goto err_unref;
+
+ /* PreHSW required 512K alignment, HSW requires 16M */
+ vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+ dev_priv->perf.oa.oa_buffer.vma = vma;
+
+ dev_priv->perf.oa.oa_buffer.vaddr =
+ i915_gem_object_pin_map(bo, I915_MAP_WB);
+ if (IS_ERR(dev_priv->perf.oa.oa_buffer.vaddr)) {
+ ret = PTR_ERR(dev_priv->perf.oa.oa_buffer.vaddr);
+ goto err_unpin;
+ }
+
+ dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
+
+ DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
+ i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
+ dev_priv->perf.oa.oa_buffer.vaddr);
+
+ goto unlock;
+
+err_unpin:
+ __i915_vma_unpin(vma);
+
+err_unref:
+ i915_gem_object_put(bo);
+
+ dev_priv->perf.oa.oa_buffer.vaddr = NULL;
+ dev_priv->perf.oa.oa_buffer.vma = NULL;
+
+unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ return ret;
+}
+
+static void config_oa_regs(struct drm_i915_private *dev_priv,
+ const struct i915_oa_reg *regs,
+ int n_regs)
+{
+ int i;
+
+ for (i = 0; i < n_regs; i++) {
+ const struct i915_oa_reg *reg = regs + i;
+
+ I915_WRITE(reg->addr, reg->value);
+ }
+}
+
+static int hsw_enable_metric_set(struct drm_i915_private *dev_priv)
+{
+ int ret = i915_oa_select_metric_set_hsw(dev_priv);
+
+ if (ret)
+ return ret;
+
+ I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) |
+ GT_NOA_ENABLE));
+
+ /* PRM:
+ *
+ * OA unit is using “crclk” for its functionality. When trunk
+ * level clock gating takes place, OA clock would be gated,
+ * unable to count the events from non-render clock domain.
+ * Render clock gating must be disabled when OA is enabled to
+ * count the events from non-render domain. Unit level clock
+ * gating for RCS should also be disabled.
+ */
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+ ~GEN7_DOP_CLOCK_GATE_ENABLE));
+ I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+
+ config_oa_regs(dev_priv, dev_priv->perf.oa.mux_regs,
+ dev_priv->perf.oa.mux_regs_len);
+
+ /* It apparently takes a fairly long time for a new MUX
+ * configuration to be applied after these register writes.
+ * This delay duration was derived empirically based on the
+ * render_basic config but hopefully it covers the maximum
+ * configuration latency.
+ *
+ * As a fallback, the checks in _append_oa_reports() to skip
+ * invalid OA reports do also seem to work to discard reports
+ * generated before this config has completed - albeit not
+ * silently.
+ *
+ * Unfortunately this is essentially a magic number, since we
+ * don't currently know of a reliable mechanism for predicting
+ * how long the MUX config will take to apply and besides
+ * seeing invalid reports we don't know of a reliable way to
+ * explicitly check that the MUX config has landed.
+ *
+ * It's even possible we've mischaracterized the underlying
+ * problem - it just seems like the simplest explanation for why
+ * a delay at this location would mitigate any invalid reports.
+ */
+ usleep_range(15000, 20000);
+
+ config_oa_regs(dev_priv, dev_priv->perf.oa.b_counter_regs,
+ dev_priv->perf.oa.b_counter_regs_len);
+
+ return 0;
+}
+
+static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) &
+ ~GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) |
+ GEN7_DOP_CLOCK_GATE_ENABLE));
+
+ I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
+ ~GT_NOA_ENABLE));
+}
+
+static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv)
+{
+ assert_spin_locked(&dev_priv->perf.hook_lock);
+
+ if (dev_priv->perf.oa.exclusive_stream->enabled) {
+ struct i915_gem_context *ctx =
+ dev_priv->perf.oa.exclusive_stream->ctx;
+ u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
+
+ bool periodic = dev_priv->perf.oa.periodic;
+ u32 period_exponent = dev_priv->perf.oa.period_exponent;
+ u32 report_format = dev_priv->perf.oa.oa_buffer.format;
+
+ I915_WRITE(GEN7_OACONTROL,
+ (ctx_id & GEN7_OACONTROL_CTX_MASK) |
+ (period_exponent <<
+ GEN7_OACONTROL_TIMER_PERIOD_SHIFT) |
+ (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) |
+ (report_format << GEN7_OACONTROL_FORMAT_SHIFT) |
+ (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) |
+ GEN7_OACONTROL_ENABLE);
+ } else
+ I915_WRITE(GEN7_OACONTROL, 0);
+}
+
+static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+{
+ unsigned long flags;
+
+ /* Reset buf pointers so we don't forward reports from before now.
+ *
+ * Think carefully before trying to avoid this, since it
+ * also ensures status flags and the buffer itself are cleared
+ * in error paths, and we have checks for invalid reports based
+ * on the assumption that certain fields are written to zeroed
+ * memory, which this helps maintain.
+ */
+ gen7_init_oa_buffer(dev_priv);
+
+ spin_lock_irqsave(&dev_priv->perf.hook_lock, flags);
+ gen7_update_oacontrol_locked(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->perf.hook_lock, flags);
+}
+
+/**
+ * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
+ * @stream: An i915 perf stream opened for OA metrics
+ *
+ * [Re]enables hardware periodic sampling according to the period configured
+ * when opening the stream. This also starts a hrtimer that will periodically
+ * check for data in the circular OA buffer for notifying userspace (e.g.
+ * during a read() or poll()).
+ */
+static void i915_oa_stream_enable(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ dev_priv->perf.oa.ops.oa_enable(dev_priv);
+
+ if (dev_priv->perf.oa.periodic)
+ hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
+ ns_to_ktime(POLL_PERIOD),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static void gen7_oa_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(GEN7_OACONTROL, 0);
+}
+
+/**
+ * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
+ * @stream: An i915 perf stream opened for OA metrics
+ *
+ * Stops the OA unit from periodically writing counter reports into the
+ * circular OA buffer. This also stops the hrtimer that periodically checks for
+ * data in the circular OA buffer, for notifying userspace.
+ */
+static void i915_oa_stream_disable(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ dev_priv->perf.oa.ops.oa_disable(dev_priv);
+
+ if (dev_priv->perf.oa.periodic)
+ hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
+}
+
+static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
+{
+ return div_u64(1000000000ULL * (2ULL << exponent),
+ dev_priv->perf.oa.timestamp_frequency);
+}
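+
+/* Worked example: with the 12.5MHz Haswell timestamp frequency assigned in
+ * i915_perf_init() below, an exponent of 5 gives
+ * 1000000000 * (2 << 5) / 12500000 = 5120ns between periodic samples, i.e.
+ * the 80ns * 2^(exponent + 1) relation noted later in
+ * read_properties_unlocked().
+ */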
+
+static const struct i915_perf_stream_ops i915_oa_stream_ops = {
+ .destroy = i915_oa_stream_destroy,
+ .enable = i915_oa_stream_enable,
+ .disable = i915_oa_stream_disable,
+ .wait_unlocked = i915_oa_wait_unlocked,
+ .poll_wait = i915_oa_poll_wait,
+ .read = i915_oa_read,
+};
+
+/**
+ * i915_oa_stream_init - validate combined props for OA stream and init
+ * @stream: An i915 perf stream
+ * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
+ * @props: The property state that configures stream (individually validated)
+ *
+ * While read_properties_unlocked() validates properties in isolation it
+ * doesn't ensure that the combination necessarily makes sense.
+ *
+ * At this point it has been determined that userspace wants a stream of
+ * OA metrics, but still we need to further validate the combined
+ * properties are OK.
+ *
+ * If the configuration makes sense then we can allocate memory for
+ * a circular OA buffer and apply the requested metric set configuration.
+ *
+ * Returns: zero on success or a negative error code.
+ */
+static int i915_oa_stream_init(struct i915_perf_stream *stream,
+ struct drm_i915_perf_open_param *param,
+ struct perf_open_properties *props)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int format_size;
+ int ret;
+
+ /* If the sysfs metrics/ directory wasn't registered for some
+ * reason then don't let userspace try their luck with config
+ * IDs
+ */
+ if (!dev_priv->perf.metrics_kobj) {
+ DRM_DEBUG("OA metrics weren't advertised via sysfs\n");
+ return -EINVAL;
+ }
+
+ if (!(props->sample_flags & SAMPLE_OA_REPORT)) {
+ DRM_DEBUG("Only OA report sampling supported\n");
+ return -EINVAL;
+ }
+
+ if (!dev_priv->perf.oa.ops.init_oa_buffer) {
+ DRM_DEBUG("OA unit not supported\n");
+ return -ENODEV;
+ }
+
+ /* To avoid the complexity of having to accurately filter
+ * counter reports and marshal to the appropriate client,
+ * we currently only allow exclusive access.
+ */
+ if (dev_priv->perf.oa.exclusive_stream) {
+ DRM_DEBUG("OA unit already in use\n");
+ return -EBUSY;
+ }
+
+ if (!props->metrics_set) {
+ DRM_DEBUG("OA metric set not specified\n");
+ return -EINVAL;
+ }
+
+ if (!props->oa_format) {
+ DRM_DEBUG("OA report format not specified\n");
+ return -EINVAL;
+ }
+
+ stream->sample_size = sizeof(struct drm_i915_perf_record_header);
+
+ format_size = dev_priv->perf.oa.oa_formats[props->oa_format].size;
+
+ stream->sample_flags |= SAMPLE_OA_REPORT;
+ stream->sample_size += format_size;
+
+ dev_priv->perf.oa.oa_buffer.format_size = format_size;
+ if (WARN_ON(dev_priv->perf.oa.oa_buffer.format_size == 0))
+ return -EINVAL;
+
+ dev_priv->perf.oa.oa_buffer.format =
+ dev_priv->perf.oa.oa_formats[props->oa_format].format;
+
+ dev_priv->perf.oa.metrics_set = props->metrics_set;
+
+ dev_priv->perf.oa.periodic = props->oa_periodic;
+ if (dev_priv->perf.oa.periodic) {
+ u32 tail;
+
+ dev_priv->perf.oa.period_exponent = props->oa_period_exponent;
+
+ /* See comment for OA_TAIL_MARGIN_NSEC for details
+ * about this tail_margin...
+ */
+ tail = div64_u64(OA_TAIL_MARGIN_NSEC,
+ oa_exponent_to_ns(dev_priv,
+ props->oa_period_exponent));
+ dev_priv->perf.oa.tail_margin = (tail + 1) * format_size;
+ }
+
+ if (stream->ctx) {
+ ret = oa_get_render_ctx_id(stream);
+ if (ret)
+ return ret;
+ }
+
+ ret = alloc_oa_buffer(dev_priv);
+ if (ret)
+ goto err_oa_buf_alloc;
+
+ /* PRM - observability performance counters:
+ *
+ * OACONTROL, performance counter enable, note:
+ *
+ * "When this bit is set, in order to have coherent counts,
+ * RC6 power state and trunk clock gating must be disabled.
+ * This can be achieved by programming MMIO registers as
+ * 0xA094=0 and 0xA090[31]=1"
+ *
+ * In our case we are expecting that taking pm + FORCEWAKE
+ * references will effectively disable RC6.
+ */
+ intel_runtime_pm_get(dev_priv);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv);
+ if (ret)
+ goto err_enable;
+
+ stream->ops = &i915_oa_stream_ops;
+
+ dev_priv->perf.oa.exclusive_stream = stream;
+
+ return 0;
+
+err_enable:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_runtime_pm_put(dev_priv);
+ free_oa_buffer(dev_priv);
+
+err_oa_buf_alloc:
+ if (stream->ctx)
+ oa_put_render_ctx_id(stream);
+
+ return ret;
+}
+
+/**
+ * i915_perf_read_locked - &i915_perf_stream_ops->read with error normalisation
+ * @stream: An i915 perf stream
+ * @file: An i915 perf stream file
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @ppos: (inout) file seek position (unused)
+ *
+ * Besides wrapping &i915_perf_stream_ops->read this provides a common place to
+ * ensure that if we've successfully copied any data then reporting that takes
+ * precedence over any internal error status, so the data isn't lost.
+ *
+ * For example ret will be -ENOSPC whenever there is more buffered data than
+ * can be copied to userspace, but that's only interesting if we weren't able
+ * to copy some data because it implies the userspace buffer is too small to
+ * receive a single record (and we never split records).
+ *
+ * Another case with ret == -EFAULT is more of a grey area since it would seem
+ * like bad form for userspace to ask us to overrun its buffer, but the user
+ * knows best:
+ *
+ * http://yarchive.net/comp/linux/partial_reads_writes.html
+ *
+ * Returns: The number of bytes copied or a negative error code on failure.
+ */
+static ssize_t i915_perf_read_locked(struct i915_perf_stream *stream,
+ struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ /* Note we keep the offset (aka bytes read) separate from any
+ * error status, so that the final return (which gives any bytes
+ * read a higher precedence than an error) doesn't need to be
+ * handled/duplicated in stream->ops->read() implementations.
+ */
+ size_t offset = 0;
+ int ret = stream->ops->read(stream, buf, count, &offset);
+
+ return offset ?: (ret ?: -EAGAIN);
+}
+
+/**
+ * i915_perf_read - handles read() FOP for i915 perf stream FDs
+ * @file: An i915 perf stream file
+ * @buf: destination buffer given by userspace
+ * @count: the number of bytes userspace wants to read
+ * @ppos: (inout) file seek position (unused)
+ *
+ * The entry point for handling a read() on a stream file descriptor from
+ * userspace. Most of the work is left to i915_perf_read_locked() and
+ * &i915_perf_stream_ops->read, but to save stream implementations (of
+ * which we might have multiple later) from each having to do so, we
+ * handle blocking reads here.
+ *
+ * We can also consistently treat trying to read from a disabled stream
+ * as an IO error so implementations can assume the stream is enabled
+ * while reading.
+ *
+ * Returns: The number of bytes copied or a negative error code on failure.
+ */
+static ssize_t i915_perf_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct i915_perf_stream *stream = file->private_data;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ ssize_t ret;
+
+ /* To ensure it's handled consistently we simply treat all reads of a
+ * disabled stream as an error. In particular it might otherwise lead
+ * to a deadlock for blocking file descriptors...
+ */
+ if (!stream->enabled)
+ return -EIO;
+
+ if (!(file->f_flags & O_NONBLOCK)) {
+ /* There's the small chance of false positives from
+ * stream->ops->wait_unlocked.
+ *
+ * E.g. with single context filtering, since we only wait until
+ * the OA buffer has >= 1 report, we don't immediately know
+ * whether any reports really belong to the current context.
+ */
+ do {
+ ret = stream->ops->wait_unlocked(stream);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_read_locked(stream, file,
+ buf, count, ppos);
+ mutex_unlock(&dev_priv->perf.lock);
+ } while (ret == -EAGAIN);
+ } else {
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_read_locked(stream, file, buf, count, ppos);
+ mutex_unlock(&dev_priv->perf.lock);
+ }
+
+ if (ret >= 0) {
+ /* Maybe make ->pollin per-stream state if we support multiple
+ * concurrent streams in the future.
+ */
+ dev_priv->perf.oa.pollin = false;
+ }
+
+ return ret;
+}
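+
+/* Illustrative (non-kernel) sketch of how userspace might walk the byte
+ * stream returned by the read() handler above, assuming the record layout
+ * from the i915 uAPI header where each record begins with a
+ * struct drm_i915_perf_record_header whose size covers header + payload:
+ *
+ *	uint8_t buf[4096];
+ *	ssize_t len = read(stream_fd, buf, sizeof(buf));
+ *	ssize_t pos = 0;
+ *
+ *	while (pos < len) {
+ *		struct drm_i915_perf_record_header *hdr = (void *)(buf + pos);
+ *
+ *		if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE)
+ *			process_oa_report(hdr + 1); // hypothetical helper
+ *		pos += hdr->size;
+ *	}
+ */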
+
+static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(hrtimer, typeof(*dev_priv),
+ perf.oa.poll_check_timer);
+
+ if (!dev_priv->perf.oa.ops.oa_buffer_is_empty(dev_priv)) {
+ dev_priv->perf.oa.pollin = true;
+ wake_up(&dev_priv->perf.oa.poll_wq);
+ }
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(POLL_PERIOD));
+
+ return HRTIMER_RESTART;
+}
+
+/**
+ * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
+ * @dev_priv: i915 device instance
+ * @stream: An i915 perf stream
+ * @file: An i915 perf stream file
+ * @wait: poll() state table
+ *
+ * For handling userspace polling on an i915 perf stream, this calls through to
+ * &i915_perf_stream_ops->poll_wait to call poll_wait() with a wait queue that
+ * will be woken for new stream data.
+ *
+ * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * with any non-file-operation driver hooks.
+ *
+ * Returns: any poll events that are ready without sleeping
+ */
+static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv,
+ struct i915_perf_stream *stream,
+ struct file *file,
+ poll_table *wait)
+{
+ unsigned int events = 0;
+
+ stream->ops->poll_wait(stream, file, wait);
+
+ /* Note: we don't explicitly check whether there's something to read
+ * here since this path may be very hot depending on what else
+ * userspace is polling, or on the timeout in use. We rely solely on
+ * the hrtimer/oa_poll_check_timer_cb to notify us when there are
+ * samples to read.
+ */
+ if (dev_priv->perf.oa.pollin)
+ events |= POLLIN;
+
+ return events;
+}
+
+/**
+ * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
+ * @file: An i915 perf stream file
+ * @wait: poll() state table
+ *
+ * For handling userspace polling on an i915 perf stream, this ensures
+ * poll_wait() gets called with a wait queue that will be woken for new stream
+ * data.
+ *
+ * Note: Implementation deferred to i915_perf_poll_locked()
+ *
+ * Returns: any poll events that are ready without sleeping
+ */
+static unsigned int i915_perf_poll(struct file *file, poll_table *wait)
+{
+ struct i915_perf_stream *stream = file->private_data;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ int ret;
+
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_poll_locked(dev_priv, stream, file, wait);
+ mutex_unlock(&dev_priv->perf.lock);
+
+ return ret;
+}
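+
+/* Illustrative (non-kernel) counterpart: userspace would typically wait for
+ * the POLLIN event signalled above before issuing a read() on the stream fd:
+ *
+ *	struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };
+ *
+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+ *		read(stream_fd, buf, sizeof(buf));
+ */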
+
+/**
+ * i915_perf_enable_locked - handle `I915_PERF_IOCTL_ENABLE` ioctl
+ * @stream: A disabled i915 perf stream
+ *
+ * [Re]enables the associated capture of data for this stream.
+ *
+ * If a stream was previously enabled then there's currently no intention
+ * to provide userspace any guarantee about the preservation of previously
+ * buffered data.
+ */
+static void i915_perf_enable_locked(struct i915_perf_stream *stream)
+{
+ if (stream->enabled)
+ return;
+
+ /* Allow stream->ops->enable() to refer to this */
+ stream->enabled = true;
+
+ if (stream->ops->enable)
+ stream->ops->enable(stream);
+}
+
+/**
+ * i915_perf_disable_locked - handle `I915_PERF_IOCTL_DISABLE` ioctl
+ * @stream: An enabled i915 perf stream
+ *
+ * Disables the associated capture of data for this stream.
+ *
+ * The intention is that disabling and re-enabling a stream will ideally be
+ * cheaper than destroying and re-opening a stream with the same configuration,
+ * though there are no formal guarantees about what state or buffered data
+ * must be retained between disabling and re-enabling a stream.
+ *
+ * Note: while a stream is disabled it's considered an error for userspace
+ * to attempt to read from the stream (-EIO).
+ */
+static void i915_perf_disable_locked(struct i915_perf_stream *stream)
+{
+ if (!stream->enabled)
+ return;
+
+ /* Allow stream->ops->disable() to refer to this */
+ stream->enabled = false;
+
+ if (stream->ops->disable)
+ stream->ops->disable(stream);
+}
+
+/**
+ * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
+ * @stream: An i915 perf stream
+ * @cmd: the ioctl request
+ * @arg: the ioctl data
+ *
+ * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * with any non-file-operation driver hooks.
+ *
+ * Returns: zero on success or a negative error code. Returns -EINVAL for
+ * an unknown ioctl request.
+ */
+static long i915_perf_ioctl_locked(struct i915_perf_stream *stream,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case I915_PERF_IOCTL_ENABLE:
+ i915_perf_enable_locked(stream);
+ return 0;
+ case I915_PERF_IOCTL_DISABLE:
+ i915_perf_disable_locked(stream);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
+ * @file: An i915 perf stream file
+ * @cmd: the ioctl request
+ * @arg: the ioctl data
+ *
+ * Implementation deferred to i915_perf_ioctl_locked().
+ *
+ * Returns: zero on success or a negative error code. Returns -EINVAL for
+ * an unknown ioctl request.
+ */
+static long i915_perf_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ struct i915_perf_stream *stream = file->private_data;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ long ret;
+
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_ioctl_locked(stream, cmd, arg);
+ mutex_unlock(&dev_priv->perf.lock);
+
+ return ret;
+}
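+
+/* From userspace the two requests handled above are issued on the stream fd
+ * returned by the open ioctl, e.g. (illustrative only):
+ *
+ *	ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0);
+ *	...
+ *	ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
+ */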
+
+/**
+ * i915_perf_destroy_locked - destroy an i915 perf stream
+ * @stream: An i915 perf stream
+ *
+ * Frees all resources associated with the given i915 perf @stream, disabling
+ * any associated data capture in the process.
+ *
+ * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize
+ * with any non-file-operation driver hooks.
+ */
+static void i915_perf_destroy_locked(struct i915_perf_stream *stream)
+{
+ if (stream->enabled)
+ i915_perf_disable_locked(stream);
+
+ if (stream->ops->destroy)
+ stream->ops->destroy(stream);
+
+ list_del(&stream->link);
+
+ if (stream->ctx)
+ i915_gem_context_put_unlocked(stream->ctx);
+
+ kfree(stream);
+}
+
+/**
+ * i915_perf_release - handles userspace close() of a stream file
+ * @inode: anonymous inode associated with file
+ * @file: An i915 perf stream file
+ *
+ * Cleans up any resources associated with an open i915 perf stream file.
+ *
+ * NB: close() can't really fail from the userspace point of view.
+ *
+ * Returns: zero on success or a negative error code.
+ */
+static int i915_perf_release(struct inode *inode, struct file *file)
+{
+ struct i915_perf_stream *stream = file->private_data;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
+ mutex_lock(&dev_priv->perf.lock);
+ i915_perf_destroy_locked(stream);
+ mutex_unlock(&dev_priv->perf.lock);
+
+ return 0;
+}
+
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .release = i915_perf_release,
+ .poll = i915_perf_poll,
+ .read = i915_perf_read,
+ .unlocked_ioctl = i915_perf_ioctl,
+};
+
+
+static struct i915_gem_context *
+lookup_context(struct drm_i915_private *dev_priv,
+ struct drm_i915_file_private *file_priv,
+ u32 ctx_user_handle)
+{
+ struct i915_gem_context *ctx;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(&dev_priv->drm);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ctx = i915_gem_context_lookup(file_priv, ctx_user_handle);
+ if (!IS_ERR(ctx))
+ i915_gem_context_get(ctx);
+
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ return ctx;
+}
+
+/**
+ * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
+ * @dev_priv: i915 device instance
+ * @param: The open parameters passed to `DRM_I915_PERF_OPEN`
+ * @props: individually validated u64 property value pairs
+ * @file: drm file
+ *
+ * See i915_perf_open_ioctl() for interface details.
+ *
+ * Implements further stream config validation and stream initialization on
+ * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex
+ * taken to serialize with any non-file-operation driver hooks.
+ *
+ * Note: at this point the @props have only been validated in isolation and
+ * it's still necessary to validate that the combination of properties makes
+ * sense.
+ *
+ * In the case where userspace is interested in OA unit metrics then further
+ * config validation and stream initialization details will be handled by
+ * i915_oa_stream_init(). The code here should only validate config state that
+ * will be relevant to all stream types / backends.
+ *
+ * Returns: zero on success or a negative error code.
+ */
+static int
+i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
+ struct drm_i915_perf_open_param *param,
+ struct perf_open_properties *props,
+ struct drm_file *file)
+{
+ struct i915_gem_context *specific_ctx = NULL;
+ struct i915_perf_stream *stream = NULL;
+ unsigned long f_flags = 0;
+ int stream_fd;
+ int ret;
+
+ if (props->single_context) {
+ u32 ctx_handle = props->ctx_handle;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ specific_ctx = lookup_context(dev_priv, file_priv, ctx_handle);
+ if (IS_ERR(specific_ctx)) {
+ ret = PTR_ERR(specific_ctx);
+ if (ret != -EINTR)
+ DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
+ ctx_handle);
+ goto err;
+ }
+ }
+
+ /* Similar to perf's kernel.perf_paranoid_cpu sysctl option
+ * we check a dev.i915.perf_stream_paranoid sysctl option
+ * to determine if it's ok to access system wide OA counters
+ * without CAP_SYS_ADMIN privileges.
+ */
+ if (!specific_ctx &&
+ i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n");
+ ret = -EACCES;
+ goto err_ctx;
+ }
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream) {
+ ret = -ENOMEM;
+ goto err_ctx;
+ }
+
+ stream->dev_priv = dev_priv;
+ stream->ctx = specific_ctx;
+
+ ret = i915_oa_stream_init(stream, param, props);
+ if (ret)
+ goto err_alloc;
+
+ /* We avoid simply assigning stream->sample_flags = props->sample_flags
+ * so that _stream_init is forced to check the combination of sample
+ * flags more thoroughly, but this is still the expected result at this
+ * point.
+ */
+ if (WARN_ON(stream->sample_flags != props->sample_flags)) {
+ ret = -ENODEV;
+ goto err_alloc;
+ }
+
+ list_add(&stream->link, &dev_priv->perf.streams);
+
+ if (param->flags & I915_PERF_FLAG_FD_CLOEXEC)
+ f_flags |= O_CLOEXEC;
+ if (param->flags & I915_PERF_FLAG_FD_NONBLOCK)
+ f_flags |= O_NONBLOCK;
+
+ stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags);
+ if (stream_fd < 0) {
+ ret = stream_fd;
+ goto err_open;
+ }
+
+ if (!(param->flags & I915_PERF_FLAG_DISABLED))
+ i915_perf_enable_locked(stream);
+
+ return stream_fd;
+
+err_open:
+ list_del(&stream->link);
+ if (stream->ops->destroy)
+ stream->ops->destroy(stream);
+err_alloc:
+ kfree(stream);
+err_ctx:
+ if (specific_ctx)
+ i915_gem_context_put_unlocked(specific_ctx);
+err:
+ return ret;
+}
+
+/**
+ * read_properties_unlocked - validate + copy userspace stream open properties
+ * @dev_priv: i915 device instance
+ * @uprops: The array of u64 key value pairs given by userspace
+ * @n_props: The number of key value pairs expected in @uprops
+ * @props: The stream configuration built up while validating properties
+ *
+ * Note this function only validates properties in isolation; it doesn't
+ * validate that the combination of properties makes sense or that all
+ * properties necessary for a particular kind of stream have been set.
+ *
+ * Note that there currently aren't any ordering requirements for properties so
+ * we shouldn't validate or assume anything about ordering here. This doesn't
+ * rule out defining new properties with ordering requirements in the future.
+ */
+static int read_properties_unlocked(struct drm_i915_private *dev_priv,
+ u64 __user *uprops,
+ u32 n_props,
+ struct perf_open_properties *props)
+{
+ u64 __user *uprop = uprops;
+ int i;
+
+ memset(props, 0, sizeof(struct perf_open_properties));
+
+ if (!n_props) {
+ DRM_DEBUG("No i915 perf properties given\n");
+ return -EINVAL;
+ }
+
+ /* Considering that ID = 0 is reserved and assuming that we don't
+ * (currently) expect any configurations to ever specify duplicate
+ * values for a particular property ID then the last _PROP_MAX value is
+ * one greater than the maximum number of properties we expect to get
+ * from userspace.
+ */
+ if (n_props >= DRM_I915_PERF_PROP_MAX) {
+ DRM_DEBUG("More i915 perf properties specified than exist\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < n_props; i++) {
+ u64 oa_period, oa_freq_hz;
+ u64 id, value;
+ int ret;
+
+ ret = get_user(id, uprop);
+ if (ret)
+ return ret;
+
+ ret = get_user(value, uprop + 1);
+ if (ret)
+ return ret;
+
+ switch ((enum drm_i915_perf_property_id)id) {
+ case DRM_I915_PERF_PROP_CTX_HANDLE:
+ props->single_context = 1;
+ props->ctx_handle = value;
+ break;
+ case DRM_I915_PERF_PROP_SAMPLE_OA:
+ props->sample_flags |= SAMPLE_OA_REPORT;
+ break;
+ case DRM_I915_PERF_PROP_OA_METRICS_SET:
+ if (value == 0 ||
+ value > dev_priv->perf.oa.n_builtin_sets) {
+ DRM_DEBUG("Unknown OA metric set ID\n");
+ return -EINVAL;
+ }
+ props->metrics_set = value;
+ break;
+ case DRM_I915_PERF_PROP_OA_FORMAT:
+ if (value == 0 || value >= I915_OA_FORMAT_MAX) {
+ DRM_DEBUG("Invalid OA report format\n");
+ return -EINVAL;
+ }
+ if (!dev_priv->perf.oa.oa_formats[value].size) {
+ DRM_DEBUG("Invalid OA report format\n");
+ return -EINVAL;
+ }
+ props->oa_format = value;
+ break;
+ case DRM_I915_PERF_PROP_OA_EXPONENT:
+ if (value > OA_EXPONENT_MAX) {
+ DRM_DEBUG("OA timer exponent too high (> %u)\n",
+ OA_EXPONENT_MAX);
+ return -EINVAL;
+ }
+
+ /* Theoretically we can program the OA unit to sample
+ * every 160ns but don't allow that by default unless
+ * root.
+ *
+ * On Haswell the period is derived from the exponent
+ * as:
+ *
+ * period = 80ns * 2^(exponent + 1)
+ */
+ BUILD_BUG_ON(sizeof(oa_period) != 8);
+ oa_period = 80ull * (2ull << value);
+
+ /* This check is primarily to ensure that oa_period <=
+ * UINT32_MAX (before passing to do_div which only
+ * accepts a u32 denominator), but we can also skip
+ * checking anything < 1Hz which implicitly can't be
+ * limited via an integer oa_max_sample_rate.
+ */
+ if (oa_period <= NSEC_PER_SEC) {
+ u64 tmp = NSEC_PER_SEC;
+ do_div(tmp, oa_period);
+ oa_freq_hz = tmp;
+ } else
+ oa_freq_hz = 0;
+
+ if (oa_freq_hz > i915_oa_max_sample_rate &&
+ !capable(CAP_SYS_ADMIN)) {
+ DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
+ i915_oa_max_sample_rate);
+ return -EACCES;
+ }
+
+ props->oa_periodic = true;
+ props->oa_period_exponent = value;
+ break;
+ default:
+ MISSING_CASE(id);
+ DRM_DEBUG("Unknown i915 perf property ID\n");
+ return -EINVAL;
+ }
+
+ uprop += 2;
+ }
+
+ return 0;
+}
+
+/**
+ * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
+ * @dev: drm device
+ * @data: ioctl data copied from userspace (unvalidated)
+ * @file: drm file
+ *
+ * Validates the stream open parameters given by userspace including flags
+ * and an array of u64 key, value pair properties.
+ *
+ * Very little is assumed up front about the nature of the stream being
+ * opened (for instance we don't assume it's for periodic OA unit metrics). An
+ * i915-perf stream is expected to be a suitable interface for other forms of
+ * buffered data written by the GPU besides periodic OA metrics.
+ *
+ * Note we copy the properties from userspace outside of the i915 perf
+ * mutex to avoid an awkward lockdep with mmap_sem.
+ *
+ * Most of the implementation details are handled by
+ * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock
+ * mutex for serializing with any non-file-operation driver hooks.
+ *
+ * Return: A newly opened i915 Perf stream file descriptor or negative
+ * error code on failure.
+ */
+int i915_perf_open_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_perf_open_param *param = data;
+ struct perf_open_properties props;
+ u32 known_open_flags;
+ int ret;
+
+ if (!dev_priv->perf.initialized) {
+ DRM_DEBUG("i915 perf interface not available for this system\n");
+ return -ENOTSUPP;
+ }
+
+ known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
+ I915_PERF_FLAG_FD_NONBLOCK |
+ I915_PERF_FLAG_DISABLED;
+ if (param->flags & ~known_open_flags) {
+ DRM_DEBUG("Unknown drm_i915_perf_open_param flag\n");
+ return -EINVAL;
+ }
+
+ ret = read_properties_unlocked(dev_priv,
+ u64_to_user_ptr(param->properties_ptr),
+ param->num_properties,
+ &props);
+ if (ret)
+ return ret;
+
+ mutex_lock(&dev_priv->perf.lock);
+ ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file);
+ mutex_unlock(&dev_priv->perf.lock);
+
+ return ret;
+}
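+
+/* Illustrative (non-kernel) sketch of opening a stream through this ioctl,
+ * assuming the property, flag and format names from the i915 uAPI header and
+ * the DRM_IOCTL_I915_PERF_OPEN request number; the metric set ID (1) is a
+ * placeholder that should really be looked up via the sysfs metrics/
+ * directory registered by i915_perf_register() below:
+ *
+ *	uint64_t properties[] = {
+ *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
+ *		DRM_I915_PERF_PROP_OA_METRICS_SET, 1,
+ *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A45_B8_C8,
+ *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
+ *	};
+ *	struct drm_i915_perf_open_param param = {
+ *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
+ *		.num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
+ *		.properties_ptr = (uintptr_t)properties,
+ *	};
+ *	int stream_fd = ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
+ */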
+
+/**
+ * i915_perf_register - exposes i915-perf to userspace
+ * @dev_priv: i915 device instance
+ *
+ * In particular OA metric sets are advertised under a sysfs metrics/
+ * directory allowing userspace to enumerate valid IDs that can be
+ * used to open an i915-perf stream.
+ */
+void i915_perf_register(struct drm_i915_private *dev_priv)
+{
+ if (!IS_HASWELL(dev_priv))
+ return;
+
+ if (!dev_priv->perf.initialized)
+ return;
+
+ /* Take the lock to be sure we're synchronized with any attempted
+ * i915_perf_open_ioctl(), considering that we register after the
+ * driver has already been exposed to userspace.
+ */
+ mutex_lock(&dev_priv->perf.lock);
+
+ dev_priv->perf.metrics_kobj =
+ kobject_create_and_add("metrics",
+ &dev_priv->drm.primary->kdev->kobj);
+ if (!dev_priv->perf.metrics_kobj)
+ goto exit;
+
+ if (i915_perf_register_sysfs_hsw(dev_priv)) {
+ kobject_put(dev_priv->perf.metrics_kobj);
+ dev_priv->perf.metrics_kobj = NULL;
+ }
+
+exit:
+ mutex_unlock(&dev_priv->perf.lock);
+}
+
+/**
+ * i915_perf_unregister - hide i915-perf from userspace
+ * @dev_priv: i915 device instance
+ *
+ * i915-perf state cleanup is split up into an 'unregister' and
+ * 'deinit' phase where the interface is first hidden from
+ * userspace by i915_perf_unregister() before cleaning up
+ * remaining state in i915_perf_fini().
+ */
+void i915_perf_unregister(struct drm_i915_private *dev_priv)
+{
+ if (!IS_HASWELL(dev_priv))
+ return;
+
+ if (!dev_priv->perf.metrics_kobj)
+ return;
+
+ i915_perf_unregister_sysfs_hsw(dev_priv);
+
+ kobject_put(dev_priv->perf.metrics_kobj);
+ dev_priv->perf.metrics_kobj = NULL;
+}
+
+static struct ctl_table oa_table[] = {
+ {
+ .procname = "perf_stream_paranoid",
+ .data = &i915_perf_stream_paranoid,
+ .maxlen = sizeof(i915_perf_stream_paranoid),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ {
+ .procname = "oa_max_sample_rate",
+ .data = &i915_oa_max_sample_rate,
+ .maxlen = sizeof(i915_oa_max_sample_rate),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &oa_sample_rate_hard_limit,
+ },
+ {}
+};
+
+static struct ctl_table i915_root[] = {
+ {
+ .procname = "i915",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = oa_table,
+ },
+ {}
+};
+
+static struct ctl_table dev_root[] = {
+ {
+ .procname = "dev",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = i915_root,
+ },
+ {}
+};
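+
+/* The tables above surface both knobs under /proc/sys/dev/i915/, so an
+ * administrator can, for example, permit unprivileged system-wide streams
+ * with:
+ *
+ *	# echo 0 > /proc/sys/dev/i915/perf_stream_paranoid
+ */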
+
+/**
+ * i915_perf_init - initialize i915-perf state on module load
+ * @dev_priv: i915 device instance
+ *
+ * Initializes i915-perf state without exposing anything to userspace.
+ *
+ * Note: i915-perf initialization is split into an 'init' and 'register'
+ * phase with the i915_perf_register() exposing state to userspace.
+ */
+void i915_perf_init(struct drm_i915_private *dev_priv)
+{
+ if (!IS_HASWELL(dev_priv))
+ return;
+
+ hrtimer_init(&dev_priv->perf.oa.poll_check_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ dev_priv->perf.oa.poll_check_timer.function = oa_poll_check_timer_cb;
+ init_waitqueue_head(&dev_priv->perf.oa.poll_wq);
+
+ INIT_LIST_HEAD(&dev_priv->perf.streams);
+ mutex_init(&dev_priv->perf.lock);
+ spin_lock_init(&dev_priv->perf.hook_lock);
+
+ dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
+ dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
+ dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
+ dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
+ dev_priv->perf.oa.ops.oa_disable = gen7_oa_disable;
+ dev_priv->perf.oa.ops.read = gen7_oa_read;
+ dev_priv->perf.oa.ops.oa_buffer_is_empty =
+ gen7_oa_buffer_is_empty_fop_unlocked;
+
+ dev_priv->perf.oa.timestamp_frequency = 12500000;
+
+ dev_priv->perf.oa.oa_formats = hsw_oa_formats;
+
+ dev_priv->perf.oa.n_builtin_sets =
+ i915_oa_n_builtin_metric_sets_hsw;
+
+ dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
+
+ dev_priv->perf.initialized = true;
+}
+
+/**
+ * i915_perf_fini - Counterpart to i915_perf_init()
+ * @dev_priv: i915 device instance
+ */
+void i915_perf_fini(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->perf.initialized)
+ return;
+
+ unregister_sysctl_table(dev_priv->perf.sysctl_header);
+
+ memset(&dev_priv->perf.oa.ops, 0, sizeof(dev_priv->perf.oa.ops));
+ dev_priv->perf.initialized = false;
+}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4f15a3dc6d98..1c8f5b9a7fcd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -62,6 +62,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
(port) == PORT_B ? (b) : (c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
+#define _PHY3(phy, a, b, c) ((phy) == DPIO_PHY0 ? (a) : \
+ (phy) == DPIO_PHY1 ? (b) : (c))
+#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
#define _MASKED_FIELD(mask, value) ({ \
if (__builtin_constant_p(mask)) \
@@ -107,6 +110,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GRDOM_RESET_STATUS (1 << 1)
#define GRDOM_RESET_ENABLE (1 << 0)
+/* BSpec only has register offset, PCI device and bit found empirically */
+#define I830_CLOCK_GATE 0xc8 /* device 0 */
+#define I830_L2_CACHE_CLOCK_GATE_DISABLE (1 << 2)
+
#define GCDGMBUS 0xcc
#define GCFGC2 0xda
@@ -294,7 +301,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Instruction field definitions used by the command parser
*/
#define INSTR_CLIENT_SHIFT 29
-#define INSTR_CLIENT_MASK 0xE0000000
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
#define INSTR_RC_CLIENT 0x3
@@ -615,7 +621,344 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8)
#define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4)
-#define OACONTROL _MMIO(0x2360)
+#define GEN7_OACONTROL _MMIO(0x2360)
+#define GEN7_OACONTROL_CTX_MASK 0xFFFFF000
+#define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F
+#define GEN7_OACONTROL_TIMER_PERIOD_SHIFT 6
+#define GEN7_OACONTROL_TIMER_ENABLE (1<<5)
+#define GEN7_OACONTROL_FORMAT_A13 (0<<2)
+#define GEN7_OACONTROL_FORMAT_A29 (1<<2)
+#define GEN7_OACONTROL_FORMAT_A13_B8_C8 (2<<2)
+#define GEN7_OACONTROL_FORMAT_A29_B8_C8 (3<<2)
+#define GEN7_OACONTROL_FORMAT_B4_C8 (4<<2)
+#define GEN7_OACONTROL_FORMAT_A45_B8_C8 (5<<2)
+#define GEN7_OACONTROL_FORMAT_B4_C8_A16 (6<<2)
+#define GEN7_OACONTROL_FORMAT_C4_B8 (7<<2)
+#define GEN7_OACONTROL_FORMAT_SHIFT 2
+#define GEN7_OACONTROL_PER_CTX_ENABLE (1<<1)
+#define GEN7_OACONTROL_ENABLE (1<<0)
+
+#define GEN8_OACTXID _MMIO(0x2364)
+
+#define GEN8_OACONTROL _MMIO(0x2B00)
+#define GEN8_OA_REPORT_FORMAT_A12 (0<<2)
+#define GEN8_OA_REPORT_FORMAT_A12_B8_C8 (2<<2)
+#define GEN8_OA_REPORT_FORMAT_A36_B8_C8 (5<<2)
+#define GEN8_OA_REPORT_FORMAT_C4_B8 (7<<2)
+#define GEN8_OA_REPORT_FORMAT_SHIFT 2
+#define GEN8_OA_SPECIFIC_CONTEXT_ENABLE (1<<1)
+#define GEN8_OA_COUNTER_ENABLE (1<<0)
+
+#define GEN8_OACTXCONTROL _MMIO(0x2360)
+#define GEN8_OA_TIMER_PERIOD_MASK 0x3F
+#define GEN8_OA_TIMER_PERIOD_SHIFT 2
+#define GEN8_OA_TIMER_ENABLE (1<<1)
+#define GEN8_OA_COUNTER_RESUME (1<<0)
+
+#define GEN7_OABUFFER _MMIO(0x23B0) /* R/W */
+#define GEN7_OABUFFER_OVERRUN_DISABLE (1<<3)
+#define GEN7_OABUFFER_EDGE_TRIGGER (1<<2)
+#define GEN7_OABUFFER_STOP_RESUME_ENABLE (1<<1)
+#define GEN7_OABUFFER_RESUME (1<<0)
+
+#define GEN8_OABUFFER _MMIO(0x2b14)
+
+#define GEN7_OASTATUS1 _MMIO(0x2364)
+#define GEN7_OASTATUS1_TAIL_MASK 0xffffffc0
+#define GEN7_OASTATUS1_COUNTER_OVERFLOW (1<<2)
+#define GEN7_OASTATUS1_OABUFFER_OVERFLOW (1<<1)
+#define GEN7_OASTATUS1_REPORT_LOST (1<<0)
+
+#define GEN7_OASTATUS2 _MMIO(0x2368)
+#define GEN7_OASTATUS2_HEAD_MASK 0xffffffc0
+
+#define GEN8_OASTATUS _MMIO(0x2b08)
+#define GEN8_OASTATUS_OVERRUN_STATUS (1<<3)
+#define GEN8_OASTATUS_COUNTER_OVERFLOW (1<<2)
+#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1<<1)
+#define GEN8_OASTATUS_REPORT_LOST (1<<0)
+
+#define GEN8_OAHEADPTR _MMIO(0x2B0C)
+#define GEN8_OATAILPTR _MMIO(0x2B10)
+
+#define OABUFFER_SIZE_128K (0<<3)
+#define OABUFFER_SIZE_256K (1<<3)
+#define OABUFFER_SIZE_512K (2<<3)
+#define OABUFFER_SIZE_1M (3<<3)
+#define OABUFFER_SIZE_2M (4<<3)
+#define OABUFFER_SIZE_4M (5<<3)
+#define OABUFFER_SIZE_8M (6<<3)
+#define OABUFFER_SIZE_16M (7<<3)
+
+#define OA_MEM_SELECT_GGTT (1<<0)
+
+#define EU_PERF_CNTL0 _MMIO(0xe458)
+
+#define GDT_CHICKEN_BITS _MMIO(0x9840)
+#define GT_NOA_ENABLE 0x00000080
+
+/*
+ * OA Boolean state
+ */
+
+#define OAREPORTTRIG1 _MMIO(0x2740)
+#define OAREPORTTRIG1_THRESHOLD_MASK 0xffff
+#define OAREPORTTRIG1_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+
+#define OAREPORTTRIG2 _MMIO(0x2744)
+#define OAREPORTTRIG2_INVERT_A_0 (1<<0)
+#define OAREPORTTRIG2_INVERT_A_1 (1<<1)
+#define OAREPORTTRIG2_INVERT_A_2 (1<<2)
+#define OAREPORTTRIG2_INVERT_A_3 (1<<3)
+#define OAREPORTTRIG2_INVERT_A_4 (1<<4)
+#define OAREPORTTRIG2_INVERT_A_5 (1<<5)
+#define OAREPORTTRIG2_INVERT_A_6 (1<<6)
+#define OAREPORTTRIG2_INVERT_A_7 (1<<7)
+#define OAREPORTTRIG2_INVERT_A_8 (1<<8)
+#define OAREPORTTRIG2_INVERT_A_9 (1<<9)
+#define OAREPORTTRIG2_INVERT_A_10 (1<<10)
+#define OAREPORTTRIG2_INVERT_A_11 (1<<11)
+#define OAREPORTTRIG2_INVERT_A_12 (1<<12)
+#define OAREPORTTRIG2_INVERT_A_13 (1<<13)
+#define OAREPORTTRIG2_INVERT_A_14 (1<<14)
+#define OAREPORTTRIG2_INVERT_A_15 (1<<15)
+#define OAREPORTTRIG2_INVERT_B_0 (1<<16)
+#define OAREPORTTRIG2_INVERT_B_1 (1<<17)
+#define OAREPORTTRIG2_INVERT_B_2 (1<<18)
+#define OAREPORTTRIG2_INVERT_B_3 (1<<19)
+#define OAREPORTTRIG2_INVERT_C_0 (1<<20)
+#define OAREPORTTRIG2_INVERT_C_1 (1<<21)
+#define OAREPORTTRIG2_INVERT_D_0 (1<<22)
+#define OAREPORTTRIG2_THRESHOLD_ENABLE (1<<23)
+#define OAREPORTTRIG2_REPORT_TRIGGER_ENABLE (1<<31)
+
+#define OAREPORTTRIG3 _MMIO(0x2748)
+#define OAREPORTTRIG3_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG3_NOA_SELECT_8_SHIFT 0
+#define OAREPORTTRIG3_NOA_SELECT_9_SHIFT 4
+#define OAREPORTTRIG3_NOA_SELECT_10_SHIFT 8
+#define OAREPORTTRIG3_NOA_SELECT_11_SHIFT 12
+#define OAREPORTTRIG3_NOA_SELECT_12_SHIFT 16
+#define OAREPORTTRIG3_NOA_SELECT_13_SHIFT 20
+#define OAREPORTTRIG3_NOA_SELECT_14_SHIFT 24
+#define OAREPORTTRIG3_NOA_SELECT_15_SHIFT 28
+
+#define OAREPORTTRIG4 _MMIO(0x274c)
+#define OAREPORTTRIG4_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG4_NOA_SELECT_0_SHIFT 0
+#define OAREPORTTRIG4_NOA_SELECT_1_SHIFT 4
+#define OAREPORTTRIG4_NOA_SELECT_2_SHIFT 8
+#define OAREPORTTRIG4_NOA_SELECT_3_SHIFT 12
+#define OAREPORTTRIG4_NOA_SELECT_4_SHIFT 16
+#define OAREPORTTRIG4_NOA_SELECT_5_SHIFT 20
+#define OAREPORTTRIG4_NOA_SELECT_6_SHIFT 24
+#define OAREPORTTRIG4_NOA_SELECT_7_SHIFT 28
+
+#define OAREPORTTRIG5 _MMIO(0x2750)
+#define OAREPORTTRIG5_THRESHOLD_MASK 0xffff
+#define OAREPORTTRIG5_EDGE_LEVEL_TRIGER_SELECT_MASK 0xffff0000 /* 0=level */
+
+#define OAREPORTTRIG6 _MMIO(0x2754)
+#define OAREPORTTRIG6_INVERT_A_0 (1<<0)
+#define OAREPORTTRIG6_INVERT_A_1 (1<<1)
+#define OAREPORTTRIG6_INVERT_A_2 (1<<2)
+#define OAREPORTTRIG6_INVERT_A_3 (1<<3)
+#define OAREPORTTRIG6_INVERT_A_4 (1<<4)
+#define OAREPORTTRIG6_INVERT_A_5 (1<<5)
+#define OAREPORTTRIG6_INVERT_A_6 (1<<6)
+#define OAREPORTTRIG6_INVERT_A_7 (1<<7)
+#define OAREPORTTRIG6_INVERT_A_8 (1<<8)
+#define OAREPORTTRIG6_INVERT_A_9 (1<<9)
+#define OAREPORTTRIG6_INVERT_A_10 (1<<10)
+#define OAREPORTTRIG6_INVERT_A_11 (1<<11)
+#define OAREPORTTRIG6_INVERT_A_12 (1<<12)
+#define OAREPORTTRIG6_INVERT_A_13 (1<<13)
+#define OAREPORTTRIG6_INVERT_A_14 (1<<14)
+#define OAREPORTTRIG6_INVERT_A_15 (1<<15)
+#define OAREPORTTRIG6_INVERT_B_0 (1<<16)
+#define OAREPORTTRIG6_INVERT_B_1 (1<<17)
+#define OAREPORTTRIG6_INVERT_B_2 (1<<18)
+#define OAREPORTTRIG6_INVERT_B_3 (1<<19)
+#define OAREPORTTRIG6_INVERT_C_0 (1<<20)
+#define OAREPORTTRIG6_INVERT_C_1 (1<<21)
+#define OAREPORTTRIG6_INVERT_D_0 (1<<22)
+#define OAREPORTTRIG6_THRESHOLD_ENABLE (1<<23)
+#define OAREPORTTRIG6_REPORT_TRIGGER_ENABLE (1<<31)
+
+#define OAREPORTTRIG7 _MMIO(0x2758)
+#define OAREPORTTRIG7_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG7_NOA_SELECT_8_SHIFT 0
+#define OAREPORTTRIG7_NOA_SELECT_9_SHIFT 4
+#define OAREPORTTRIG7_NOA_SELECT_10_SHIFT 8
+#define OAREPORTTRIG7_NOA_SELECT_11_SHIFT 12
+#define OAREPORTTRIG7_NOA_SELECT_12_SHIFT 16
+#define OAREPORTTRIG7_NOA_SELECT_13_SHIFT 20
+#define OAREPORTTRIG7_NOA_SELECT_14_SHIFT 24
+#define OAREPORTTRIG7_NOA_SELECT_15_SHIFT 28
+
+#define OAREPORTTRIG8 _MMIO(0x275c)
+#define OAREPORTTRIG8_NOA_SELECT_MASK 0xf
+#define OAREPORTTRIG8_NOA_SELECT_0_SHIFT 0
+#define OAREPORTTRIG8_NOA_SELECT_1_SHIFT 4
+#define OAREPORTTRIG8_NOA_SELECT_2_SHIFT 8
+#define OAREPORTTRIG8_NOA_SELECT_3_SHIFT 12
+#define OAREPORTTRIG8_NOA_SELECT_4_SHIFT 16
+#define OAREPORTTRIG8_NOA_SELECT_5_SHIFT 20
+#define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24
+#define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28
+
+#define OASTARTTRIG1 _MMIO(0x2710)
+#define OASTARTTRIG1_THRESHOLD_COUNT_MASK_MBZ 0xffff0000
+#define OASTARTTRIG1_THRESHOLD_MASK 0xffff
+
+#define OASTARTTRIG2 _MMIO(0x2714)
+#define OASTARTTRIG2_INVERT_A_0 (1<<0)
+#define OASTARTTRIG2_INVERT_A_1 (1<<1)
+#define OASTARTTRIG2_INVERT_A_2 (1<<2)
+#define OASTARTTRIG2_INVERT_A_3 (1<<3)
+#define OASTARTTRIG2_INVERT_A_4 (1<<4)
+#define OASTARTTRIG2_INVERT_A_5 (1<<5)
+#define OASTARTTRIG2_INVERT_A_6 (1<<6)
+#define OASTARTTRIG2_INVERT_A_7 (1<<7)
+#define OASTARTTRIG2_INVERT_A_8 (1<<8)
+#define OASTARTTRIG2_INVERT_A_9 (1<<9)
+#define OASTARTTRIG2_INVERT_A_10 (1<<10)
+#define OASTARTTRIG2_INVERT_A_11 (1<<11)
+#define OASTARTTRIG2_INVERT_A_12 (1<<12)
+#define OASTARTTRIG2_INVERT_A_13 (1<<13)
+#define OASTARTTRIG2_INVERT_A_14 (1<<14)
+#define OASTARTTRIG2_INVERT_A_15 (1<<15)
+#define OASTARTTRIG2_INVERT_B_0 (1<<16)
+#define OASTARTTRIG2_INVERT_B_1 (1<<17)
+#define OASTARTTRIG2_INVERT_B_2 (1<<18)
+#define OASTARTTRIG2_INVERT_B_3 (1<<19)
+#define OASTARTTRIG2_INVERT_C_0 (1<<20)
+#define OASTARTTRIG2_INVERT_C_1 (1<<21)
+#define OASTARTTRIG2_INVERT_D_0 (1<<22)
+#define OASTARTTRIG2_THRESHOLD_ENABLE (1<<23)
+#define OASTARTTRIG2_START_TRIG_FLAG_MBZ (1<<24)
+#define OASTARTTRIG2_EVENT_SELECT_0 (1<<28)
+#define OASTARTTRIG2_EVENT_SELECT_1 (1<<29)
+#define OASTARTTRIG2_EVENT_SELECT_2 (1<<30)
+#define OASTARTTRIG2_EVENT_SELECT_3 (1<<31)
+
+#define OASTARTTRIG3 _MMIO(0x2718)
+#define OASTARTTRIG3_NOA_SELECT_MASK 0xf
+#define OASTARTTRIG3_NOA_SELECT_8_SHIFT 0
+#define OASTARTTRIG3_NOA_SELECT_9_SHIFT 4
+#define OASTARTTRIG3_NOA_SELECT_10_SHIFT 8
+#define OASTARTTRIG3_NOA_SELECT_11_SHIFT 12
+#define OASTARTTRIG3_NOA_SELECT_12_SHIFT 16
+#define OASTARTTRIG3_NOA_SELECT_13_SHIFT 20
+#define OASTARTTRIG3_NOA_SELECT_14_SHIFT 24
+#define OASTARTTRIG3_NOA_SELECT_15_SHIFT 28
+
+#define OASTARTTRIG4 _MMIO(0x271c)
+#define OASTARTTRIG4_NOA_SELECT_MASK 0xf
+#define OASTARTTRIG4_NOA_SELECT_0_SHIFT 0
+#define OASTARTTRIG4_NOA_SELECT_1_SHIFT 4
+#define OASTARTTRIG4_NOA_SELECT_2_SHIFT 8
+#define OASTARTTRIG4_NOA_SELECT_3_SHIFT 12
+#define OASTARTTRIG4_NOA_SELECT_4_SHIFT 16
+#define OASTARTTRIG4_NOA_SELECT_5_SHIFT 20
+#define OASTARTTRIG4_NOA_SELECT_6_SHIFT 24
+#define OASTARTTRIG4_NOA_SELECT_7_SHIFT 28
+
+#define OASTARTTRIG5 _MMIO(0x2720)
+#define OASTARTTRIG5_THRESHOLD_COUNT_MASK_MBZ 0xffff0000
+#define OASTARTTRIG5_THRESHOLD_MASK 0xffff
+
+#define OASTARTTRIG6 _MMIO(0x2724)
+#define OASTARTTRIG6_INVERT_A_0 (1<<0)
+#define OASTARTTRIG6_INVERT_A_1 (1<<1)
+#define OASTARTTRIG6_INVERT_A_2 (1<<2)
+#define OASTARTTRIG6_INVERT_A_3 (1<<3)
+#define OASTARTTRIG6_INVERT_A_4 (1<<4)
+#define OASTARTTRIG6_INVERT_A_5 (1<<5)
+#define OASTARTTRIG6_INVERT_A_6 (1<<6)
+#define OASTARTTRIG6_INVERT_A_7 (1<<7)
+#define OASTARTTRIG6_INVERT_A_8 (1<<8)
+#define OASTARTTRIG6_INVERT_A_9 (1<<9)
+#define OASTARTTRIG6_INVERT_A_10 (1<<10)
+#define OASTARTTRIG6_INVERT_A_11 (1<<11)
+#define OASTARTTRIG6_INVERT_A_12 (1<<12)
+#define OASTARTTRIG6_INVERT_A_13 (1<<13)
+#define OASTARTTRIG6_INVERT_A_14 (1<<14)
+#define OASTARTTRIG6_INVERT_A_15 (1<<15)
+#define OASTARTTRIG6_INVERT_B_0 (1<<16)
+#define OASTARTTRIG6_INVERT_B_1 (1<<17)
+#define OASTARTTRIG6_INVERT_B_2 (1<<18)
+#define OASTARTTRIG6_INVERT_B_3 (1<<19)
+#define OASTARTTRIG6_INVERT_C_0 (1<<20)
+#define OASTARTTRIG6_INVERT_C_1 (1<<21)
+#define OASTARTTRIG6_INVERT_D_0 (1<<22)
+#define OASTARTTRIG6_THRESHOLD_ENABLE (1<<23)
+#define OASTARTTRIG6_START_TRIG_FLAG_MBZ (1<<24)
+#define OASTARTTRIG6_EVENT_SELECT_4 (1<<28)
+#define OASTARTTRIG6_EVENT_SELECT_5 (1<<29)
+#define OASTARTTRIG6_EVENT_SELECT_6 (1<<30)
+#define OASTARTTRIG6_EVENT_SELECT_7 (1<<31)
+
+#define OASTARTTRIG7 _MMIO(0x2728)
+#define OASTARTTRIG7_NOA_SELECT_MASK 0xf
+#define OASTARTTRIG7_NOA_SELECT_8_SHIFT 0
+#define OASTARTTRIG7_NOA_SELECT_9_SHIFT 4
+#define OASTARTTRIG7_NOA_SELECT_10_SHIFT 8
+#define OASTARTTRIG7_NOA_SELECT_11_SHIFT 12
+#define OASTARTTRIG7_NOA_SELECT_12_SHIFT 16
+#define OASTARTTRIG7_NOA_SELECT_13_SHIFT 20
+#define OASTARTTRIG7_NOA_SELECT_14_SHIFT 24
+#define OASTARTTRIG7_NOA_SELECT_15_SHIFT 28
+
+#define OASTARTTRIG8 _MMIO(0x272c)
+#define OASTARTTRIG8_NOA_SELECT_MASK 0xf
+#define OASTARTTRIG8_NOA_SELECT_0_SHIFT 0
+#define OASTARTTRIG8_NOA_SELECT_1_SHIFT 4
+#define OASTARTTRIG8_NOA_SELECT_2_SHIFT 8
+#define OASTARTTRIG8_NOA_SELECT_3_SHIFT 12
+#define OASTARTTRIG8_NOA_SELECT_4_SHIFT 16
+#define OASTARTTRIG8_NOA_SELECT_5_SHIFT 20
+#define OASTARTTRIG8_NOA_SELECT_6_SHIFT 24
+#define OASTARTTRIG8_NOA_SELECT_7_SHIFT 28
+
+/* CECX_0 */
+#define OACEC_COMPARE_LESS_OR_EQUAL 6
+#define OACEC_COMPARE_NOT_EQUAL 5
+#define OACEC_COMPARE_LESS_THAN 4
+#define OACEC_COMPARE_GREATER_OR_EQUAL 3
+#define OACEC_COMPARE_EQUAL 2
+#define OACEC_COMPARE_GREATER_THAN 1
+#define OACEC_COMPARE_ANY_EQUAL 0
+
+#define OACEC_COMPARE_VALUE_MASK 0xffff
+#define OACEC_COMPARE_VALUE_SHIFT 3
+
+#define OACEC_SELECT_NOA (0<<19)
+#define OACEC_SELECT_PREV (1<<19)
+#define OACEC_SELECT_BOOLEAN (2<<19)
+
+/* CECX_1 */
+#define OACEC_MASK_MASK 0xffff
+#define OACEC_CONSIDERATIONS_MASK 0xffff
+#define OACEC_CONSIDERATIONS_SHIFT 16
+
+#define OACEC0_0 _MMIO(0x2770)
+#define OACEC0_1 _MMIO(0x2774)
+#define OACEC1_0 _MMIO(0x2778)
+#define OACEC1_1 _MMIO(0x277c)
+#define OACEC2_0 _MMIO(0x2780)
+#define OACEC2_1 _MMIO(0x2784)
+#define OACEC3_0 _MMIO(0x2788)
+#define OACEC3_1 _MMIO(0x278c)
+#define OACEC4_0 _MMIO(0x2790)
+#define OACEC4_1 _MMIO(0x2794)
+#define OACEC5_0 _MMIO(0x2798)
+#define OACEC5_1 _MMIO(0x279c)
+#define OACEC6_0 _MMIO(0x27a0)
+#define OACEC6_1 _MMIO(0x27a4)
+#define OACEC7_0 _MMIO(0x27a8)
+#define OACEC7_1 _MMIO(0x27ac)
+
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
@@ -708,9 +1051,15 @@ enum skl_disp_power_wells {
/* These numbers are fixed and must match the position of the pw bits */
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_DDI_A_E,
+ GLK_DISP_PW_DDI_A = SKL_DISP_PW_DDI_A_E,
SKL_DISP_PW_DDI_B,
SKL_DISP_PW_DDI_C,
SKL_DISP_PW_DDI_D,
+
+ GLK_DISP_PW_AUX_A = 8,
+ GLK_DISP_PW_AUX_B,
+ GLK_DISP_PW_AUX_C,
+
SKL_DISP_PW_1 = 14,
SKL_DISP_PW_2,
@@ -720,6 +1069,7 @@ enum skl_disp_power_wells {
BXT_DPIO_CMN_A,
BXT_DPIO_CMN_BC,
+ GLK_DPIO_CMN_C,
};
#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
@@ -1188,8 +1538,10 @@ enum skl_disp_power_wells {
/* BXT PHY registers */
#define _BXT_PHY0_BASE 0x6C000
#define _BXT_PHY1_BASE 0x162000
-#define BXT_PHY_BASE(phy) _PIPE((phy), _BXT_PHY0_BASE, \
- _BXT_PHY1_BASE)
+#define _BXT_PHY2_BASE 0x163000
+#define BXT_PHY_BASE(phy) _PHY3((phy), _BXT_PHY0_BASE, \
+ _BXT_PHY1_BASE, \
+ _BXT_PHY2_BASE)
#define _BXT_PHY(phy, reg) \
_MMIO(BXT_PHY_BASE(phy) - _BXT_PHY0_BASE + (reg))
@@ -1201,7 +1553,6 @@ enum skl_disp_power_wells {
_MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
-#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
#define _BXT_PHY_CTL_DDI_A 0x64C00
#define _BXT_PHY_CTL_DDI_B 0x64C10
@@ -1214,9 +1565,11 @@ enum skl_disp_power_wells {
#define _PHY_CTL_FAMILY_EDP 0x64C80
#define _PHY_CTL_FAMILY_DDI 0x64C90
+#define _PHY_CTL_FAMILY_DDI_C 0x64CA0
#define COMMON_RESET_DIS (1 << 31)
-#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PIPE((phy), _PHY_CTL_FAMILY_DDI, \
- _PHY_CTL_FAMILY_EDP)
+#define BXT_PHY_CTL_FAMILY(phy) _MMIO_PHY3((phy), _PHY_CTL_FAMILY_DDI, \
+ _PHY_CTL_FAMILY_EDP, \
+ _PHY_CTL_FAMILY_DDI_C)
/* BXT PHY PLL registers */
#define _PORT_PLL_A 0x46074
@@ -1225,6 +1578,8 @@ enum skl_disp_power_wells {
#define PORT_PLL_ENABLE (1 << 31)
#define PORT_PLL_LOCK (1 << 30)
#define PORT_PLL_REF_SEL (1 << 27)
+#define PORT_PLL_POWER_ENABLE (1 << 26)
+#define PORT_PLL_POWER_STATE (1 << 25)
#define BXT_PORT_PLL_ENABLE(port) _MMIO_PORT(port, _PORT_PLL_A, _PORT_PLL_B)
#define _PORT_PLL_EBB_0_A 0x162034
@@ -1435,6 +1790,21 @@ enum skl_disp_power_wells {
#define DEEMPH_SHIFT 24
#define DE_EMPHASIS (0xFF << DEEMPH_SHIFT)
+#define _PORT_TX_DW5_LN0_A 0x162514
+#define _PORT_TX_DW5_LN0_B 0x6C514
+#define _PORT_TX_DW5_LN0_C 0x6C914
+#define _PORT_TX_DW5_GRP_A 0x162D14
+#define _PORT_TX_DW5_GRP_B 0x6CD14
+#define _PORT_TX_DW5_GRP_C 0x6CF14
+#define BXT_PORT_TX_DW5_LN0(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW5_LN0_B, \
+ _PORT_TX_DW5_LN0_C)
+#define BXT_PORT_TX_DW5_GRP(phy, ch) _MMIO_BXT_PHY_CH(phy, ch, \
+ _PORT_TX_DW5_GRP_B, \
+ _PORT_TX_DW5_GRP_C)
+#define DCC_DELAY_RANGE_1 (1 << 9)
+#define DCC_DELAY_RANGE_2 (1 << 8)
+
#define _PORT_TX_DW14_LN0_A 0x162538
#define _PORT_TX_DW14_LN0_B 0x6C538
#define _PORT_TX_DW14_LN0_C 0x6C938
@@ -2936,7 +3306,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
- (IS_BROXTON(dev_priv) ? \
+ (IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_US(us) : \
INTERVAL_1_33_US(us)) : \
INTERVAL_1_28_US(us))
@@ -2945,7 +3315,7 @@ enum skl_disp_power_wells {
#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
- (IS_BROXTON(dev_priv) ? \
+ (IS_GEN9_LP(dev_priv) ? \
INTERVAL_0_833_TO_US(interval) : \
INTERVAL_1_33_TO_US(interval)) : \
INTERVAL_1_28_TO_US(interval))
@@ -2953,8 +3323,10 @@ enum skl_disp_power_wells {
/*
* Logical Context regs
*/
-#define CCID _MMIO(0x2180)
-#define CCID_EN (1<<0)
+#define CCID _MMIO(0x2180)
+#define CCID_EN BIT(0)
+#define CCID_EXTENDED_STATE_RESTORE BIT(2)
+#define CCID_EXTENDED_STATE_SAVE BIT(3)
/*
* Notes on SNB/IVB/VLV context size:
* - Power context is saved elsewhere (LLC or stolen)
@@ -3243,9 +3615,12 @@ enum {
#define EDP_PSR_PERF_CNT_MASK 0xffffff
#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60)
-#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
-#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
-#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
+#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
+#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
+#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16)
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15)
#define EDP_PSR2_CTL _MMIO(0x6f900)
#define EDP_PSR2_ENABLE (1<<31)
@@ -3260,6 +3635,11 @@ enum {
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
#define EDP_PSR2_IDLE_MASK 0xf
+#define EDP_FRAMES_BEFORE_SU_ENTRY (1<<4)
+
+#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940)
+#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
+#define EDP_PSR2_STATUS_STATE_SHIFT 28
/* VGA port control */
#define ADPA _MMIO(0x61100)
@@ -5390,18 +5770,21 @@ enum {
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
-#define SPCNTR(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACNTR, _SPBCNTR)
-#define SPLINOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPALINOFF, _SPBLINOFF)
-#define SPSTRIDE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASTRIDE, _SPBSTRIDE)
-#define SPPOS(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAPOS, _SPBPOS)
-#define SPSIZE(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASIZE, _SPBSIZE)
-#define SPKEYMINVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMINVAL, _SPBKEYMINVAL)
-#define SPKEYMSK(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMSK, _SPBKEYMSK)
-#define SPSURF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPASURF, _SPBSURF)
-#define SPKEYMAXVAL(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
-#define SPTILEOFF(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPATILEOFF, _SPBTILEOFF)
-#define SPCONSTALPHA(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPACONSTALPHA, _SPBCONSTALPHA)
-#define SPGAMC(pipe, plane) _MMIO_PIPE((pipe) * 2 + (plane), _SPAGAMC, _SPBGAMC)
+#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
+
+#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
+#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
+#define SPSTRIDE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASTRIDE, _SPBSTRIDE)
+#define SPPOS(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAPOS, _SPBPOS)
+#define SPSIZE(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASIZE, _SPBSIZE)
+#define SPKEYMINVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMINVAL, _SPBKEYMINVAL)
+#define SPKEYMSK(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMSK, _SPBKEYMSK)
+#define SPSURF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPASURF, _SPBSURF)
+#define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
+#define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
+#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
+#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
/*
* CHV pipe B sprite CSC
@@ -5410,29 +5793,32 @@ enum {
* |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
* |cb| |c6 c7 c8| |cb + cr_ioff| |cb_ooff|
*/
-#define SPCSCYGOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
-#define SPCSCCBOFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
-#define SPCSCCROFF(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
+#define _MMIO_CHV_SPCSC(plane_id, reg) \
+ _MMIO(VLV_DISPLAY_BASE + ((plane_id) - PLANE_SPRITE0) * 0x1000 + (reg))
+
+#define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900)
+#define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904)
+#define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908)
#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */
-#define SPCSCC01(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
-#define SPCSCC23(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
-#define SPCSCC45(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
-#define SPCSCC67(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
-#define SPCSCC8(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
+#define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c)
+#define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910)
+#define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914)
+#define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918)
+#define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c)
#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */
-#define SPCSCYGICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
-#define SPCSCCBICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
-#define SPCSCCRICLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
+#define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920)
+#define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924)
+#define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928)
#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */
-#define SPCSCYGOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
-#define SPCSCCBOCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
-#define SPCSCCROCLAMP(sprite) _MMIO(VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
+#define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c)
+#define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930)
+#define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934)
#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
#define SPCSC_OMIN(x) ((x) << 0) /* u10 */
@@ -6086,6 +6472,12 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+#define CHICKEN_TRANS_A 0x420c0
+#define CHICKEN_TRANS_B 0x420c4
+#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
+#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
+
#define DISP_ARB_CTL _MMIO(0x45000)
#define DISP_FBC_MEMORY_WAKE (1<<31)
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
@@ -6930,6 +7322,7 @@ enum {
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define GEN6_UCGCTL3 _MMIO(0x9408)
+# define GEN6_OACSUNIT_CLOCK_GATE_DISABLE (1 << 20)
#define GEN7_UCGCTL4 _MMIO(0x940c)
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
@@ -8315,6 +8708,21 @@ enum {
#define BXT_PIPE_SELECT_SHIFT 7
#define BXT_PIPE_SELECT_MASK (7 << 7)
#define BXT_PIPE_SELECT(pipe) ((pipe) << 7)
+#define GLK_PHY_STATUS_PORT_READY (1 << 31) /* RO */
+#define GLK_ULPS_NOT_ACTIVE (1 << 30) /* RO */
+#define GLK_MIPIIO_RESET_RELEASED (1 << 28)
+#define GLK_CLOCK_LANE_STOP_STATE (1 << 27) /* RO */
+#define GLK_DATA_LANE_STOP_STATE (1 << 26) /* RO */
+#define GLK_LP_WAKE (1 << 22)
+#define GLK_LP11_LOW_PWR_MODE (1 << 21)
+#define GLK_LP00_LOW_PWR_MODE (1 << 20)
+#define GLK_FIREWALL_ENABLE (1 << 16)
+#define BXT_PIXEL_OVERLAP_CNT_MASK (0xf << 10)
+#define BXT_PIXEL_OVERLAP_CNT_SHIFT 10
+#define BXT_DSC_ENABLE (1 << 3)
+#define BXT_RGB_FLIP (1 << 2)
+#define GLK_MIPIIO_PORT_POWERED (1 << 1) /* RO */
+#define GLK_MIPIIO_ENABLE (1 << 0)
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
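
Note on the GT_INTERVAL_FROM_US()/GT_PM_INTERVAL_TO_US() hunks above: the IS_GEN9_LP() switch only widens the platform predicate; the conversion itself is a fixed-point divide by the counter tick (1.28us, 1.33us or 0.833us). A minimal standalone sketch of that arithmetic follows; it is illustrative only, and the 1.28us helper is not shown in this hunk, so its ((us) * 100) >> 7 form is an assumption.

#include <stdio.h>

/* illustrative stand-ins for the interval macros above */
static unsigned int interval_1_28_us(unsigned int us)  { return (us * 100) >> 7; } /* assumed definition */
static unsigned int interval_1_33_us(unsigned int us)  { return (us * 3) >> 2; }
static unsigned int interval_0_833_us(unsigned int us) { return (us * 6) / 5; }

int main(void)
{
	unsigned int us = 100;

	/* 100us -> ~78 ticks @1.28us, 75 @1.33us, 120 @0.833us */
	printf("%u us: %u / %u / %u ticks\n", us,
	       interval_1_28_us(us), interval_1_33_us(us), interval_0_833_us(us));
	return 0;
}
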
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index b0e1e7ca75da..5c86925a0294 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -56,13 +56,12 @@ static void i915_restore_display(struct drm_i915_private *dev_priv)
i915_redisable_vga(dev_priv);
}
-int i915_save_state(struct drm_device *dev)
+int i915_save_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
i915_save_display(dev_priv);
@@ -97,18 +96,17 @@ int i915_save_state(struct drm_device *dev)
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
}
-int i915_restore_state(struct drm_device *dev)
+int i915_restore_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
int i;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_restore_fences(dev_priv);
@@ -145,9 +143,9 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_i2c_reset(dev);
+ intel_i2c_reset(dev_priv);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 147420ccf49c..40f4e5efaf83 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -17,6 +17,93 @@
static DEFINE_SPINLOCK(i915_sw_fence_lock);
+enum {
+ DEBUG_FENCE_IDLE = 0,
+ DEBUG_FENCE_NOTIFY,
+};
+
+#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
+
+static void *i915_sw_fence_debug_hint(void *addr)
+{
+ return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
+}
+
+static struct debug_obj_descr i915_sw_fence_debug_descr = {
+ .name = "i915_sw_fence",
+ .debug_hint = i915_sw_fence_debug_hint,
+};
+
+static inline void debug_fence_init(struct i915_sw_fence *fence)
+{
+ debug_object_init(fence, &i915_sw_fence_debug_descr);
+}
+
+static inline void debug_fence_activate(struct i915_sw_fence *fence)
+{
+ debug_object_activate(fence, &i915_sw_fence_debug_descr);
+}
+
+static inline void debug_fence_set_state(struct i915_sw_fence *fence,
+ int old, int new)
+{
+ debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new);
+}
+
+static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
+{
+ debug_object_deactivate(fence, &i915_sw_fence_debug_descr);
+}
+
+static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+{
+ debug_object_destroy(fence, &i915_sw_fence_debug_descr);
+}
+
+static inline void debug_fence_free(struct i915_sw_fence *fence)
+{
+ debug_object_free(fence, &i915_sw_fence_debug_descr);
+ smp_wmb(); /* flush the change in state before reallocation */
+}
+
+static inline void debug_fence_assert(struct i915_sw_fence *fence)
+{
+ debug_object_assert_init(fence, &i915_sw_fence_debug_descr);
+}
+
+#else
+
+static inline void debug_fence_init(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_activate(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_set_state(struct i915_sw_fence *fence,
+ int old, int new)
+{
+}
+
+static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_destroy(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_free(struct i915_sw_fence *fence)
+{
+}
+
+static inline void debug_fence_assert(struct i915_sw_fence *fence)
+{
+}
+
+#endif
+
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
enum i915_sw_fence_notify state)
{
@@ -26,25 +113,37 @@ static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
return fn(fence, state);
}
-static void i915_sw_fence_free(struct kref *kref)
+#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
+void i915_sw_fence_fini(struct i915_sw_fence *fence)
+{
+ debug_fence_free(fence);
+}
+#endif
+
+static void i915_sw_fence_release(struct kref *kref)
{
struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);
WARN_ON(atomic_read(&fence->pending) > 0);
+ debug_fence_destroy(fence);
- if (fence->flags & I915_SW_FENCE_MASK)
+ if (fence->flags & I915_SW_FENCE_MASK) {
__i915_sw_fence_notify(fence, FENCE_FREE);
- else
+ } else {
+ i915_sw_fence_fini(fence);
kfree(fence);
+ }
}
static void i915_sw_fence_put(struct i915_sw_fence *fence)
{
- kref_put(&fence->kref, i915_sw_fence_free);
+ debug_fence_assert(fence);
+ kref_put(&fence->kref, i915_sw_fence_release);
}
static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
{
+ debug_fence_assert(fence);
kref_get(&fence->kref);
return fence;
}
@@ -56,6 +155,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
wait_queue_t *pos, *next;
unsigned long flags;
+ debug_fence_deactivate(fence);
atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */
/*
@@ -88,23 +188,33 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
} while (1);
}
spin_unlock_irqrestore(&x->lock, flags);
+
+ debug_fence_assert(fence);
}
static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
struct list_head *continuation)
{
+ debug_fence_assert(fence);
+
if (!atomic_dec_and_test(&fence->pending))
return;
+ debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);
+
if (fence->flags & I915_SW_FENCE_MASK &&
__i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
return;
+ debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);
+
__i915_sw_fence_wake_up_all(fence, continuation);
}
static void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
+ debug_fence_assert(fence);
+
if (WARN_ON(i915_sw_fence_done(fence)))
return;
@@ -113,6 +223,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence)
static void i915_sw_fence_await(struct i915_sw_fence *fence)
{
+ debug_fence_assert(fence);
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}
@@ -123,18 +234,26 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence,
{
BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);
+ debug_fence_init(fence);
+
__init_waitqueue_head(&fence->wait, name, key);
kref_init(&fence->kref);
atomic_set(&fence->pending, 1);
fence->flags = (unsigned long)fn;
}
-void i915_sw_fence_commit(struct i915_sw_fence *fence)
+static void __i915_sw_fence_commit(struct i915_sw_fence *fence)
{
i915_sw_fence_complete(fence);
i915_sw_fence_put(fence);
}
+void i915_sw_fence_commit(struct i915_sw_fence *fence)
+{
+ debug_fence_activate(fence);
+ __i915_sw_fence_commit(fence);
+}
+
static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
{
list_del(&wq->task_list);
@@ -206,9 +325,13 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
unsigned long flags;
int pending;
+ debug_fence_assert(fence);
+
if (i915_sw_fence_done(signaler))
return 0;
+ debug_fence_assert(signaler);
+
/* The dependency graph must be acyclic. */
if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
return -EINVAL;
@@ -279,7 +402,7 @@ static void timer_i915_sw_fence_wake(unsigned long data)
dma_fence_put(cb->dma);
cb->dma = NULL;
- i915_sw_fence_commit(cb->fence);
+ __i915_sw_fence_commit(cb->fence);
cb->timer.function = NULL;
}
@@ -290,7 +413,7 @@ static void dma_i915_sw_fence_wake(struct dma_fence *dma,
del_timer_sync(&cb->timer);
if (cb->timer.function)
- i915_sw_fence_commit(cb->fence);
+ __i915_sw_fence_commit(cb->fence);
dma_fence_put(cb->dma);
kfree(cb);
@@ -304,6 +427,8 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct i915_sw_dma_fence_cb *cb;
int ret;
+ debug_fence_assert(fence);
+
if (dma_fence_is_signaled(dma))
return 0;
@@ -349,6 +474,8 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct dma_fence *excl;
int ret = 0, pending;
+ debug_fence_assert(fence);
+
if (write) {
struct dma_fence **shared;
unsigned int count, i;
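
The debug_fence_*() helpers added above wrap the generic debugobjects machinery: with CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS set they track each lifecycle transition, otherwise they compile away to empty stubs. A hedged sketch of the lifecycle they bracket, from a fence owner's point of view; example_notify() and example_owner() are hypothetical, the i915_sw_fence_* calls are the API shown in these hunks.

#include <linux/notifier.h>

#include "i915_sw_fence.h"

static int __i915_sw_fence_call
example_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;	/* hypothetical: nothing extra to do on COMPLETE/FREE */
}

static void example_owner(struct i915_sw_fence *fence)
{
	i915_sw_fence_init(fence, example_notify);	/* debug_fence_init() */
	i915_sw_fence_commit(fence);			/* debug_fence_activate(), may complete */
	/* ...once the fence has signalled and its last reference is gone... */
	i915_sw_fence_fini(fence);			/* debug_fence_free() under the new Kconfig */
}
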
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 0f3185ef7f4e..d31cefbbcc04 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -56,6 +56,12 @@ do { \
__i915_sw_fence_init((fence), (fn), NULL, NULL)
#endif
+#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
+void i915_sw_fence_fini(struct i915_sw_fence *fence);
+#else
+static inline void i915_sw_fence_fini(struct i915_sw_fence *fence) {}
+#endif
+
void i915_sw_fence_commit(struct i915_sw_fence *fence);
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 3df8d3dd31cd..376ac957cd1c 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -58,7 +58,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
units <<= 8;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
units = 1;
div = 1200; /* 833.33ns */
}
@@ -535,7 +535,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
if (ret)
return ret;
- error_priv.dev = dev;
+ error_priv.i915 = dev_priv;
i915_error_state_get(dev, &error_priv);
ret = i915_error_state_to_str(&error_str, &error_priv);
@@ -560,7 +560,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
DRM_DEBUG_DRIVER("Resetting error state\n");
- i915_destroy_error_state(&dev_priv->drm);
+ i915_destroy_error_state(dev_priv);
return count;
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index c5d210ebaa9a..4461df5a94fe 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -406,7 +406,7 @@ TRACE_EVENT(i915_gem_evict,
),
TP_fast_assign(
- __entry->dev = vm->dev->primary->index;
+ __entry->dev = vm->i915->drm.primary->index;
__entry->vm = vm;
__entry->size = size;
__entry->align = align;
@@ -443,13 +443,41 @@ TRACE_EVENT(i915_gem_evict_vm,
),
TP_fast_assign(
- __entry->dev = vm->dev->primary->index;
+ __entry->dev = vm->i915->drm.primary->index;
__entry->vm = vm;
),
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
+TRACE_EVENT(i915_gem_evict_node,
+ TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
+ TP_ARGS(vm, node, flags),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(struct i915_address_space *, vm)
+ __field(u64, start)
+ __field(u64, size)
+ __field(unsigned long, color)
+ __field(unsigned int, flags)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = vm->i915->drm.primary->index;
+ __entry->vm = vm;
+ __entry->start = node->start;
+ __entry->size = node->size;
+ __entry->color = node->color;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("dev=%d, vm=%p, start=%llx size=%llx, color=%lx, flags=%x",
+ __entry->dev, __entry->vm,
+ __entry->start, __entry->size,
+ __entry->color, __entry->flags)
+);
+
TRACE_EVENT(i915_gem_ring_sync_to,
TP_PROTO(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from),
@@ -711,7 +739,7 @@ DECLARE_EVENT_CLASS(i915_ppgtt,
TP_fast_assign(
__entry->vm = vm;
- __entry->dev = vm->dev->primary->index;
+ __entry->dev = vm->i915->drm.primary->index;
),
TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
new file mode 100644
index 000000000000..34020873e1f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_UTILS_H
+#define __I915_UTILS_H
+
+#define range_overflows(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+#define range_overflows_t(type, start, size, max) \
+ range_overflows((type)(start), (type)(size), (type)(max))
+
+/* Note we don't consider signbits :| */
+#define overflows_type(x, T) \
+ (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+
+#define ptr_mask_bits(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v & PAGE_MASK); \
+})
+
+#define ptr_unpack_bits(ptr, bits) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (bits) = __v & ~PAGE_MASK; \
+ (typeof(ptr))(__v & PAGE_MASK); \
+})
+
+#define ptr_pack_bits(ptr, bits) \
+ ((typeof(ptr))((unsigned long)(ptr) | (bits)))
+
+#define fetch_and_zero(ptr) ({ \
+ typeof(*ptr) __T = *(ptr); \
+ *(ptr) = (typeof(*ptr))0; \
+ __T; \
+})
+
+#endif /* !__I915_UTILS_H */
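
A minimal userspace sketch exercising the new i915_utils.h helpers, assuming the macros above are pasted in; BITS_PER_BYTE and PAGE_MASK are trivial stand-ins for the kernel definitions.

#include <assert.h>

#define BITS_PER_BYTE	8
#define PAGE_MASK	(~4095UL)
/* ...plus the helper macros from the header above... */

int main(void)
{
	unsigned long bits;
	void *p = (void *)0x10000;
	int val = 7;

	assert(!range_overflows(0x100UL, 0x200UL, 0x1000UL));	/* start + size <= max */
	assert(range_overflows(0xf00UL, 0x200UL, 0x1000UL));	/* start + size > max */

	p = ptr_pack_bits(p, 3);		/* stash 3 in the low (sub-page) bits */
	p = ptr_unpack_bits(p, bits);		/* recover the pointer and the bits */
	assert(p == (void *)0x10000 && bits == 3);

	assert(fetch_and_zero(&val) == 7 && val == 0);
	return 0;
}
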
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dae340cfc6c7..d0abfd08a01c 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -116,22 +116,20 @@ void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
memset(&bl_info, 0, sizeof(bl_info));
}
-static int vgt_balloon_space(struct drm_mm *mm,
+static int vgt_balloon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node,
unsigned long start, unsigned long end)
{
unsigned long size = end - start;
- if (start == end)
+ if (start >= end)
return -EINVAL;
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
-
- node->start = start;
- node->size = size;
-
- return drm_mm_reserve_node(mm, node);
+ return i915_gem_gtt_reserve(&ggtt->base, node,
+ size, start, I915_COLOR_UNEVICTABLE,
+ 0);
}
/**
@@ -214,10 +212,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
/* Unmappable graphic memory ballooning */
if (unmappable_base > ggtt->mappable_end) {
- ret = vgt_balloon_space(&ggtt->base.mm,
- &bl_info.space[2],
- ggtt->mappable_end,
- unmappable_base);
+ ret = vgt_balloon_space(ggtt, &bl_info.space[2],
+ ggtt->mappable_end, unmappable_base);
if (ret)
goto err;
@@ -228,18 +224,15 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
* because it is reserved to the guard page.
*/
if (unmappable_end < ggtt_end - PAGE_SIZE) {
- ret = vgt_balloon_space(&ggtt->base.mm,
- &bl_info.space[3],
- unmappable_end,
- ggtt_end - PAGE_SIZE);
+ ret = vgt_balloon_space(ggtt, &bl_info.space[3],
+ unmappable_end, ggtt_end - PAGE_SIZE);
if (ret)
goto err;
}
/* Mappable graphic memory ballooning */
if (mappable_base > ggtt->base.start) {
- ret = vgt_balloon_space(&ggtt->base.mm,
- &bl_info.space[0],
+ ret = vgt_balloon_space(ggtt, &bl_info.space[0],
ggtt->base.start, mappable_base);
if (ret)
@@ -247,10 +240,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
}
if (mappable_end < ggtt->mappable_end) {
- ret = vgt_balloon_space(&ggtt->base.mm,
- &bl_info.space[1],
- mappable_end,
- ggtt->mappable_end);
+ ret = vgt_balloon_space(ggtt, &bl_info.space[1],
+ mappable_end, ggtt->mappable_end);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e924a9516079..155906e84812 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -45,6 +45,7 @@ i915_vma_retire(struct i915_gem_active *active,
if (i915_vma_is_active(vma))
return;
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
WARN_ON(i915_vma_unbind(vma));
@@ -69,17 +70,15 @@ i915_vma_retire(struct i915_gem_active *active,
}
static struct i915_vma *
-__i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
struct rb_node *rb, **p;
int i;
- GEM_BUG_ON(vm->closed);
-
- vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+ vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -87,24 +86,50 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
init_request_active(&vma->last_read[i], i915_vma_retire);
init_request_active(&vma->last_fence, NULL);
- list_add(&vma->vm_link, &vm->unbound_list);
vma->vm = vm;
vma->obj = obj;
vma->size = obj->base.size;
+ vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- if (view) {
+ if (view && view->type != I915_GGTT_VIEW_NORMAL) {
vma->ggtt_view = *view;
if (view->type == I915_GGTT_VIEW_PARTIAL) {
- vma->size = view->params.partial.size;
+ GEM_BUG_ON(range_overflows_t(u64,
+ view->partial.offset,
+ view->partial.size,
+ obj->base.size >> PAGE_SHIFT));
+ vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
+ GEM_BUG_ON(vma->size >= obj->base.size);
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
- vma->size =
- intel_rotation_info_size(&view->params.rotated);
+ vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
}
}
+ if (unlikely(vma->size > vm->total))
+ goto err_vma;
+
+ GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
+
if (i915_is_ggtt(vm)) {
+ if (unlikely(overflows_type(vma->size, u32)))
+ goto err_vma;
+
+ vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
+ if (unlikely(vma->fence_size < vma->size || /* overflow */
+ vma->fence_size > vm->total))
+ goto err_vma;
+
+ GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
+
+ vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
+ i915_gem_object_get_tiling(obj),
+ i915_gem_object_get_stride(obj));
+ GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
+
vma->flags |= I915_VMA_GGTT;
list_add(&vma->obj_link, &obj->vma_list);
} else {
@@ -126,20 +151,74 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma_tree);
+ list_add(&vma->vm_link, &vm->unbound_list);
return vma;
+
+err_vma:
+ kmem_cache_free(vm->i915->vmas, vma);
+ return ERR_PTR(-E2BIG);
}
+static struct i915_vma *
+vma_lookup(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
+{
+ struct rb_node *rb;
+
+ rb = obj->vma_tree.rb_node;
+ while (rb) {
+ struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
+ long cmp;
+
+ cmp = i915_vma_compare(vma, vm, view);
+ if (cmp == 0)
+ return vma;
+
+ if (cmp < 0)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+
+ return NULL;
+}
+
+/**
+ * i915_vma_instance - return the singleton instance of the VMA
+ * @obj: parent &struct drm_i915_gem_object to be mapped
+ * @vm: address space in which the mapping is located
+ * @view: additional mapping requirements
+ *
+ * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
+ * the same @view characteristics. If a match is not found, one is created.
+ * Once created, the VMA is kept until either the object is freed, or the
+ * address space is closed.
+ *
+ * Must be called with struct_mutex held.
+ *
+ * Returns the vma, or an error pointer.
+ */
struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view)
+i915_vma_instance(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view)
{
+ struct i915_vma *vma;
+
lockdep_assert_held(&obj->base.dev->struct_mutex);
GEM_BUG_ON(view && !i915_is_ggtt(vm));
- GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+ GEM_BUG_ON(vm->closed);
+
+ vma = vma_lookup(obj, vm, view);
+ if (!vma)
+ vma = vma_create(obj, vm, view);
- return __i915_vma_create(obj, vm, view);
+ GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
+ GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
+ GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
+ return vma;
}
/**
@@ -176,6 +255,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (bind_flags == 0)
return 0;
+ if (GEM_WARN_ON(range_overflows(vma->node.start,
+ vma->node.size,
+ vma->vm->total)))
+ return -ENODEV;
+
if (vma_flags == 0 && vma->vm->allocate_va_range) {
trace_i915_va_alloc(vma);
ret = vma->vm->allocate_va_range(vma->vm,
@@ -199,9 +283,9 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
void __iomem *ptr;
/* Access through the GTT requires the device to be awake. */
- assert_rpm_wakelock_held(to_i915(vma->vm->dev));
+ assert_rpm_wakelock_held(vma->vm->i915);
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
return IO_ERR_PTR(-ENODEV);
@@ -249,7 +333,8 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (vma->node.size < size)
return true;
- if (alignment && vma->node.start & (alignment - 1))
+ GEM_BUG_ON(alignment && !is_power_of_2(alignment));
+ if (alignment && !IS_ALIGNED(vma->node.start, alignment))
return true;
if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
@@ -268,40 +353,37 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj = vma->obj;
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
bool mappable, fenceable;
- u32 fence_size, fence_alignment;
-
- fence_size = i915_gem_get_ggtt_size(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj));
- fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
- vma->size,
- i915_gem_object_get_tiling(obj),
- true);
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
- mappable = (vma->node.start + fence_size <=
- dev_priv->ggtt.mappable_end);
+ GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+ GEM_BUG_ON(!vma->fence_size);
/*
* Explicitly disable for rotated VMA since the display does not
* need the fence and the VMA is not accessible to other users.
*/
- if (mappable && fenceable &&
- vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+ if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+ return;
+
+ fenceable = (vma->node.size >= vma->fence_size &&
+ IS_ALIGNED(vma->node.start, vma->fence_alignment));
+
+ mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
+
+ if (mappable && fenceable)
vma->flags |= I915_VMA_CAN_FENCE;
else
vma->flags &= ~I915_VMA_CAN_FENCE;
}
-bool i915_gem_valid_gtt_space(struct i915_vma *vma,
- unsigned long cache_level)
+static bool color_differs(struct drm_mm_node *node, unsigned long color)
+{
+ return node->allocated && node->color != color;
+}
+
+bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
{
- struct drm_mm_node *gtt_space = &vma->node;
+ struct drm_mm_node *node = &vma->node;
struct drm_mm_node *other;
/*
@@ -314,18 +396,16 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma,
if (vma->vm->mm.color_adjust == NULL)
return true;
- if (!drm_mm_node_allocated(gtt_space))
- return true;
-
- if (list_empty(&gtt_space->node_list))
- return true;
+ /* Only valid to be called on an already inserted vma */
+ GEM_BUG_ON(!drm_mm_node_allocated(node));
+ GEM_BUG_ON(list_empty(&node->node_list));
- other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
- if (other->allocated && !other->hole_follows && other->color != cache_level)
+ other = list_prev_entry(node, node_list);
+ if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
return false;
- other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
- if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
+ other = list_next_entry(node, node_list);
+ if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
return false;
return true;
@@ -348,7 +428,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma,
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
- struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+ struct drm_i915_private *dev_priv = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
u64 start, end;
int ret;
@@ -357,22 +437,26 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
size = max(size, vma->size);
- if (flags & PIN_MAPPABLE)
- size = i915_gem_get_ggtt_size(dev_priv, size,
- i915_gem_object_get_tiling(obj));
+ alignment = max(alignment, vma->display_alignment);
+ if (flags & PIN_MAPPABLE) {
+ size = max_t(typeof(size), size, vma->fence_size);
+ alignment = max_t(typeof(alignment),
+ alignment, vma->fence_alignment);
+ }
- alignment = max(max(alignment, vma->display_alignment),
- i915_gem_get_ggtt_alignment(dev_priv, size,
- i915_gem_object_get_tiling(obj),
- flags & PIN_MAPPABLE));
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
+ GEM_BUG_ON(!is_power_of_2(alignment));
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+ GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
end = vma->vm->total;
if (flags & PIN_MAPPABLE)
end = min_t(u64, end, dev_priv->ggtt.mappable_end);
if (flags & PIN_ZONE_4G)
- end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+ end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
+ GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
/* If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
@@ -392,64 +476,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (flags & PIN_OFFSET_FIXED) {
u64 offset = flags & PIN_OFFSET_MASK;
- if (offset & (alignment - 1) || offset > end - size) {
+ if (!IS_ALIGNED(offset, alignment) ||
+ range_overflows(offset, size, end)) {
ret = -EINVAL;
goto err_unpin;
}
- vma->node.start = offset;
- vma->node.size = size;
- vma->node.color = obj->cache_level;
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
- if (ret) {
- ret = i915_gem_evict_for_vma(vma);
- if (ret == 0)
- ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
- if (ret)
- goto err_unpin;
- }
+ ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
+ size, offset, obj->cache_level,
+ flags);
+ if (ret)
+ goto err_unpin;
} else {
- u32 search_flag, alloc_flag;
-
- if (flags & PIN_HIGH) {
- search_flag = DRM_MM_SEARCH_BELOW;
- alloc_flag = DRM_MM_CREATE_TOP;
- } else {
- search_flag = DRM_MM_SEARCH_DEFAULT;
- alloc_flag = DRM_MM_CREATE_DEFAULT;
- }
-
- /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
- * so we know that we always have a minimum alignment of 4096.
- * The drm_mm range manager is optimised to return results
- * with zero alignment, so where possible use the optimal
- * path.
- */
- if (alignment <= 4096)
- alignment = 0;
-
-search_free:
- ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
- &vma->node,
- size, alignment,
- obj->cache_level,
- start, end,
- search_flag,
- alloc_flag);
- if (ret) {
- ret = i915_gem_evict_something(vma->vm, size, alignment,
- obj->cache_level,
- start, end,
- flags);
- if (ret == 0)
- goto search_free;
-
+ ret = i915_gem_gtt_insert(vma->vm, &vma->node,
+ size, alignment, obj->cache_level,
+ start, end, flags);
+ if (ret)
goto err_unpin;
- }
GEM_BUG_ON(vma->node.start < start);
GEM_BUG_ON(vma->node.start + vma->node.size > end);
}
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
@@ -470,7 +518,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
unsigned int bound = vma->flags;
int ret;
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
@@ -492,6 +540,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
@@ -568,7 +617,7 @@ int i915_vma_unbind(struct i915_vma *vma)
for_each_active(active, idx) {
ret = i915_gem_active_retire(&vma->last_read[idx],
- &vma->vm->dev->struct_mutex);
+ &vma->vm->i915->drm.struct_mutex);
if (ret)
break;
}
@@ -629,6 +678,7 @@ int i915_vma_unbind(struct i915_vma *vma)
* reaped by the shrinker.
*/
i915_gem_object_unpin_pages(obj);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
destroy:
if (unlikely(i915_vma_is_closed(vma)))
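
i915_vma_instance() above replaces i915_vma_create() with a lookup-or-create over the object's rb-tree of VMAs. A generic sketch of that pattern (not the driver's exact code or comparison order), shown here only to make the control flow explicit:

#include <linux/err.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

struct example_node {
	struct rb_node rb;
	long key;
};

static struct example_node *lookup_or_create(struct rb_root *root, long key)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;
	struct example_node *n;

	while (*p) {
		parent = *p;
		n = rb_entry(parent, struct example_node, rb);
		if (key == n->key)
			return n;			/* singleton already exists */
		p = key < n->key ? &parent->rb_left : &parent->rb_right;
	}

	n = kzalloc(sizeof(*n), GFP_KERNEL);		/* miss: create and insert */
	if (!n)
		return ERR_PTR(-ENOMEM);
	n->key = key;
	rb_link_node(&n->rb, parent, p);
	rb_insert_color(&n->rb, root);
	return n;
}
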
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 85446f0b0b3f..e39d922cfb6f 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -55,6 +55,9 @@ struct i915_vma {
u64 size;
u64 display_alignment;
+ u32 fence_size;
+ u32 fence_alignment;
+
unsigned int flags;
/**
* How many users have pinned this object in GTT space. The following
@@ -109,9 +112,9 @@ struct i915_vma {
};
struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- const struct i915_ggtt_view *view);
+i915_vma_instance(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ const struct i915_ggtt_view *view);
void i915_vma_unpin_and_release(struct i915_vma **p_vma);
@@ -178,25 +181,48 @@ static inline void i915_vma_put(struct i915_vma *vma)
i915_gem_object_put(vma->obj);
}
+static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
+{
+ return a - b;
+}
+
static inline long
i915_vma_compare(struct i915_vma *vma,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
+ ptrdiff_t cmp;
+
GEM_BUG_ON(view && !i915_is_ggtt(vm));
- if (vma->vm != vm)
- return vma->vm - vm;
+ cmp = ptrdiff(vma->vm, vm);
+ if (cmp)
+ return cmp;
+ BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
+ cmp = vma->ggtt_view.type;
if (!view)
- return vma->ggtt_view.type;
+ return cmp;
- if (vma->ggtt_view.type != view->type)
- return vma->ggtt_view.type - view->type;
+ cmp -= view->type;
+ if (cmp)
+ return cmp;
- return memcmp(&vma->ggtt_view.params,
- &view->params,
- sizeof(view->params));
+	/* ggtt_view.type also encodes its size, so it serves both to
+	 * distinguish different views and as a compact memcmp length that
+	 * never touches uninitialised padding bytes, without storing an
+	 * extra parameter or adding more code.
+ *
+ * To ensure that the memcmp is valid for all branches of the union,
+ * even though the code looks like it is just comparing one branch,
+ * we assert above that all branches have the same address, and that
+ * each branch has a unique type/size.
+ */
+ BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
+ BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
+ BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
+ offsetof(typeof(*view), partial));
+ return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
}
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
@@ -221,8 +247,11 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
/* Pin early to prevent the shrinker/eviction logic from destroying
* our vma as we insert and bind.
*/
- if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+ if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
+ }
return __i915_vma_do_pin(vma, size, alignment, flags);
}
@@ -282,7 +311,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
*/
static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
GEM_BUG_ON(vma->iomap == NULL);
i915_vma_unpin(vma);
}
@@ -311,7 +340,7 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma)
static inline bool
i915_vma_pin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
if (vma->fence) {
vma->fence->pin_count++;
return true;
@@ -330,7 +359,7 @@ i915_vma_pin_fence(struct i915_vma *vma)
static inline void
i915_vma_unpin_fence(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
+ lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
if (vma->fence) {
GEM_BUG_ON(vma->fence->pin_count <= 0);
vma->fence->pin_count--;
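
The i915_vma_compare() change above relies on the view type enum value doubling as the sizeof() of the matching union member, so the type can be passed straight to memcmp. A standalone illustration of that trick; the struct and enum names here are hypothetical stand-ins, not the driver's types.

#include <string.h>

struct partial_view { unsigned long offset, size; };	/* hypothetical stand-ins */
struct rotated_view { unsigned int plane[2]; };

enum view_type {
	VIEW_NORMAL  = 0,
	VIEW_ROTATED = sizeof(struct rotated_view),	/* type value == member size */
	VIEW_PARTIAL = sizeof(struct partial_view),
};

struct view {
	enum view_type type;
	union {
		struct partial_view partial;
		struct rotated_view rotated;
	};
};

static long view_compare(const struct view *a, const struct view *b)
{
	long cmp = (long)a->type - (long)b->type;

	if (cmp)
		return cmp;
	/* same type => same number of initialised bytes on both sides */
	return memcmp(&a->partial, &b->partial, a->type);
}
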
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index c5a166752eda..aa9160e7f1d8 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -265,37 +265,6 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
return 0;
}
-static void
-intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll_config *shared_dpll)
-{
- enum intel_dpll_id i;
-
- /* Copy shared dpll state */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- shared_dpll[i] = pll->config;
- }
-}
-
-struct intel_shared_dpll_config *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
-{
- struct intel_atomic_state *state = to_intel_atomic_state(s);
-
- WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
-
- if (!state->dpll_set) {
- state->dpll_set = true;
-
- intel_atomic_duplicate_dpll_state(to_i915(s->dev),
- state->shared_dpll);
- }
-
- return state->shared_dpll;
-}
-
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 8d3e515f27ba..41fd94e62d3c 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -123,36 +123,24 @@ intel_plane_destroy_state(struct drm_plane *plane,
drm_atomic_helper_plane_destroy_state(plane, state);
}
-static int intel_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *intel_state)
{
+ struct drm_plane *plane = intel_state->base.plane;
struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_crtc *crtc = state->crtc;
- struct intel_crtc *intel_crtc;
- struct intel_crtc_state *crtc_state;
+ struct drm_plane_state *state = &intel_state->base;
struct intel_plane *intel_plane = to_intel_plane(plane);
- struct intel_plane_state *intel_state = to_intel_plane_state(state);
- struct drm_crtc_state *drm_crtc_state;
int ret;
- crtc = crtc ? crtc : plane->state->crtc;
- intel_crtc = to_intel_crtc(crtc);
-
/*
* Both crtc and plane->crtc could be NULL if we're updating a
* property while the plane is disabled. We don't actually have
* anything driver-specific we need to test in that case, so
* just return success.
*/
- if (!crtc)
+ if (!intel_state->base.crtc && !plane->state->crtc)
return 0;
- drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
- if (WARN_ON(!drm_crtc_state))
- return -EINVAL;
-
- crtc_state = to_intel_crtc_state(drm_crtc_state);
-
/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
@@ -175,11 +163,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
* RGB 16-bit 5:6:5, and Indexed 8-bit.
* TBD: Add RGB64 case once its added in supported format list.
*/
- switch (state->fb->pixel_format) {
+ switch (state->fb->format->format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(state->fb->pixel_format,
+ drm_get_format_name(state->fb->format->format,
&format_name));
return -EINVAL;
@@ -204,6 +192,31 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
return intel_plane_atomic_calc_changes(&crtc_state->base, state);
}
+static int intel_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *drm_crtc_state;
+
+ crtc = crtc ? crtc : plane->state->crtc;
+
+ /*
+ * Both crtc and plane->crtc could be NULL if we're updating a
+ * property while the plane is disabled. We don't actually have
+ * anything driver-specific we need to test in that case, so
+ * just return success.
+ */
+ if (!crtc)
+ return 0;
+
+ drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (WARN_ON(!drm_crtc_state))
+ return -EINVAL;
+
+ return intel_plane_atomic_check_with_state(to_intel_crtc_state(drm_crtc_state),
+ to_intel_plane_state(state));
+}
+
static void intel_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 892169b7952b..d76f3033e890 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -755,25 +755,49 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
return dev_priv->cdclk_freq;
}
+/*
+ * Return the intel_encoder for the given port and pipe; encoders are
+ * cached in av_enc_map, indexed by pipe.
+ * MST & (pipe >= 0): return av_enc_map[pipe] if its port matches
+ * MST & (pipe < 0): invalid
+ * Non-MST & (pipe >= 0): only pipe == 0 (the first device entry)
+ * yields the intel_encoder with a matching port
+ * Non-MST & (pipe < 0): return the intel_encoder with a matching port

+ */
static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
int port, int pipe)
{
+ struct intel_encoder *encoder;
if (WARN_ON(pipe >= I915_MAX_PIPES))
return NULL;
/* MST */
- if (pipe >= 0)
- return dev_priv->av_enc_map[pipe];
+ if (pipe >= 0) {
+ encoder = dev_priv->av_enc_map[pipe];
+ /*
+		 * At boot, the audio driver may not yet know whether the
+		 * port is MST, so it polls all port & pipe combinations.
+ */
+ if (encoder != NULL && encoder->port == port &&
+ encoder->type == INTEL_OUTPUT_DP_MST)
+ return encoder;
+ }
/* Non-MST */
- for_each_pipe(dev_priv, pipe) {
- struct intel_encoder *encoder;
+ if (pipe > 0)
+ return NULL;
+ for_each_pipe(dev_priv, pipe) {
encoder = dev_priv->av_enc_map[pipe];
if (encoder == NULL)
continue;
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
if (port == encoder->port)
return encoder;
}
@@ -799,9 +823,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
/* 1. get the pipe */
intel_encoder = get_saved_enc(dev_priv, port, pipe);
- if (!intel_encoder || !intel_encoder->base.crtc ||
- (intel_encoder->type != INTEL_OUTPUT_HDMI &&
- intel_encoder->type != INTEL_OUTPUT_DP)) {
+ if (!intel_encoder || !intel_encoder->base.crtc) {
DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
@@ -924,6 +946,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
+ if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ return;
+
ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
if (ret < 0) {
DRM_ERROR("failed to add audio component (%d)\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 7ffab1abc518..e144f033f4b5 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -114,16 +114,18 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
- dvo_timing->hsync_pulse_width;
+ ((dvo_timing->hsync_pulse_width_hi << 8) |
+ dvo_timing->hsync_pulse_width_lo);
panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
dvo_timing->vactive_lo;
panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
- dvo_timing->vsync_off;
+ ((dvo_timing->vsync_off_hi << 4) | dvo_timing->vsync_off_lo);
panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
- dvo_timing->vsync_pulse_width;
+ ((dvo_timing->vsync_pulse_width_hi << 4) |
+ dvo_timing->vsync_pulse_width_lo);
panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
panel_fixed_mode->clock = dvo_timing->clock * 10;
@@ -330,17 +332,19 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
method = &backlight_data->backlight_control[panel_type];
dev_priv->vbt.backlight.type = method->type;
+ dev_priv->vbt.backlight.controller = method->controller;
}
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
- "active %s, min brightness %u, level %u\n",
+ "active %s, min brightness %u, level %u, controller %u\n",
dev_priv->vbt.backlight.pwm_freq_hz,
dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
dev_priv->vbt.backlight.min_brightness,
- backlight_data->level[panel_type]);
+ backlight_data->level[panel_type],
+ dev_priv->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
@@ -1159,6 +1163,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
+ info->supports_edp = is_edp;
DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
@@ -1411,13 +1416,16 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
return false;
}
- if (vbt->bdb_offset + sizeof(struct bdb_header) > size) {
+ if (range_overflows_t(size_t,
+ vbt->bdb_offset,
+ sizeof(struct bdb_header),
+ size)) {
DRM_DEBUG_DRIVER("BDB header incomplete\n");
return false;
}
bdb = get_bdb_header(vbt);
- if (vbt->bdb_offset + bdb->bdb_size > size) {
+ if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
DRM_DEBUG_DRIVER("BDB incomplete\n");
return false;
}
@@ -1662,6 +1670,9 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
};
int i;
+ if (HAS_DDI(dev_priv))
+ return dev_priv->vbt.ddi_port_info[port].supports_edp;
+
if (!dev_priv->vbt.child_dev_num)
return false;
@@ -1779,7 +1790,7 @@ intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
{
int i;
- if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
+ if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv)))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
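
The panel timing fix earlier in this file stops truncating the sync pulse widths to their low VBT fields: the horizontal fields carry 8 low bits, the vertical fields only 4, which is why the shifts differ. A worked illustration with made-up VBT values:

#include <stdio.h>

int main(void)
{
	unsigned char hsync_pulse_width_lo = 0x2c, hsync_pulse_width_hi = 0x01;
	unsigned char vsync_pulse_width_lo = 0x05, vsync_pulse_width_hi = 0x01;

	unsigned int hsync = (hsync_pulse_width_hi << 8) | hsync_pulse_width_lo; /* 0x12c = 300 */
	unsigned int vsync = (vsync_pulse_width_hi << 4) | vsync_pulse_width_lo; /* 0x15  = 21  */

	printf("hsync pulse %u, vsync pulse %u\n", hsync, vsync);
	return 0;
}
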
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index c9c46a538edb..fcfa423d08bd 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -154,7 +154,7 @@ static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
static inline struct intel_wait *to_wait(struct rb_node *node)
{
- return container_of(node, struct intel_wait, node);
+ return rb_entry(node, struct intel_wait, node);
}
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
@@ -427,7 +427,7 @@ static bool signal_complete(struct drm_i915_gem_request *request)
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
- return container_of(rb, struct drm_i915_gem_request, signaling.node);
+ return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}
static void signaler_set_rtpriority(void)
@@ -623,6 +623,12 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ /* The engines should be idle and all requests accounted for! */
+ WARN_ON(READ_ONCE(b->first_wait));
+ WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
+ WARN_ON(READ_ONCE(b->first_signal));
+ WARN_ON(!RB_EMPTY_ROOT(&b->signals));
+
if (!IS_ERR_OR_NULL(b->signaler))
kthread_stop(b->signaler);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 588470eb8d39..2bf5aca6e37c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -837,12 +837,11 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
-void intel_crt_init(struct drm_device *dev)
+void intel_crt_init(struct drm_i915_private *dev_priv)
{
struct drm_connector *connector;
struct intel_crt *crt;
struct intel_connector *intel_connector;
- struct drm_i915_private *dev_priv = to_i915(dev);
i915_reg_t adpa_reg;
u32 adpa;
@@ -882,10 +881,10 @@ void intel_crt_init(struct drm_device *dev)
connector = &intel_connector->base;
crt->connector = intel_connector;
- drm_connector_init(dev, &intel_connector->base,
+ drm_connector_init(&dev_priv->drm, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
+ drm_encoder_init(&dev_priv->drm, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC, "CRT");
intel_connector_attach_encoder(intel_connector, &crt->base);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index d7a04bca8c28..0085bc745f6a 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,6 +34,10 @@
* low-power state and comes back to normal.
*/
+#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
+MODULE_FIRMWARE(I915_CSR_GLK);
+#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
@@ -286,7 +290,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
- if (IS_KABYLAKE(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv)) {
+ required_version = GLK_CSR_VERSION_REQUIRED;
+ } else if (IS_KABYLAKE(dev_priv)) {
required_version = KBL_CSR_VERSION_REQUIRED;
} else if (IS_SKYLAKE(dev_priv)) {
required_version = SKL_CSR_VERSION_REQUIRED;
@@ -389,7 +395,7 @@ static void csr_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
struct intel_csr *csr;
- const struct firmware *fw;
+ const struct firmware *fw = NULL;
int ret;
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
@@ -405,7 +411,7 @@ static void csr_load_work_fn(struct work_struct *work)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- DRM_INFO("Finished loading %s (v%u.%u)\n",
+ DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
dev_priv->csr.fw_path,
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
@@ -435,7 +441,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (IS_KABYLAKE(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
+ csr->fw_path = I915_CSR_GLK;
+ else if (IS_KABYLAKE(dev_priv))
csr->fw_path = I915_CSR_KBL;
else if (IS_SKYLAKE(dev_priv))
csr->fw_path = I915_CSR_SKL;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 10ec9d4b7d45..66b367d0771a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -442,7 +442,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return hdmi_level;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
@@ -484,7 +484,7 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
const struct ddi_buf_trans *ddi_translations_edp;
const struct ddi_buf_trans *ddi_translations;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return;
if (IS_KABYLAKE(dev_priv)) {
@@ -567,7 +567,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
enum port port = intel_ddi_get_encoder_port(encoder);
const struct ddi_buf_trans *ddi_translations_hdmi;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return;
hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
@@ -1057,7 +1057,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
return 0;
pll = &dev_priv->shared_dplls[dpll];
- state = &pll->config.hw_state;
+ state = &pll->state.hw_state;
clock.m1 = 2;
clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
@@ -1091,7 +1091,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
hsw_ddi_clock_get(encoder, pipe_config);
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
}
@@ -1153,7 +1153,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
intel_encoder);
else
@@ -1429,7 +1429,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
out:
- if (ret && IS_BROXTON(dev_priv)) {
+ if (ret && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
@@ -1643,7 +1643,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_set_iboost(encoder, level);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
return DDI_BUF_TRANS_SELECT(level);
@@ -1701,7 +1701,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
bool has_hdmi_sink,
- struct drm_display_mode *adjusted_mode,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state,
struct intel_shared_dpll *pll)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
@@ -1715,13 +1716,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_prepare_hdmi_ddi_buffers(encoder);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_ddi_set_iboost(encoder, level);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port,
INTEL_OUTPUT_HDMI);
intel_hdmi->set_infoframes(drm_encoder,
has_hdmi_sink,
- adjusted_mode);
+ crtc_state, conn_state);
}
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
@@ -1742,8 +1743,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
}
if (type == INTEL_OUTPUT_HDMI) {
intel_ddi_pre_enable_hdmi(intel_encoder,
- crtc->config->has_hdmi_sink,
- &crtc->config->base.adjusted_mode,
+ pipe_config->has_hdmi_sink,
+ pipe_config, conn_state,
crtc->config->shared_dpll);
}
}
@@ -1949,6 +1950,19 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600);
}
+bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc)
+{
+ u32 temp;
+
+ if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+ return true;
+ }
+ return false;
+}
+
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2014,11 +2028,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
- temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
- pipe_config->has_audio = true;
- }
+ pipe_config->has_audio =
+ intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
@@ -2042,7 +2053,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
}
@@ -2066,7 +2077,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
else
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
- if (IS_BROXTON(dev_priv) && ret)
+ if (IS_GEN9_LP(dev_priv) && ret)
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
pipe_config->lane_count);
@@ -2123,10 +2134,10 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_shared_dpll *pll = NULL;
- struct intel_shared_dpll_config tmp_pll_config;
+ struct intel_shared_dpll_state tmp_pll_state;
enum intel_dpll_id dpll_id;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
dpll_id = (enum intel_dpll_id)dig_port->port;
/*
* Select the required PLL. This works for platforms where
@@ -2139,11 +2150,11 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
pll->active_mask);
return NULL;
}
- tmp_pll_config = pll->config;
+ tmp_pll_state = pll->state;
if (!bxt_ddi_dp_set_dpll_hw_state(clock,
- &pll->config.hw_state)) {
+ &pll->state.hw_state)) {
DRM_ERROR("Could not setup DPLL\n");
- pll->config = tmp_pll_config;
+ pll->state = tmp_pll_state;
return NULL;
}
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
@@ -2154,9 +2165,8 @@ intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
return pll;
}
-void intel_ddi_init(struct drm_device *dev, enum port port)
+void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -2218,12 +2228,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
- drm_encoder_init(dev, encoder, &intel_ddi_funcs,
+ drm_encoder_init(&dev_priv->drm, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
@@ -2244,7 +2254,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
* configuration so that we use the proper lane count for our
* calculations.
*/
- if (IS_BROXTON(dev_priv) && port == PORT_A) {
+ if (IS_GEN9_LP(dev_priv) && port == PORT_A) {
if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 185e3bbc9ec9..fcf81815daff 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -24,11 +24,51 @@
#include "i915_drv.h"
+#define PLATFORM_NAME(x) [INTEL_##x] = #x
+static const char * const platform_names[] = {
+ PLATFORM_NAME(I830),
+ PLATFORM_NAME(I845G),
+ PLATFORM_NAME(I85X),
+ PLATFORM_NAME(I865G),
+ PLATFORM_NAME(I915G),
+ PLATFORM_NAME(I915GM),
+ PLATFORM_NAME(I945G),
+ PLATFORM_NAME(I945GM),
+ PLATFORM_NAME(G33),
+ PLATFORM_NAME(PINEVIEW),
+ PLATFORM_NAME(I965G),
+ PLATFORM_NAME(I965GM),
+ PLATFORM_NAME(G45),
+ PLATFORM_NAME(GM45),
+ PLATFORM_NAME(IRONLAKE),
+ PLATFORM_NAME(SANDYBRIDGE),
+ PLATFORM_NAME(IVYBRIDGE),
+ PLATFORM_NAME(VALLEYVIEW),
+ PLATFORM_NAME(HASWELL),
+ PLATFORM_NAME(BROADWELL),
+ PLATFORM_NAME(CHERRYVIEW),
+ PLATFORM_NAME(SKYLAKE),
+ PLATFORM_NAME(BROXTON),
+ PLATFORM_NAME(KABYLAKE),
+ PLATFORM_NAME(GEMINILAKE),
+};
+#undef PLATFORM_NAME
+
+const char *intel_platform_name(enum intel_platform platform)
+{
+ if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
+ platform_names[platform] == NULL))
+ return "<unknown>";
+
+ return platform_names[platform];
+}
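The lookup above is the usual enum-indexed string table with a guarded accessor: designated initializers keep the table aligned with the enum, and the accessor catches both out-of-range values and gaps left by future enum additions. A small standalone sketch of the same pattern, with made-up names purely for illustration:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum fruit { APPLE, PEAR, PLUM, NUM_FRUITS };

/* Designated initializers, as PLATFORM_NAME() expands to above. */
static const char * const fruit_names[] = {
	[APPLE] = "APPLE",
	[PEAR]  = "PEAR",
	[PLUM]  = "PLUM",
};

static const char *fruit_name(enum fruit f)
{
	/* Out-of-range values or holes in the table fall back to a
	 * sentinel, mirroring the WARN_ON_ONCE() + "<unknown>" path. */
	if (f >= ARRAY_SIZE(fruit_names) || !fruit_names[f])
		return "<unknown>";
	return fruit_names[f];
}

int main(void)
{
	printf("%s\n", fruit_name(PEAR));       /* PEAR */
	printf("%s\n", fruit_name(NUM_FRUITS)); /* <unknown> */
	return 0;
}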
+
void intel_device_info_dump(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = &dev_priv->info;
- DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x",
+ DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
+ intel_platform_name(info->platform),
info->gen,
dev_priv->drm.pdev->device,
dev_priv->drm.pdev->revision);
@@ -152,7 +192,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
hweight8(sseu->slice_mask) > 1;
sseu->has_subslice_pg =
- IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
+ IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
if (IS_BROXTON(dev_priv)) {
@@ -270,6 +310,12 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
struct intel_device_info *info = mkwrite_device_info(dev_priv);
enum pipe pipe;
+ if (INTEL_GEN(dev_priv) >= 9) {
+ info->num_scalers[PIPE_A] = 2;
+ info->num_scalers[PIPE_B] = 2;
+ info->num_scalers[PIPE_C] = 1;
+ }
+
/*
* Skylake and Broxton currently don't expose the topmost plane as its
* use is exclusive with the legacy cursor and we only want to expose
@@ -278,7 +324,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
* we don't expose the topmost plane at all to prevent ABI breakage
* down the line.
*/
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEMINILAKE(dev_priv))
+ for_each_pipe(dev_priv, pipe)
+ info->num_sprites[pipe] = 3;
+ else if (IS_BROXTON(dev_priv)) {
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 891c86aef99d..01341670738f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -115,15 +115,15 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void skl_init_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
+static void intel_crtc_init_scalers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static int glk_calc_cdclk(int max_pixclk);
static int bxt_calc_cdclk(int max_pixclk);
struct intel_limit {
@@ -614,12 +614,12 @@ static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
INTELPllInvalid("m1 out of range\n");
if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv))
+ !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
if (clock->m1 <= clock->m2)
INTELPllInvalid("m1 <= m2\n");
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv)) {
+ !IS_GEN9_LP(dev_priv)) {
if (clock->p < limit->p.min || limit->p.max < clock->p)
INTELPllInvalid("p out of range\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
@@ -1232,7 +1232,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
{
bool cur_state;
- if (IS_845G(dev_priv) || IS_I865G(dev_priv))
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
@@ -1327,7 +1327,7 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_sprite(dev_priv, pipe, sprite) {
- u32 val = I915_READ(SPCNTR(pipe, sprite));
+ u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite));
I915_STATE_WARN(val & SP_ENABLE,
"sprite %c assertion failure, should be off on pipe %c but is still active\n",
sprite_name(pipe, sprite), pipe_name(pipe));
@@ -2137,11 +2137,10 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
unsigned int rotation)
{
+ view->type = I915_GGTT_VIEW_NORMAL;
if (drm_rotation_90_or_270(rotation)) {
- *view = i915_ggtt_view_rotated;
- view->params.rotated = to_intel_framebuffer(fb)->rot_info;
- } else {
- *view = i915_ggtt_view_normal;
+ view->type = I915_GGTT_VIEW_ROTATED;
+ view->rotated = to_intel_framebuffer(fb)->rot_info;
}
}
@@ -2149,7 +2148,7 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
{
if (INTEL_INFO(dev_priv)->gen >= 9)
return 256 * 1024;
- else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
+ else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return 128 * 1024;
else if (INTEL_INFO(dev_priv)->gen >= 4)
@@ -2243,10 +2242,7 @@ err:
void intel_unpin_fb_vma(struct i915_vma *vma)
{
- lockdep_assert_held(&vma->vm->dev->struct_mutex);
-
- if (WARN_ON_ONCE(!vma))
- return;
+ lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
@@ -2273,7 +2269,7 @@ u32 intel_fb_xy_to_linear(int x, int y,
int plane)
{
const struct drm_framebuffer *fb = state->base.fb;
- unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int cpp = fb->format->cpp[plane];
unsigned int pitch = fb->pitches[plane];
return y * pitch + x * cpp;
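The fb->format->cpp[plane] lookups that replace drm_format_plane_cpp() throughout this file return bytes per pixel for the given plane, and intel_fb_xy_to_linear() is just the standard linear addressing formula. A tiny standalone illustration with assumed example values:

#include <stdio.h>

/* intel_fb_xy_to_linear() boils down to: offset = y * pitch + x * cpp. */
int main(void)
{
	const unsigned cpp = 4;       /* XRGB8888: 4 bytes per pixel */
	const unsigned pitch = 7680;  /* assumed stride in bytes (1920 px * 4) */
	const unsigned x = 100, y = 50;

	printf("linear offset = %u bytes\n", y * pitch + x * cpp);
	return 0;
}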
@@ -2342,7 +2338,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
{
const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
const struct drm_framebuffer *fb = state->base.fb;
- unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int cpp = fb->format->cpp[plane];
unsigned int rotation = state->base.rotation;
unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
@@ -2398,7 +2394,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
u32 alignment)
{
uint64_t fb_modifier = fb->modifier;
- unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int cpp = fb->format->cpp[plane];
u32 offset, offset_aligned;
if (alignment)
@@ -2453,7 +2449,7 @@ u32 intel_compute_tile_offset(int *x, int *y,
u32 alignment;
/* AUX_DIST needs only 4K alignment */
- if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1)
+ if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
alignment = 4096;
else
alignment = intel_surf_alignment(dev_priv, fb->modifier);
@@ -2466,7 +2462,7 @@ u32 intel_compute_tile_offset(int *x, int *y,
static void intel_fb_offset_to_xy(int *x, int *y,
const struct drm_framebuffer *fb, int plane)
{
- unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ unsigned int cpp = fb->format->cpp[plane];
unsigned int pitch = fb->pitches[plane];
u32 linear_offset = fb->offsets[plane];
@@ -2494,8 +2490,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
struct intel_rotation_info *rot_info = &intel_fb->rot_info;
u32 gtt_offset_rotated = 0;
unsigned int max_size = 0;
- uint32_t format = fb->pixel_format;
- int i, num_planes = drm_format_num_planes(format);
+ int i, num_planes = fb->format->num_planes;
unsigned int tile_size = intel_tile_size(dev_priv);
for (i = 0; i < num_planes; i++) {
@@ -2504,9 +2499,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
u32 offset;
int x, y;
- cpp = drm_format_plane_cpp(format, i);
- width = drm_format_plane_width(fb->width, format, i);
- height = drm_format_plane_height(fb->height, format, i);
+ cpp = fb->format->cpp[i];
+ width = drm_framebuffer_plane_width(fb->width, fb, i);
+ height = drm_framebuffer_plane_height(fb->height, fb, i);
intel_fb_offset_to_xy(&x, &y, fb, i);
@@ -2688,7 +2683,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mutex_lock(&dev->struct_mutex);
- obj = i915_gem_object_create_stolen_for_preallocated(dev,
+ obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
base_aligned,
base_aligned,
size_aligned);
@@ -2700,7 +2695,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (plane_config->tiling == I915_TILING_X)
obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
- mode_cmd.pixel_format = fb->pixel_format;
+ mode_cmd.pixel_format = fb->format->format;
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
mode_cmd.pitches[0] = fb->pitches[0];
@@ -2795,7 +2790,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
- to_intel_plane_state(plane_state)->base.visible = false;
+ plane_state->visible = false;
crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
intel_pre_disable_primary_noatomic(&intel_crtc->base);
intel_plane->disable_plane(primary, &intel_crtc->base);
@@ -2844,7 +2839,7 @@ valid_fb:
static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
unsigned int rotation)
{
- int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ int cpp = fb->format->cpp[plane];
switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
@@ -2923,7 +2918,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
* TODO: linear and Y-tiled seem fine, Yf untested,
*/
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = fb->format->cpp[0];
while ((x + w) * cpp > fb->pitches[0]) {
if (offset == 0) {
@@ -2991,7 +2986,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since
* the main surface setup depends on it.
*/
- if (fb->pixel_format == DRM_FORMAT_NV12) {
+ if (fb->format->format == DRM_FORMAT_NV12) {
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3046,7 +3041,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
@@ -3161,7 +3156,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
@@ -3275,12 +3270,12 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
* linear buffers or in number of tiles for tiled buffers.
*/
if (drm_rotation_90_or_270(rotation)) {
- int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
+ int cpp = fb->format->cpp[plane];
stride /= intel_tile_height(dev_priv, fb->modifier, cpp);
} else {
stride /= intel_fb_stride_alignment(dev_priv, fb->modifier,
- fb->pixel_format);
+ fb->format->format);
}
return stride;
@@ -3375,7 +3370,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_framebuffer *fb = plane_state->base.fb;
- int pipe = intel_crtc->pipe;
+ enum plane_id plane_id = to_intel_plane(plane)->id;
+ enum pipe pipe = to_intel_plane(plane)->pipe;
u32 plane_ctl;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
@@ -3394,7 +3390,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
- plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
plane_ctl |= skl_plane_ctl_rotation(rotation);
@@ -3410,30 +3406,30 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_x = src_x;
intel_crtc->adjusted_y = src_y;
- I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
- I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x);
- I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
- I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w);
+ I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
+ I915_WRITE(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
+ I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;
WARN_ON(!dst_w || !dst_h);
- ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
+ ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
crtc_state->scaler_state.scalers[scaler_id].mode;
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
- I915_WRITE(PLANE_POS(pipe, 0), 0);
+ I915_WRITE(PLANE_POS(pipe, plane_id), 0);
} else {
- I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
+ I915_WRITE(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
}
- I915_WRITE(PLANE_SURF(pipe, 0),
+ I915_WRITE(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- POSTING_READ(PLANE_SURF(pipe, 0));
+ POSTING_READ(PLANE_SURF(pipe, plane_id));
}
static void skylake_disable_primary_plane(struct drm_plane *primary,
@@ -3441,12 +3437,12 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ enum plane_id plane_id = to_intel_plane(primary)->id;
+ enum pipe pipe = to_intel_plane(primary)->pipe;
- I915_WRITE(PLANE_CTL(pipe, 0), 0);
- I915_WRITE(PLANE_SURF(pipe, 0), 0);
- POSTING_READ(PLANE_SURF(pipe, 0));
+ I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
+ I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
+ POSTING_READ(PLANE_SURF(pipe, plane_id));
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -3555,23 +3551,19 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
state = drm_atomic_helper_duplicate_state(dev, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
- state = NULL;
DRM_ERROR("Duplicating state failed with %i\n", ret);
- goto err;
+ return;
}
ret = drm_atomic_helper_disable_all(dev, ctx);
if (ret) {
DRM_ERROR("Suspending crtc's failed with %i\n", ret);
- goto err;
+ drm_atomic_state_put(state);
+ return;
}
dev_priv->modeset_restore_state = state;
state->acquire_ctx = ctx;
- return;
-
-err:
- drm_atomic_state_put(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -4224,9 +4216,8 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}
-bool intel_has_pending_fb_unpin(struct drm_device *dev)
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
/* Note that we don't need to be called with mode_config.lock here
@@ -4236,7 +4227,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
 * cannot claim and pin a new fb without at least acquiring the
* struct_mutex and so serialising with us.
*/
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
if (atomic_read(&crtc->unpin_work_count) == 0)
continue;
@@ -4766,7 +4757,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
}
/* Check src format */
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB8888:
@@ -4782,7 +4773,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
intel_plane->base.base.id, intel_plane->base.name,
- fb->base.id, fb->pixel_format);
+ fb->base.id, fb->format->format);
return -EINVAL;
}
@@ -5017,11 +5008,9 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (HAS_GMCH_DISPLAY(dev_priv)) {
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->wm.vlv.cxsr = false;
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
+ intel_set_memory_cxsr(dev_priv, false))
intel_wait_for_vblank(dev_priv, pipe);
- }
}
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
@@ -5096,11 +5085,9 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
* event which is after the vblank start event, so we need to have a
* wait-for-vblank between disabling the plane and the pipe.
*/
- if (old_crtc_state->base.active) {
- intel_set_memory_cxsr(dev_priv, false);
- dev_priv->wm.vlv.cxsr = false;
+ if (old_crtc_state->base.active &&
+ intel_set_memory_cxsr(dev_priv, false))
intel_wait_for_vblank(dev_priv, crtc->pipe);
- }
}
/*
@@ -5110,10 +5097,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (pipe_config->disable_lp_wm) {
- ilk_disable_lp_wm(dev);
+ if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
intel_wait_for_vblank(dev_priv, crtc->pipe);
- }
/*
* If we're doing a modeset, we're done. No need to do any pre-vblank
@@ -5461,10 +5446,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_enable_transcoder_func(crtc);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state,
- pipe_config);
- else
- intel_update_watermarks(intel_crtc);
+ dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
/* XXX: Do the pipe assertions at the right place for BXT DSI. */
if (!transcoder_is_dsi(cpu_transcoder))
@@ -5801,8 +5783,10 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
- if (INTEL_INFO(dev_priv)->gen >= 9 ||
- IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_GEMINILAKE(dev_priv))
+ return 2 * max_cdclk_freq;
+ else if (INTEL_INFO(dev_priv)->gen >= 9 ||
+ IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
@@ -5838,6 +5822,8 @@ static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
max_cdclk = 308571;
dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ dev_priv->max_cdclk_freq = 316800;
} else if (IS_BROXTON(dev_priv)) {
dev_priv->max_cdclk_freq = 624000;
} else if (IS_BROADWELL(dev_priv)) {
@@ -5925,6 +5911,26 @@ static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
return dev_priv->cdclk_pll.ref * ratio;
}
+static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+ int ratio;
+
+ if (cdclk == dev_priv->cdclk_pll.ref)
+ return 0;
+
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
+ case 79200:
+ case 158400:
+ case 316800:
+ ratio = 33;
+ break;
+ }
+
+ return dev_priv->cdclk_pll.ref * ratio;
+}
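glk_de_pll_vco() above hard-codes a single ratio of 33 for every supported CDCLK. Assuming a 19200 kHz reference clock (an assumption; the actual value comes from cdclk_pll.ref at runtime), the VCO lands at 633600 kHz and the three supported frequencies are simply that VCO divided by 2, 4 and 8. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* Assumption: cdclk_pll.ref is 19200 kHz on this platform family. */
	const int ref_khz = 19200;
	const int ratio = 33;            /* the only ratio glk_de_pll_vco() uses */
	const int vco = ref_khz * ratio; /* 633600 kHz */
	const int cdclks[] = { 316800, 158400, 79200 };
	unsigned int i;

	for (i = 0; i < sizeof(cdclks) / sizeof(cdclks[0]); i++)
		printf("cdclk %6d kHz -> cd2x divider %d\n",
		       cdclks[i], vco / cdclks[i]);
	return 0;
}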
+
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
@@ -5966,7 +5972,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
u32 val, divider;
int vco, ret;
- vco = bxt_de_pll_vco(dev_priv, cdclk);
+ if (IS_GEMINILAKE(dev_priv))
+ vco = glk_de_pll_vco(dev_priv, cdclk);
+ else
+ vco = bxt_de_pll_vco(dev_priv, cdclk);
DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
@@ -5979,6 +5988,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
case 3:
+ WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
case 2:
@@ -6088,6 +6098,8 @@ sanitize:
void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
+ int cdclk;
+
bxt_sanitize_cdclk(dev_priv);
if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
@@ -6098,7 +6110,12 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
* - The initial CDCLK needs to be read from VBT.
* Need to make this change after VBT has changes for BXT.
*/
- bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
+ if (IS_GEMINILAKE(dev_priv))
+ cdclk = glk_calc_cdclk(0);
+ else
+ cdclk = bxt_calc_cdclk(0);
+
+ bxt_set_cdclk(dev_priv, cdclk);
}
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
@@ -6513,6 +6530,16 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
return 200000;
}
+static int glk_calc_cdclk(int max_pixclk)
+{
+ if (max_pixclk > 2 * 158400)
+ return 316800;
+ else if (max_pixclk > 2 * 79200)
+ return 158400;
+ else
+ return 79200;
+}
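The thresholds in glk_calc_cdclk() line up with the change to intel_compute_max_dotclk() earlier in this file, which reports twice the CDCLK as the maximum dotclock on Geminilake; hence the comparisons against 2 * 79200 and 2 * 158400. A standalone sketch showing which CDCLK a few sample pixel clocks would select (the sample values are illustrative, not from the patch):

#include <stdio.h>

/* Same thresholds as glk_calc_cdclk() above; the 2x factor matches
 * intel_compute_max_dotclk() returning twice the CDCLK on this platform. */
static int glk_calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 2 * 158400)
		return 316800;
	else if (max_pixclk > 2 * 79200)
		return 158400;
	else
		return 79200;
}

int main(void)
{
	const int pixclks[] = { 0, 148500, 262750, 594000 }; /* kHz */
	unsigned int i;

	for (i = 0; i < sizeof(pixclks) / sizeof(pixclks[0]); i++)
		printf("pixclk %6d kHz -> cdclk %6d kHz\n",
		       pixclks[i], glk_calc_cdclk(pixclks[i]));
	return 0;
}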
+
static int bxt_calc_cdclk(int max_pixclk)
{
if (max_pixclk > 576000)
@@ -6575,15 +6602,27 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
int max_pixclk = ilk_max_pixel_rate(state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
+ int cdclk;
- intel_state->cdclk = intel_state->dev_cdclk =
- bxt_calc_cdclk(max_pixclk);
+ if (IS_GEMINILAKE(dev_priv))
+ cdclk = glk_calc_cdclk(max_pixclk);
+ else
+ cdclk = bxt_calc_cdclk(max_pixclk);
- if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = bxt_calc_cdclk(0);
+ intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+
+ if (!intel_state->active_crtcs) {
+ if (IS_GEMINILAKE(dev_priv))
+ cdclk = glk_calc_cdclk(0);
+ else
+ cdclk = bxt_calc_cdclk(0);
+
+ intel_state->dev_cdclk = cdclk;
+ }
return 0;
}
@@ -6833,13 +6872,13 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- if (to_intel_plane_state(crtc->primary->state)->base.visible) {
+ if (crtc->primary->state->visible) {
WARN_ON(intel_crtc->flip_work);
intel_pre_disable_primary_noatomic(crtc);
intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
- to_intel_plane_state(crtc->primary->state)->base.visible = false;
+ crtc->primary->state->visible = false;
}
state = drm_atomic_state_alloc(crtc->dev);
@@ -7291,6 +7330,7 @@ static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+ WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
@@ -7510,7 +7550,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
vco_table = ctg_vco;
else if (IS_G4X(dev_priv))
vco_table = elk_vco;
- else if (IS_CRESTLINE(dev_priv))
+ else if (IS_I965GM(dev_priv))
vco_table = cl_vco;
else if (IS_PINEVIEW(dev_priv))
vco_table = pnv_vco;
@@ -8122,7 +8162,8 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
else
dpll |= DPLLB_MODE_DAC_SERIAL;
- if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) {
+ if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
dpll |= (crtc_state->pixel_multiplier - 1)
<< SDVO_MULTIPLIER_SHIFT_HIRES;
}
@@ -8357,7 +8398,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
mode->type = DRM_MODE_TYPE_DRIVER;
mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
- mode->flags |= pipe_config->base.adjusted_mode.flags;
mode->hsync = drm_mode_hsync(mode);
mode->vrefresh = drm_mode_vrefresh(mode);
@@ -8696,6 +8736,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
+ fb->dev = dev;
+
if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
@@ -8705,8 +8747,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->pixel_format = fourcc;
- fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+ fb->format = drm_format_info(fourcc);
if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
@@ -8727,14 +8768,14 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- fb->pixel_format,
+ fb->format->format,
fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), plane, fb->width, fb->height,
- fb->bits_per_pixel, base, fb->pitches[0],
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
plane_config->fb = intel_fb;
@@ -8835,7 +8876,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv)) {
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
@@ -8888,9 +8929,8 @@ out:
return ret;
}
-static void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
int i;
u32 val, final;
@@ -8902,7 +8942,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
bool using_ssc_source = false;
/* We need to take the global config into account */
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
has_panel = true;
@@ -9158,10 +9198,9 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
* - Sequence to enable CLKOUT_DP without spread
* - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
*/
-static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
- bool with_fdi)
+static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
+ bool with_spread, bool with_fdi)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t reg, tmp;
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
@@ -9199,9 +9238,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
}
/* Sequence to disable CLKOUT_DP */
-static void lpt_disable_clkout_dp(struct drm_device *dev)
+static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t reg, tmp;
mutex_lock(&dev_priv->sb_lock);
@@ -9286,12 +9324,12 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
#undef BEND_IDX
-static void lpt_init_pch_refclk(struct drm_device *dev)
+static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
bool has_vga = false;
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
@@ -9302,24 +9340,22 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
}
if (has_vga) {
- lpt_bend_clkout_dp(to_i915(dev), 0);
- lpt_enable_clkout_dp(dev, true, true);
+ lpt_bend_clkout_dp(dev_priv, 0);
+ lpt_enable_clkout_dp(dev_priv, true, true);
} else {
- lpt_disable_clkout_dp(dev);
+ lpt_disable_clkout_dp(dev_priv);
}
}
/*
* Initialize reference clocks when the driver loads
*/
-void intel_init_pch_refclk(struct drm_device *dev)
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ironlake_init_pch_refclk(dev);
+ ironlake_init_pch_refclk(dev_priv);
else if (HAS_PCH_LPT(dev_priv))
- lpt_init_pch_refclk(dev);
+ lpt_init_pch_refclk(dev_priv);
}
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
@@ -9726,6 +9762,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
+ fb->dev = dev;
+
val = I915_READ(PLANE_CTL(pipe, 0));
if (!(val & PLANE_CTL_ENABLE))
goto error;
@@ -9734,8 +9772,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX,
val & PLANE_CTL_ALPHA_MASK);
- fb->pixel_format = fourcc;
- fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+ fb->format = drm_format_info(fourcc);
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
@@ -9768,18 +9805,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
val = I915_READ(PLANE_STRIDE(pipe, 0));
stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier,
- fb->pixel_format);
+ fb->format->format);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(dev, fb->height,
- fb->pixel_format,
+ fb->format->format,
fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
- fb->bits_per_pixel, base, fb->pitches[0],
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
plane_config->fb = intel_fb;
@@ -9838,6 +9875,8 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb = &intel_fb->base;
+ fb->dev = dev;
+
if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
plane_config->tiling = I915_TILING_X;
@@ -9847,8 +9886,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->pixel_format = fourcc;
- fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
+ fb->format = drm_format_info(fourcc);
base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -9869,14 +9907,14 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(dev, fb->height,
- fb->pixel_format,
+ fb->format->format,
fb->modifier);
plane_config->size = fb->pitches[0] * aligned_height;
DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
pipe_name(pipe), fb->width, fb->height,
- fb->bits_per_pixel, base, fb->pitches[0],
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
plane_config->size);
plane_config->fb = intel_fb;
@@ -10166,7 +10204,6 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
uint32_t val;
DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -10177,19 +10214,18 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
}
- lpt_disable_clkout_dp(dev);
+ lpt_disable_clkout_dp(dev_priv);
hsw_disable_lcpll(dev_priv, true, true);
}
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
uint32_t val;
DRM_DEBUG_KMS("Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
- lpt_init_pch_refclk(dev);
+ lpt_init_pch_refclk(dev_priv);
if (HAS_PCH_LPT_LP(dev_priv)) {
val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -10639,7 +10675,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
else
haswell_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10684,7 +10720,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
- if (IS_BROXTON(dev_priv) &&
+ if (IS_GEN9_LP(dev_priv) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
WARN_ON(active);
active = true;
@@ -10704,7 +10740,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
if (INTEL_GEN(dev_priv) >= 9) {
- skl_init_scalers(dev_priv, crtc, pipe_config);
+ intel_crtc_init_scalers(crtc, pipe_config);
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
@@ -10885,7 +10921,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
I915_WRITE(CURPOS(pipe), pos);
- if (IS_845G(dev_priv) || IS_I865G(dev_priv))
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
i845_update_cursor(crtc, base, plane_state);
else
i9xx_update_cursor(crtc, base, plane_state);
@@ -10903,11 +10939,11 @@ static bool cursor_size_ok(struct drm_i915_private *dev_priv,
* the precision of the register. Everything else requires
* square cursors, limited to a few power-of-two sizes.
*/
- if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
if ((width & 63) != 0)
return false;
- if (width > (IS_845G(dev_priv) ? 64 : 512))
+ if (width > (IS_I845G(dev_priv) ? 64 : 512))
return false;
if (height > 1023)
@@ -10997,7 +11033,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- obj = i915_gem_object_create(dev,
+ obj = i915_gem_object_create(to_i915(dev),
intel_framebuffer_size_for_mode(mode, bpp));
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -11035,7 +11071,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
fb = &dev_priv->fbdev->fb->base;
if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->bits_per_pixel))
+ fb->format->cpp[0] * 8))
return NULL;
if (obj->base.size < mode->vdisplay * fb->pitches[0])
@@ -11063,7 +11099,7 @@ static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
return PTR_ERR(plane_state);
if (mode)
- drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+ drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
else
hdisplay = vdisplay = 0;
@@ -12138,7 +12174,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return -EBUSY;
/* Can't change pixel format via MI display flips. */
- if (fb->pixel_format != crtc->primary->fb->pixel_format)
+ if (fb->format != crtc->primary->fb->format)
return -EINVAL;
/*
@@ -12257,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
queue_work(system_unbound_wq, &work->mmio_work);
} else {
- request = i915_gem_request_alloc(engine, engine->last_context);
+ request = i915_gem_request_alloc(engine,
+ dev_priv->kernel_context);
if (IS_ERR(request)) {
ret = PTR_ERR(request);
goto cleanup_unpin;
@@ -12424,7 +12461,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
}
was_visible = old_plane_state->base.visible;
- visible = to_intel_plane_state(plane_state)->base.visible;
+ visible = plane_state->visible;
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
@@ -12440,7 +12477,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled)
- to_intel_plane_state(plane_state)->base.visible = visible = false;
+ plane_state->visible = visible = false;
if (!was_visible && !visible)
return 0;
@@ -12786,39 +12823,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
pipe_config->ips_enabled, pipe_config->double_wide);
- if (IS_BROXTON(dev_priv)) {
- DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
- "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
- pipe_config->dpll_hw_state.ebb0,
- pipe_config->dpll_hw_state.ebb4,
- pipe_config->dpll_hw_state.pll0,
- pipe_config->dpll_hw_state.pll1,
- pipe_config->dpll_hw_state.pll2,
- pipe_config->dpll_hw_state.pll3,
- pipe_config->dpll_hw_state.pll6,
- pipe_config->dpll_hw_state.pll8,
- pipe_config->dpll_hw_state.pll9,
- pipe_config->dpll_hw_state.pll10,
- pipe_config->dpll_hw_state.pcsdw12);
- } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- DRM_DEBUG_KMS("dpll_hw_state: "
- "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
- pipe_config->dpll_hw_state.ctrl1,
- pipe_config->dpll_hw_state.cfgcr1,
- pipe_config->dpll_hw_state.cfgcr2);
- } else if (HAS_DDI(dev_priv)) {
- DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
- pipe_config->dpll_hw_state.wrpll,
- pipe_config->dpll_hw_state.spll);
- } else {
- DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- pipe_config->dpll_hw_state.dpll,
- pipe_config->dpll_hw_state.dpll_md,
- pipe_config->dpll_hw_state.fp0,
- pipe_config->dpll_hw_state.fp1);
- }
+ intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
DRM_DEBUG_KMS("planes on this crtc\n");
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -12838,7 +12843,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
plane->base.id, plane->name,
fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->pixel_format, &format_name));
+ drm_get_format_name(fb->format->format, &format_name));
if (INTEL_GEN(dev_priv) >= 9)
DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
state->scaler_id,
@@ -12983,7 +12988,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
* computation to clearly distinguish it from the adjusted mode, which
* can be changed by the connectors in the below retry loop.
*/
- drm_crtc_get_hv_timing(&pipe_config->base.mode,
+ drm_mode_get_hv_timing(&pipe_config->base.mode,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
@@ -13162,6 +13167,31 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
return false;
}
+static void __printf(3, 4)
+pipe_config_err(bool adjust, const char *name, const char *format, ...)
+{
+ char *level;
+ unsigned int category;
+ struct va_format vaf;
+ va_list args;
+
+ if (adjust) {
+ level = KERN_DEBUG;
+ category = DRM_UT_KMS;
+ } else {
+ level = KERN_ERR;
+ category = DRM_UT_NONE;
+ }
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ drm_printk(level, category, "mismatch in %s %pV", name, &vaf);
+
+ va_end(args);
+}
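pipe_config_err() replaces the INTEL_ERR_OR_DBG_KMS macro removed further down: the adjust flag selects the log level, and the printf-style tail is forwarded through a struct va_format so the PIPE_CONF_CHECK_* macros only supply the field name and its values. A user-space analogue of the same forwarding pattern (drm_printk() and the kernel-only %pV specifier are replaced with vfprintf here):

#include <stdarg.h>
#include <stdio.h>

/* One helper decides the severity; callers pass the field name and a
 * printf-style tail, as the PIPE_CONF_CHECK_* macros do above. */
__attribute__((format(printf, 3, 4)))
static void config_err(int adjust, const char *name, const char *fmt, ...)
{
	va_list args;

	fprintf(stderr, "%s: mismatch in %s ", adjust ? "debug" : "error", name);

	va_start(args, fmt);
	vfprintf(stderr, fmt, args);
	va_end(args);
}

int main(void)
{
	config_err(0, "pipe_bpp", "(expected %i, found %i)\n", 24, 18);
	return 0;
}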
+
static bool
intel_pipe_config_compare(struct drm_i915_private *dev_priv,
struct intel_crtc_state *current_config,
@@ -13170,17 +13200,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
{
bool ret = true;
-#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
- do { \
- if (!adjust) \
- DRM_ERROR(fmt, ##__VA_ARGS__); \
- else \
- DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
- } while (0)
-
#define PIPE_CONF_CHECK_X(name) \
if (current_config->name != pipe_config->name) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected 0x%08x, found 0x%08x)\n", \
current_config->name, \
pipe_config->name); \
@@ -13189,7 +13211,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_I(name) \
if (current_config->name != pipe_config->name) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
@@ -13198,7 +13220,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_P(name) \
if (current_config->name != pipe_config->name) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected %p, found %p)\n", \
current_config->name, \
pipe_config->name); \
@@ -13209,7 +13231,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
if (!intel_compare_link_m_n(&current_config->name, \
&pipe_config->name,\
adjust)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
"found tu %i, gmch %i/%i link %i/%i)\n", \
current_config->name.tu, \
@@ -13235,7 +13257,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
&pipe_config->name, adjust) && \
!intel_compare_link_m_n(&current_config->alt_name, \
&pipe_config->name, adjust)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected tu %i gmch %i/%i link %i/%i, " \
"or tu %i gmch %i/%i link %i/%i, " \
"found tu %i, gmch %i/%i link %i/%i)\n", \
@@ -13259,8 +13281,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
- "(expected %i, found %i)\n", \
+ pipe_config_err(adjust, __stringify(name), \
+ "(%x) (expected %i, found %i)\n", \
+ (mask), \
current_config->name & (mask), \
pipe_config->name & (mask)); \
ret = false; \
@@ -13268,7 +13291,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
- INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
+ pipe_config_err(adjust, __stringify(name), \
"(expected %i, found %i)\n", \
current_config->name, \
pipe_config->name); \
@@ -13385,7 +13408,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
-#undef INTEL_ERR_OR_DBG_KMS
return ret;
}
@@ -13686,9 +13708,9 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
}
if (!crtc) {
- I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
+ I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
"more active pll users than references: %x vs %x\n",
- pll->active_mask, pll->config.crtc_mask);
+ pll->active_mask, pll->state.crtc_mask);
return;
}
@@ -13704,11 +13726,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
pipe_name(drm_crtc_index(crtc)), pll->active_mask);
- I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
+ I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
- crtc_mask, pll->config.crtc_mask);
+ crtc_mask, pll->state.crtc_mask);
- I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
+ I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
&dpll_hw_state,
sizeof(dpll_hw_state)),
"pll hw state mismatch\n");
@@ -13734,7 +13756,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
pipe_name(drm_crtc_index(crtc)));
- I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
+ I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
pipe_name(drm_crtc_index(crtc)));
}
@@ -13817,7 +13839,6 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_shared_dpll_config *shared_dpll = NULL;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
int i;
@@ -13838,10 +13859,7 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
if (!old_dpll)
continue;
- if (!shared_dpll)
- shared_dpll = intel_atomic_get_shared_dpll_state(state);
-
- intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
+ intel_release_shared_dpll(old_dpll, intel_crtc, state);
}
}
@@ -13910,14 +13928,34 @@ static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
return 0;
}
+static int intel_lock_all_pipes(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+
+ /* Add all pipes to the state */
+ for_each_crtc(state->dev, crtc) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ return 0;
+}
+
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- int ret = 0;
- /* add all active pipes to the state */
+ /*
+ * Add all pipes to the state, and force
+ * a modeset on all the active ones.
+ */
for_each_crtc(state->dev, crtc) {
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -13929,14 +13967,14 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
- break;
+ return ret;
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
- break;
+ return ret;
}
- return ret;
+ return 0;
}
static int intel_modeset_checks(struct drm_atomic_state *state)
@@ -13982,12 +14020,24 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
if (ret < 0)
return ret;
+ /*
+ * Writes to dev_priv->atomic_cdclk_freq must be protected by
+ * holding all the crtc locks, even if we don't end up
+ * touching the hardware
+ */
+ if (intel_state->cdclk != dev_priv->atomic_cdclk_freq) {
+ ret = intel_lock_all_pipes(state);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* All pipes must be switched off while we change the cdclk. */
if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
- intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
+ intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) {
ret = intel_modeset_all_pipes(state);
-
- if (ret < 0)
- return ret;
+ if (ret < 0)
+ return ret;
+ }
DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
intel_state->cdclk, intel_state->dev_cdclk);
@@ -14581,7 +14631,7 @@ static int intel_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(state, true);
dev_priv->wm.distrust_bios_wm = false;
- intel_shared_dpll_commit(state);
+ intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
if (intel_state->modeset) {
@@ -14691,6 +14741,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
+ .set_crc_source = intel_crtc_set_crc_source,
};
/**
@@ -14949,6 +15000,141 @@ const struct drm_plane_funcs intel_plane_funcs = {
.atomic_destroy_state = intel_plane_destroy_state,
};
+static int
+intel_legacy_cursor_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ int ret;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *old_fb;
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct i915_vma *old_vma;
+
+ /*
+ * When crtc is inactive or there is a modeset pending,
+ * wait for it to complete in the slowpath
+ */
+ if (!crtc_state->active || needs_modeset(crtc_state) ||
+ to_intel_crtc_state(crtc_state)->update_pipe)
+ goto slow;
+
+ old_plane_state = plane->state;
+
+ /*
+ * If any parameters change that may affect watermarks,
+ * take the slowpath. Only changing fb or position should be
+ * in the fastpath.
+ */
+ if (old_plane_state->crtc != crtc ||
+ old_plane_state->src_w != src_w ||
+ old_plane_state->src_h != src_h ||
+ old_plane_state->crtc_w != crtc_w ||
+ old_plane_state->crtc_h != crtc_h ||
+ !old_plane_state->visible ||
+ old_plane_state->fb->modifier != fb->modifier)
+ goto slow;
+
+ new_plane_state = intel_plane_duplicate_state(plane);
+ if (!new_plane_state)
+ return -ENOMEM;
+
+ drm_atomic_set_fb_for_plane(new_plane_state, fb);
+
+ new_plane_state->src_x = src_x;
+ new_plane_state->src_y = src_y;
+ new_plane_state->src_w = src_w;
+ new_plane_state->src_h = src_h;
+ new_plane_state->crtc_x = crtc_x;
+ new_plane_state->crtc_y = crtc_y;
+ new_plane_state->crtc_w = crtc_w;
+ new_plane_state->crtc_h = crtc_h;
+
+ ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
+ to_intel_plane_state(new_plane_state));
+ if (ret)
+ goto out_free;
+
+ /* Visibility changed, must take slowpath. */
+ if (!new_plane_state->visible)
+ goto slow_free;
+
+ ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
+ if (ret)
+ goto out_free;
+
+ if (INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
+
+ ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to attach phys object\n");
+ goto out_unlock;
+ }
+ } else {
+ struct i915_vma *vma;
+
+ vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_KMS("failed to pin object\n");
+
+ ret = PTR_ERR(vma);
+ goto out_unlock;
+ }
+
+ to_intel_plane_state(new_plane_state)->vma = vma;
+ }
+
+ old_fb = old_plane_state->fb;
+ old_vma = to_intel_plane_state(old_plane_state)->vma;
+
+ i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
+ intel_plane->frontbuffer_bit);
+
+ /* Swap plane state */
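/*
 * (The copy below makes plane->state describe the new cursor, while
 * new_plane_state is left holding the old fb/vma and a NULL fence, so
 * the intel_cleanup_plane_fb() and intel_plane_destroy_state() calls
 * further down release the old buffer rather than the one just pinned.)
 */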
+ new_plane_state->fence = old_plane_state->fence;
+ *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
+ new_plane_state->fence = NULL;
+ new_plane_state->fb = old_fb;
+ to_intel_plane_state(new_plane_state)->vma = old_vma;
+
+ intel_plane->update_plane(plane,
+ to_intel_crtc_state(crtc->state),
+ to_intel_plane_state(plane->state));
+
+ intel_cleanup_plane_fb(plane, new_plane_state);
+
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+out_free:
+ intel_plane_destroy_state(plane, new_plane_state);
+ return ret;
+
+slow_free:
+ intel_plane_destroy_state(plane, new_plane_state);
+slow:
+ return drm_atomic_helper_update_plane(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+}
+
+static const struct drm_plane_funcs intel_cursor_plane_funcs = {
+ .update_plane = intel_legacy_cursor_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = intel_plane_destroy,
+ .set_property = drm_atomic_helper_plane_set_property,
+ .atomic_get_property = intel_plane_atomic_get_property,
+ .atomic_set_property = intel_plane_atomic_set_property,
+ .atomic_duplicate_state = intel_plane_duplicate_state,
+ .atomic_destroy_state = intel_plane_destroy_state,
+};
+
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
@@ -14988,6 +15174,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->plane = (enum plane) !pipe;
else
primary->plane = (enum plane) pipe;
+ primary->id = PLANE_PRIMARY;
primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
primary->check_plane = intel_check_primary_plane;
@@ -15187,13 +15374,14 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
cursor->max_downscale = 1;
cursor->pipe = pipe;
cursor->plane = pipe;
+ cursor->id = PLANE_CURSOR;
cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
cursor->check_plane = intel_check_cursor_plane;
cursor->update_plane = intel_update_cursor_plane;
cursor->disable_plane = intel_disable_cursor_plane;
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- 0, &intel_plane_funcs,
+ 0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
DRM_PLANE_TYPE_CURSOR,
@@ -15221,14 +15409,18 @@ fail:
return ERR_PTR(ret);
}
-static void skl_init_scalers(struct drm_i915_private *dev_priv,
- struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static void intel_crtc_init_scalers(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int i;
+ crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
+ if (!crtc->num_scalers)
+ return;
+
for (i = 0; i < crtc->num_scalers; i++) {
struct intel_scaler *scaler = &scaler_state->scalers[i];
@@ -15260,21 +15452,12 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_crtc->base.state = &crtc_state->base;
crtc_state->base.crtc = &intel_crtc->base;
- /* initialize shared scalers */
- if (INTEL_GEN(dev_priv) >= 9) {
- if (pipe == PIPE_C)
- intel_crtc->num_scalers = 1;
- else
- intel_crtc->num_scalers = SKL_NUM_SCALERS;
-
- skl_init_scalers(dev_priv, intel_crtc, crtc_state);
- }
-
primary = intel_primary_plane_create(dev_priv, pipe);
if (IS_ERR(primary)) {
ret = PTR_ERR(primary);
goto fail;
}
+ intel_crtc->plane_ids_mask |= BIT(primary->id);
for_each_sprite(dev_priv, pipe, sprite) {
struct intel_plane *plane;
@@ -15284,6 +15467,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(plane);
goto fail;
}
+ intel_crtc->plane_ids_mask |= BIT(plane->id);
}
cursor = intel_cursor_plane_create(dev_priv, pipe);
@@ -15291,6 +15475,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
ret = PTR_ERR(cursor);
goto fail;
}
+ intel_crtc->plane_ids_mask |= BIT(cursor->id);
ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
&primary->base, &cursor->base,
@@ -15308,6 +15493,9 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_crtc->wm.cxsr_allowed = true;
+ /* initialize shared scalers */
+ intel_crtc_init_scalers(intel_crtc, crtc_state);
+
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc;
@@ -15444,7 +15632,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv))
+ if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
dev_priv->pps_mmio_base = PCH_PPS_BASE;
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->pps_mmio_base = VLV_PPS_BASE;
@@ -15454,9 +15642,8 @@ static void intel_pps_init(struct drm_i915_private *dev_priv)
intel_pps_unlock_regs_wa(dev_priv);
}
-static void intel_setup_outputs(struct drm_device *dev)
+static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
bool dpd_is_edp = false;
@@ -15467,22 +15654,22 @@ static void intel_setup_outputs(struct drm_device *dev)
* prevent the registration of both eDP and LVDS and the incorrect
* sharing of the PPS.
*/
- intel_lvds_init(dev);
+ intel_lvds_init(dev_priv);
if (intel_crt_present(dev_priv))
- intel_crt_init(dev);
+ intel_crt_init(dev_priv);
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
* DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
* detect the ports.
*/
- intel_ddi_init(dev, PORT_A);
- intel_ddi_init(dev, PORT_B);
- intel_ddi_init(dev, PORT_C);
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
- intel_dsi_init(dev);
+ intel_dsi_init(dev_priv);
} else if (HAS_DDI(dev_priv)) {
int found;
@@ -15494,18 +15681,18 @@ static void intel_setup_outputs(struct drm_device *dev)
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- intel_ddi_init(dev, PORT_A);
+ intel_ddi_init(dev_priv, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
* register */
found = I915_READ(SFUSE_STRAP);
if (found & SFUSE_STRAP_DDIB_DETECTED)
- intel_ddi_init(dev, PORT_B);
+ intel_ddi_init(dev_priv, PORT_B);
if (found & SFUSE_STRAP_DDIC_DETECTED)
- intel_ddi_init(dev, PORT_C);
+ intel_ddi_init(dev_priv, PORT_C);
if (found & SFUSE_STRAP_DDID_DETECTED)
- intel_ddi_init(dev, PORT_D);
+ intel_ddi_init(dev_priv, PORT_D);
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
@@ -15513,35 +15700,35 @@ static void intel_setup_outputs(struct drm_device *dev)
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
- intel_ddi_init(dev, PORT_E);
+ intel_ddi_init(dev_priv, PORT_E);
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
if (has_edp_a(dev_priv))
- intel_dp_init(dev, DP_A, PORT_A);
+ intel_dp_init(dev_priv, DP_A, PORT_A);
if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
- found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
+ found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
if (!found)
- intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
+ intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
- intel_dp_init(dev, PCH_DP_B, PORT_B);
+ intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
}
if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
- intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
+ intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
- intel_hdmi_init(dev, PCH_HDMID, PORT_D);
+ intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_C, PORT_C);
+ intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
if (I915_READ(PCH_DP_D) & DP_DETECTED)
- intel_dp_init(dev, PCH_DP_D, PORT_D);
+ intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
@@ -15563,16 +15750,16 @@ static void intel_setup_outputs(struct drm_device *dev)
has_edp = intel_dp_is_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
- has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+ has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
- intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
+ intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
- has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+ has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
- intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
+ intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
if (IS_CHERRYVIEW(dev_priv)) {
/*
@@ -15581,63 +15768,63 @@ static void intel_setup_outputs(struct drm_device *dev)
*/
has_port = intel_bios_is_port_present(dev_priv, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
- intel_dp_init(dev, CHV_DP_D, PORT_D);
+ intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
- intel_hdmi_init(dev, CHV_HDMID, PORT_D);
+ intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
}
- intel_dsi_init(dev);
+ intel_dsi_init(dev_priv);
} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
- found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
+ found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
if (!found && IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
- intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
+ intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
}
if (!found && IS_G4X(dev_priv))
- intel_dp_init(dev, DP_B, PORT_B);
+ intel_dp_init(dev_priv, DP_B, PORT_B);
}
/* Before G4X SDVOC doesn't have its own detect register */
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
- found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
+ found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
}
if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
if (IS_G4X(dev_priv)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
- intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
+ intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
}
if (IS_G4X(dev_priv))
- intel_dp_init(dev, DP_C, PORT_C);
+ intel_dp_init(dev_priv, DP_C, PORT_C);
}
if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
- intel_dp_init(dev, DP_D, PORT_D);
+ intel_dp_init(dev_priv, DP_D, PORT_D);
} else if (IS_GEN2(dev_priv))
- intel_dvo_init(dev);
+ intel_dvo_init(dev_priv);
if (SUPPORTS_TV(dev_priv))
- intel_tv_init(dev);
+ intel_tv_init(dev_priv);
- intel_psr_init(dev);
+ intel_psr_init(dev_priv);
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
intel_encoder_clones(encoder);
}
- intel_init_pch_refclk(dev);
+ intel_init_pch_refclk(dev_priv);
- drm_helper_move_panel_connectors_to_head(dev);
+ drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -15874,7 +16061,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
- drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
+ drm_helper_mode_fill_fb_struct(dev, &intel_fb->base, mode_cmd);
intel_fb->obj = obj;
ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
@@ -15912,6 +16099,17 @@ intel_user_framebuffer_create(struct drm_device *dev,
return fb;
}
+static void intel_atomic_state_free(struct drm_atomic_state *state)
+{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+
+ drm_atomic_state_default_release(state);
+
+ i915_sw_fence_fini(&intel_state->commit_ready);
+
+ kfree(state);
+}
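/*
 * A dedicated free hook is needed because struct intel_atomic_state
 * embeds an i915_sw_fence used to track the commit; it has to be torn
 * down with i915_sw_fence_fini() before the state can be kfree()d,
 * which the generic release path does not know about.
 */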
+
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
.output_poll_changed = intel_fbdev_output_poll_changed,
@@ -15919,6 +16117,7 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_commit = intel_atomic_commit,
.atomic_state_alloc = intel_atomic_state_alloc,
.atomic_state_clear = intel_atomic_state_clear,
+ .atomic_state_free = intel_atomic_state_free,
};
/**
@@ -15999,7 +16198,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dev_priv->display.get_display_clock_speed =
skylake_get_display_clock_speed;
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
dev_priv->display.get_display_clock_speed =
broxton_get_display_clock_speed;
else if (IS_BROADWELL(dev_priv))
@@ -16014,14 +16213,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
else if (IS_GEN5(dev_priv))
dev_priv->display.get_display_clock_speed =
ilk_get_display_clock_speed;
- else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
+ else if (IS_I945G(dev_priv) || IS_I965G(dev_priv) ||
IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_GM45(dev_priv))
dev_priv->display.get_display_clock_speed =
gm45_get_display_clock_speed;
- else if (IS_CRESTLINE(dev_priv))
+ else if (IS_I965GM(dev_priv))
dev_priv->display.get_display_clock_speed =
i965gm_get_display_clock_speed;
else if (IS_PINEVIEW(dev_priv))
@@ -16033,7 +16232,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
else if (IS_I915G(dev_priv))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
+ else if (IS_I945GM(dev_priv) || IS_I845G(dev_priv))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
else if (IS_I915GM(dev_priv))
@@ -16072,7 +16271,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
valleyview_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
valleyview_modeset_calc_cdclk;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.modeset_commit_cdclk =
bxt_modeset_commit_cdclk;
dev_priv->display.modeset_calc_cdclk =
@@ -16470,8 +16669,8 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_height = 8192;
}
- if (IS_845G(dev_priv) || IS_I865G(dev_priv)) {
- dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512;
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+ dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
} else if (IS_GEN2(dev_priv)) {
dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
@@ -16508,7 +16707,7 @@ int intel_modeset_init(struct drm_device *dev)
/* Just disable it once at startup */
i915_disable_vga(dev_priv);
- intel_setup_outputs(dev);
+ intel_setup_outputs(dev_priv);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev);
@@ -16661,7 +16860,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* Temporarily change the plane mapping and disable everything
* ... */
plane = crtc->plane;
- to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
+ crtc->base.primary->state->visible = true;
crtc->plane = !plane;
intel_crtc_disable_noatomic(&crtc->base);
crtc->plane = plane;
@@ -16813,7 +17012,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
dev_priv->active_crtcs = 0;
for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state = crtc->config;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
memset(crtc_state, 0, sizeof(*crtc_state));
@@ -16832,34 +17032,41 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
crtc->base.base.id, crtc->base.name,
- enableddisabled(crtc->active));
+ enableddisabled(crtc_state->base.active));
}
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
pll->on = pll->funcs.get_hw_state(dev_priv, pll,
- &pll->config.hw_state);
- pll->config.crtc_mask = 0;
+ &pll->state.hw_state);
+ pll->state.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
- if (crtc->active && crtc->config->shared_dpll == pll)
- pll->config.crtc_mask |= 1 << crtc->pipe;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->base.active &&
+ crtc_state->shared_dpll == pll)
+ pll->state.crtc_mask |= 1 << crtc->pipe;
}
- pll->active_mask = pll->config.crtc_mask;
+ pll->active_mask = pll->state.crtc_mask;
DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->name, pll->config.crtc_mask, pll->on);
+ pll->name, pll->state.crtc_mask, pll->on);
}
for_each_intel_encoder(dev, encoder) {
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
+ struct intel_crtc_state *crtc_state;
+
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
- crtc->config->output_types |= 1 << encoder->type;
- encoder->get_config(encoder, crtc->config);
+ crtc_state->output_types |= 1 << encoder->type;
+ encoder->get_config(encoder, crtc_state);
} else {
encoder->base.crtc = NULL;
}
@@ -16900,14 +17107,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
int pixclk = 0;
- crtc->base.hwmode = crtc->config->base.adjusted_mode;
+ crtc->base.hwmode = crtc_state->base.adjusted_mode;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
- if (crtc->base.state->active) {
- intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
- intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+ if (crtc_state->base.active) {
+ intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
+ intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
/*
@@ -16915,29 +17124,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
* the atomic core happy. It wants a valid mode if the
* crtc's enabled, so we do the above call.
*
- * At this point some state updated by the connectors
- * in their ->detect() callback has not run yet, so
- * no recalculation can be done yet.
- *
- * Even if we could do a recalculation and modeset
- * right now it would cause a double modeset if
- * fbdev or userspace chooses a different initial mode.
- *
- * If that happens, someone indicated they wanted a
- * mode change, which means it's safe to do a full
- * recalculation.
+ * But since we don't set all the derived state fully, set a
+ * flag to indicate that a full recalculation is needed on
+ * the next commit.
*/
- crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+ crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- pixclk = ilk_pipe_pixel_rate(crtc->config);
+ pixclk = ilk_pipe_pixel_rate(crtc_state);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+ pixclk = crtc_state->base.adjusted_mode.crtc_clock;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixclk = DIV_ROUND_UP(pixclk * 100, 95);
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
@@ -16946,7 +17147,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
dev_priv->min_pixclk[crtc->pipe] = pixclk;
- intel_pipe_config_sanity_check(dev_priv, crtc->config);
+ intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
}
@@ -17120,7 +17321,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
intel_cleanup_gt_powersave(dev_priv);
- intel_teardown_gmbus(dev);
+ intel_teardown_gmbus(dev_priv);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 0b8e8eb85c19..d1670b8afbf5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -156,38 +156,28 @@ static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
u8 source_max, sink_max;
source_max = intel_dig_port->max_lanes;
- sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
+ sink_max = intel_dp->max_sink_lane_count;
return min(source_max, sink_max);
}
-/*
- * The units on the numbers in the next two are... bizarre. Examples will
- * make it clearer; this one parallels an example in the eDP spec.
- *
- * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
- *
- * 270000 * 1 * 8 / 10 == 216000
- *
- * The actual data capacity of that configuration is 2.16Gbit/s, so the
- * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
- * or equivalently, kilopixels per second - so for 1680x1050R it'd be
- * 119000. At 18bpp that's 2142000 kilobits per second.
- *
- * Thus the strange-looking division by 10 in intel_dp_link_required, to
- * get the result in decakilobits instead of kilobits.
- */
-
-static int
+int
intel_dp_link_required(int pixel_clock, int bpp)
{
- return (pixel_clock * bpp + 9) / 10;
+ /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
+ return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
-static int
+int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
- return (max_link_clock * max_lanes * 8) / 10;
+ /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
+ * link rate that is generally expressed in Gbps. Since 8 bits of data
+ * are transmitted every LS_Clk per lane, there is no need to account for
+ * the channel encoding that is done in the PHY layer here.
+ */
+
+ return max_link_clock * max_lanes;
}
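/*
 * A quick unit check with illustrative numbers: a 1920x1080 mode with a
 * ~148500 kHz pixel clock at 24 bpp needs
 *   intel_dp_link_required(148500, 24) = 148500 * 24 / 8 = 445500,
 * while a 2.7 GHz (HBR) link driving 4 lanes provides
 *   intel_dp_max_data_rate(270000, 4) = 270000 * 4 = 1080000,
 * both now expressed in kBytes/sec, so the mode comfortably fits.
 */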
static int
@@ -223,7 +213,7 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
*sink_rates = default_rates;
- return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+ return (intel_dp->max_sink_link_bw >> 3) + 1;
}
static int
@@ -233,7 +223,7 @@ intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
int size;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
@@ -288,6 +278,44 @@ static int intel_dp_common_rates(struct intel_dp *intel_dp,
common_rates);
}
+static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
+ int *common_rates, int link_rate)
+{
+ int common_len;
+ int index;
+
+ common_len = intel_dp_common_rates(intel_dp, common_rates);
+ for (index = 0; index < common_len; index++) {
+ if (link_rate == common_rates[common_len - index - 1])
+ return common_len - index - 1;
+ }
+
+ return -1;
+}
+
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, uint8_t lane_count)
+{
+ int common_rates[DP_MAX_SUPPORTED_RATES];
+ int link_rate_index;
+
+ link_rate_index = intel_dp_link_rate_index(intel_dp,
+ common_rates,
+ link_rate);
+ if (link_rate_index > 0) {
+ intel_dp->max_sink_link_bw = drm_dp_link_rate_to_bw_code(common_rates[link_rate_index - 1]);
+ intel_dp->max_sink_lane_count = lane_count;
+ } else if (lane_count > 1) {
+ intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
+ intel_dp->max_sink_lane_count = lane_count >> 1;
+ } else {
+ DRM_ERROR("Link Training Unsuccessful\n");
+ return -1;
+ }
+
+ return 0;
+}
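/*
 * Illustrative fallback walk, assuming a sink that advertises HBR2 and
 * four lanes: a failure at 5.4 GHz x4 first steps down the common rates
 * (2.7 GHz x4, then 1.62 GHz x4) via the link_rate_index > 0 branch;
 * when the lowest rate has failed, the lane count is halved and the
 * link rate resets to the sink's maximum, so the walk repeats at x2 and
 * then x1, and only after the lowest rate fails at a single lane does
 * this return -1.
 */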
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -465,14 +493,50 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
}
}
+static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+ unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
+
+ /*
+ * We don't have a power sequencer currently.
+ * Pick one that's not used by other ports.
+ */
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp;
+
+ if (encoder->type != INTEL_OUTPUT_DP &&
+ encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe != intel_dp->pps_pipe);
+
+ if (intel_dp->pps_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->pps_pipe);
+ } else {
+ WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
+
+ if (intel_dp->active_pipe != INVALID_PIPE)
+ pipes &= ~(1 << intel_dp->active_pipe);
+ }
+ }
+
+ if (pipes == 0)
+ return INVALID_PIPE;
+
+ return ffs(pipes) - 1;
+}
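/*
 * Example: if the eDP port on pipe A already owns its power sequencer,
 * only BIT(PIPE_B) remains set and ffs(pipes) - 1 == PIPE_B is returned;
 * if both sequencers are claimed this returns INVALID_PIPE and
 * vlv_power_sequencer_pipe() below warns and falls back to pipe A.
 */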
+
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder;
- unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
enum pipe pipe;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -480,33 +544,20 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
/* We should never land here with regular DP ports */
WARN_ON(!is_edp(intel_dp));
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe != intel_dp->pps_pipe);
+
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
- /*
- * We don't have power sequencer currently.
- * Pick one that's not used by other ports.
- */
- for_each_intel_encoder(dev, encoder) {
- struct intel_dp *tmp;
-
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- tmp = enc_to_intel_dp(&encoder->base);
-
- if (tmp->pps_pipe != INVALID_PIPE)
- pipes &= ~(1 << tmp->pps_pipe);
- }
+ pipe = vlv_find_free_pps(dev_priv);
/*
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
- if (WARN_ON(pipes == 0))
+ if (WARN_ON(pipe == INVALID_PIPE))
pipe = PIPE_A;
- else
- pipe = ffs(pipes) - 1;
vlv_steal_power_sequencer(dev, pipe);
intel_dp->pps_pipe = pipe;
@@ -646,7 +697,7 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_BROXTON(dev_priv)))
+ !IS_GEN9_LP(dev_priv)))
return;
/*
@@ -662,11 +713,18 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
for_each_intel_encoder(dev, encoder) {
struct intel_dp *intel_dp;
- if (encoder->type != INTEL_OUTPUT_EDP)
+ if (encoder->type != INTEL_OUTPUT_DP &&
+ encoder->type != INTEL_OUTPUT_EDP)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
- if (IS_BROXTON(dev_priv))
+
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ if (IS_GEN9_LP(dev_priv))
intel_dp->pps_reset = true;
else
intel_dp->pps_pipe = INVALID_PIPE;
@@ -689,7 +747,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
memset(regs, 0, sizeof(*regs));
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
pps_idx = bxt_power_sequencer_idx(intel_dp);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pps_idx = vlv_power_sequencer_pipe(intel_dp);
@@ -698,7 +756,7 @@ static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
regs->pp_stat = PP_STATUS(pps_idx);
regs->pp_on = PP_ON_DELAYS(pps_idx);
regs->pp_off = PP_OFF_DELAYS(pps_idx);
- if (!IS_BROXTON(dev_priv))
+ if (!IS_GEN9_LP(dev_priv))
regs->pp_div = PP_DIVISOR(pps_idx);
}
@@ -1655,7 +1713,9 @@ found:
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
*/
pipe_config->limited_color_range =
- bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
+ bpp != 18 &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
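/* (drm_default_rgb_quant_range() reports the CEA-861 default for the
 * mode: CE video formats default to limited range RGB while IT/VESA
 * modes default to full range, which is what the old
 * "drm_match_cea_mode() > 1" check was approximating.)
 */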
} else {
pipe_config->limited_color_range =
intel_dp->limited_color_range;
@@ -2402,6 +2462,8 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
DP_SET_POWER_D3);
} else {
+ struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
+
/*
* When turning on, we need to retry for 1ms to give the sink
* time to wake up.
@@ -2413,6 +2475,9 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
break;
msleep(1);
}
+
+ if (ret == 1 && lspcon->active)
+ lspcon_wait_pcon_mode(lspcon);
}
if (ret != 1)
@@ -2820,6 +2885,11 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+
+ if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ return;
+
edp_panel_vdd_off_sync(intel_dp);
/*
@@ -2847,29 +2917,27 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
lockdep_assert_held(&dev_priv->pps_mutex);
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
- return;
-
for_each_intel_encoder(dev, encoder) {
struct intel_dp *intel_dp;
enum port port;
- if (encoder->type != INTEL_OUTPUT_EDP)
+ if (encoder->type != INTEL_OUTPUT_DP &&
+ encoder->type != INTEL_OUTPUT_EDP)
continue;
intel_dp = enc_to_intel_dp(&encoder->base);
port = dp_to_dig_port(intel_dp)->port;
+ WARN(intel_dp->active_pipe == pipe,
+ "stealing pipe %c power sequencer from active (e)DP port %c\n",
+ pipe_name(pipe), port_name(port));
+
if (intel_dp->pps_pipe != pipe)
continue;
DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
pipe_name(pipe), port_name(port));
- WARN(encoder->base.crtc,
- "stealing pipe %c power sequencer from active eDP port %c\n",
- pipe_name(pipe), port_name(port));
-
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
}
@@ -2885,19 +2953,17 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
- return;
+ WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
- if (intel_dp->pps_pipe == crtc->pipe)
- return;
-
- /*
- * If another power sequencer was being used on this
- * port previously make sure to turn off vdd there while
- * we still have control of it.
- */
- if (intel_dp->pps_pipe != INVALID_PIPE)
+ if (intel_dp->pps_pipe != INVALID_PIPE &&
+ intel_dp->pps_pipe != crtc->pipe) {
+ /*
+ * If another power sequencer was being used on this
+ * port previously make sure to turn off vdd there while
+ * we still have control of it.
+ */
vlv_detach_power_sequencer(intel_dp);
+ }
/*
* We may be stealing the power
@@ -2905,6 +2971,11 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
*/
vlv_steal_power_sequencer(dev, crtc->pipe);
+ intel_dp->active_pipe = crtc->pipe;
+
+ if (!is_edp(intel_dp))
+ return;
+
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
@@ -2973,6 +3044,32 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
+static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
+{
+ uint8_t psr_caps = 0;
+
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
+ return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+}
+
+static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+ uint8_t dprx = 0;
+
+ drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dprx);
+ return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
+
+static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
+{
+ uint8_t alpm_caps = 0;
+
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
+ return alpm_caps & DP_ALPM_CAP;
+}
+
/* These are source-specific values. */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
@@ -2980,7 +3077,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum port port = dp_to_dig_port(intel_dp)->port;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_GEN(dev_priv) >= 9) {
if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
@@ -3343,7 +3440,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
@@ -3491,6 +3588,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(intel_dp->panel_power_down_delay);
intel_dp->DP = DP;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ pps_lock(intel_dp);
+ intel_dp->active_pipe = INVALID_PIPE;
+ pps_unlock(intel_dp);
+ }
}
bool
@@ -3545,6 +3648,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
DRM_DEBUG_KMS("PSR2 %s on sink",
dev_priv->psr.psr2_support ? "supported" : "not supported");
+
+ if (dev_priv->psr.psr2_support) {
+ dev_priv->psr.y_cord_support =
+ intel_dp_get_y_cord_status(intel_dp);
+ dev_priv->psr.colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+ dev_priv->psr.alpm =
+ intel_dp_get_alpm_status(intel_dp);
+ }
+
}
/* Read the eDP Display control capabilities registers */
@@ -3569,7 +3682,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (val == 0)
break;
- /* Value read is in kHz while drm clock is saved in deca-kHz */
+ /* Value read multiplied by 200kHz gives the per-lane
+ * link rate in kHz. The source rates are, however,
+ * stored in terms of LS_Clk kHz. The full conversion
+ * back to symbols is
+ * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
+ */
intel_dp->sink_rates[i] = (val * 200) / 10;
}
intel_dp->num_sink_rates = i;
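/*
 * Worked example: a sink advertising a 2.7 GHz link rate reports
 * val = 13500 (13500 * 200 kHz = 2.7 GHz), which is stored here as
 * 13500 * 200 / 10 = 270000, i.e. the same 270000 kHz LS_Clk value
 * used for HBR in the source rate tables.
 */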
@@ -3835,7 +3953,7 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
intel_dp->aux.i2c_nack_count,
intel_dp->aux.i2c_defer_count);
- intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
struct edid *block = intel_connector->detect_edid;
@@ -3851,11 +3969,11 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
- intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_STANDARD;
}
/* Set test active flag here so userspace doesn't interrupt things */
- intel_dp->compliance_test_active = 1;
+ intel_dp->compliance.test_active = 1;
return test_result;
}
@@ -3881,22 +3999,22 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
switch (rxdata) {
case DP_TEST_LINK_TRAINING:
DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
- intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
+ intel_dp->compliance.test_type = DP_TEST_LINK_TRAINING;
response = intel_dp_autotest_link_training(intel_dp);
break;
case DP_TEST_LINK_VIDEO_PATTERN:
DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
- intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
+ intel_dp->compliance.test_type = DP_TEST_LINK_VIDEO_PATTERN;
response = intel_dp_autotest_video_pattern(intel_dp);
break;
case DP_TEST_LINK_EDID_READ:
DRM_DEBUG_KMS("EDID test requested\n");
- intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
+ intel_dp->compliance.test_type = DP_TEST_LINK_EDID_READ;
response = intel_dp_autotest_edid(intel_dp);
break;
case DP_TEST_LINK_PHY_TEST_PATTERN:
DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
- intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
+ intel_dp->compliance.test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
response = intel_dp_autotest_phy_pattern(intel_dp);
break;
default:
@@ -4020,7 +4138,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
/* if link training is requested we should perform it always */
- if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
+ if ((intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) ||
(!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
@@ -4054,9 +4172,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
* Clearing compliance test variables to allow capturing
* of values for next automated test request.
*/
- intel_dp->compliance_test_active = 0;
- intel_dp->compliance_test_type = 0;
- intel_dp->compliance_test_data = 0;
+ memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
/*
* Now read the DPCD to see if it's actually running
@@ -4148,9 +4264,10 @@ static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_connector_status status;
- status = intel_panel_detect(dev);
+ status = intel_panel_detect(dev_priv);
if (status == connector_status_unknown)
status = connector_status_connected;
@@ -4289,14 +4406,14 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
*
* Return %true if @port is connected, %false otherwise.
*/
-static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
{
if (HAS_PCH_IBX(dev_priv))
return ibx_digital_port_connected(dev_priv, port);
else if (HAS_PCH_SPLIT(dev_priv))
return cpt_digital_port_connected(dev_priv, port);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
else if (IS_GM45(dev_priv))
return gm45_digital_port_connected(dev_priv, port);
@@ -4373,9 +4490,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
status = connector_status_disconnected;
if (status == connector_status_disconnected) {
- intel_dp->compliance_test_active = 0;
- intel_dp->compliance_test_type = 0;
- intel_dp->compliance_test_data = 0;
+ memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -4396,6 +4511,12 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
yesno(intel_dp_source_supports_hbr2(intel_dp)),
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
+ /* Set the max lane count for sink */
+ intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ /* Set the max link BW for sink */
+ intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
+
intel_dp_print_rates(intel_dp);
intel_dp_read_desc(intel_dp);
@@ -4751,27 +4872,41 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}
+static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
+
+ if ((intel_dp->DP & DP_PORT_EN) == 0)
+ return INVALID_PIPE;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
+ else
+ return PORT_TO_PIPE(intel_dp->DP);
+}
+
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
- if (IS_GEN9(dev_priv) && lspcon->active)
+ if (lspcon->active)
lspcon_resume(lspcon);
- if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
- return;
-
pps_lock(intel_dp);
- /* Reinit the power sequencer, in case BIOS did something with it. */
- intel_dp_pps_init(encoder->dev, intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+
+ if (is_edp(intel_dp)) {
+ /* Reinit the power sequencer, in case BIOS did something with it. */
+ intel_dp_pps_init(encoder->dev, intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ }
pps_unlock(intel_dp);
}
@@ -4879,7 +5014,7 @@ bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
if (INTEL_GEN(dev_priv) < 5)
return false;
- if (port == PORT_A)
+ if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
return true;
return intel_bios_is_port_edp(dev_priv, port);
@@ -4926,7 +5061,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off);
- if (!IS_BROXTON(dev_priv)) {
+ if (!IS_GEN9_LP(dev_priv)) {
I915_WRITE(regs.pp_ctrl, pp_ctl);
pp_div = I915_READ(regs.pp_div);
}
@@ -4944,7 +5079,7 @@ intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
BXT_POWER_CYCLE_DELAY_SHIFT;
if (tmp > 0)
@@ -5101,7 +5236,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply match the Bspec
* formula. */
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
pp_div = I915_READ(regs.pp_ctrl);
pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
@@ -5127,7 +5262,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
I915_WRITE(regs.pp_ctrl, pp_div);
else
I915_WRITE(regs.pp_div, pp_div);
@@ -5135,7 +5270,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- IS_BROXTON(dev_priv) ?
+ IS_GEN9_LP(dev_priv) ?
(I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
I915_READ(regs.pp_div));
}
@@ -5515,7 +5650,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
}
downclock_mode = intel_find_panel_downclock
- (dev, fixed_mode, connector);
+ (dev_priv, fixed_mode, connector);
if (!downclock_mode) {
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
@@ -5624,10 +5759,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* If the current pipe isn't valid, try the PPS pipe, and if that
* fails just assume pipe A.
*/
- if (IS_CHERRYVIEW(dev_priv))
- pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
- else
- pipe = PORT_TO_PIPE(intel_dp->DP);
+ pipe = vlv_active_pipe(intel_dp);
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = intel_dp->pps_pipe;
@@ -5676,6 +5808,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
return false;
intel_dp->pps_pipe = INVALID_PIPE;
+ intel_dp->active_pipe = INVALID_PIPE;
/* intel_dp vfuncs */
if (INTEL_GEN(dev_priv) >= 9)
@@ -5704,6 +5837,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
type = DRM_MODE_CONNECTOR_DisplayPort;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_dp->active_pipe = vlv_active_pipe(intel_dp);
+
/*
* For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
* for DP the encoder type can be set by the caller to
@@ -5793,11 +5929,10 @@ fail:
return false;
}
-bool intel_dp_init(struct drm_device *dev,
+bool intel_dp_init(struct drm_i915_private *dev_priv,
i915_reg_t output_reg,
enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
@@ -5814,8 +5949,9 @@ bool intel_dp_init(struct drm_device *dev,
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
- if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
+ if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "DP %c", port_name(port)))
goto err_encoder_init;
intel_encoder->compute_config = intel_dp_compute_config;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index b029d1026a28..38e3ca2f6f18 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -37,6 +37,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
struct drm_atomic_state *state;
int bpp;
int lane_count, slots;
@@ -58,6 +60,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
state = pipe_config->base.state;
+ if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port))
+ pipe_config->has_audio = true;
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn;
@@ -83,6 +87,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@@ -93,6 +98,10 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
if (ret) {
DRM_ERROR("failed to update payload %d\n", ret);
}
+ if (old_crtc_state->has_audio) {
+ intel_audio_codec_disable(encoder);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ }
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
@@ -205,6 +214,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ if (pipe_config->has_audio) {
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ intel_audio_codec_enable(encoder, pipe_config, conn_state);
+ }
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -227,6 +240,9 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
+ pipe_config->has_audio =
+ intel_ddi_is_audio_enabled(dev_priv, crtc);
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -334,7 +350,17 @@ static enum drm_mode_status
intel_dp_mst_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ int bpp = 24; /* MST uses fixed bpp */
+ int max_rate, mode_rate, max_lanes, max_link_clock;
+
+ max_link_clock = intel_dp_max_link_rate(intel_dp);
+ max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+ mode_rate = intel_dp_link_required(mode->clock, bpp);
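/*
 * With illustrative numbers: a mode with a 533 MHz pixel clock needs
 * mode_rate = 533000 * 24 / 8 = 1599000, which exceeds the
 * 270000 * 4 = 1080000 available from a 2.7 GHz x4 link, so such a
 * mode is now rejected with MODE_CLOCK_HIGH below instead of only
 * being checked against the source's max dotclock.
 */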
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
@@ -343,7 +369,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
- if (mode->clock > max_dotclk)
+ if (mode_rate > max_rate || mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -561,7 +587,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev->dev, &intel_dp->aux, 16, 3, conn_base_id);
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
+ &intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->can_mst = false;
return ret;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 7a8e82dabbf2..09b670929786 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -131,6 +131,18 @@ struct bxt_ddi_phy_info {
enum dpio_phy rcomp_phy;
/**
+ * @reset_delay: delay in us to wait before setting the common reset
+ * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
+ */
+ int reset_delay;
+
+ /**
+ * @pwron_mask: Mask with the appropriate bit set that would cause the
+ * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
+ */
+ u32 pwron_mask;
+
+ /**
* @channel: struct containing per channel information.
*/
struct {
@@ -145,6 +157,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
[DPIO_PHY0] = {
.dual_channel = true,
.rcomp_phy = DPIO_PHY1,
+ .pwron_mask = BIT(0),
.channel = {
[DPIO_CH0] = { .port = PORT_B },
@@ -154,6 +167,7 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
[DPIO_PHY1] = {
.dual_channel = false,
.rcomp_phy = -1,
+ .pwron_mask = BIT(1),
.channel = {
[DPIO_CH0] = { .port = PORT_A },
@@ -161,20 +175,77 @@ static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
},
};
+static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
+ [DPIO_PHY0] = {
+ .dual_channel = false,
+ .rcomp_phy = DPIO_PHY1,
+ .pwron_mask = BIT(0),
+ .reset_delay = 20,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_B },
+ }
+ },
+ [DPIO_PHY1] = {
+ .dual_channel = false,
+ .rcomp_phy = -1,
+ .pwron_mask = BIT(3),
+ .reset_delay = 20,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_A },
+ }
+ },
+ [DPIO_PHY2] = {
+ .dual_channel = false,
+ .rcomp_phy = DPIO_PHY1,
+ .pwron_mask = BIT(1),
+ .reset_delay = 20,
+
+ .channel = {
+ [DPIO_CH0] = { .port = PORT_C },
+ }
+ },
+};
+
static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
{
return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
BIT(phy_info->channel[DPIO_CH0].port);
}
-void bxt_port_to_phy_channel(enum port port,
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
+{
+ if (IS_GEMINILAKE(dev_priv)) {
+ *count = ARRAY_SIZE(glk_ddi_phy_info);
+ return glk_ddi_phy_info;
+ } else {
+ *count = ARRAY_SIZE(bxt_ddi_phy_info);
+ return bxt_ddi_phy_info;
+ }
+}
+
+static const struct bxt_ddi_phy_info *
+bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+ int count;
+ const struct bxt_ddi_phy_info *phy_list =
+ bxt_get_phy_list(dev_priv, &count);
+
+ return &phy_list[phy];
+}
+
+void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch)
{
- const struct bxt_ddi_phy_info *phy_info;
- int i;
+ const struct bxt_ddi_phy_info *phy_info, *phys;
+ int i, count;
- for (i = 0; i < ARRAY_SIZE(bxt_ddi_phy_info); i++) {
- phy_info = &bxt_ddi_phy_info[i];
+ phys = bxt_get_phy_list(dev_priv, &count);
+
+ for (i = 0; i < count; i++) {
+ phy_info = &phys[i];
if (port == phy_info->channel[DPIO_CH0].port) {
*phy = i;
@@ -203,7 +274,7 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
enum dpio_phy phy;
enum dpio_channel ch;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/*
* While we write to the group register to program all lanes at once we
@@ -241,10 +312,12 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ const struct bxt_ddi_phy_info *phy_info;
enum port port;
- if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
+ if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
return false;
if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
@@ -255,14 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
return false;
}
- if (phy_info->rcomp_phy == -1 &&
- !(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE)) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but GRC isn't done\n",
- phy);
-
- return false;
- }
-
if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
phy);
@@ -306,9 +371,11 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ const struct bxt_ddi_phy_info *phy_info;
u32 val;
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
@@ -317,7 +384,6 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
-
return;
}
@@ -326,7 +392,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
}
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val |= GT_DISPLAY_POWER_ON(phy);
+ val |= phy_info->pwron_mask;
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
/*
@@ -367,6 +433,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
if (phy_info->rcomp_phy != -1) {
uint32_t grc_code;
+
+ bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
+
/*
* PHY0 isn't connected to an RCOMP resistor so copy over
* the corresponding calibrated value from PHY1, and disable
@@ -384,31 +453,34 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
I915_WRITE(BXT_PORT_REF_DW8(phy), val);
}
+ if (phy_info->reset_delay)
+ udelay(phy_info->reset_delay);
+
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
-
- if (phy_info->rcomp_phy == -1)
- bxt_phy_wait_grc_done(dev_priv, phy);
-
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
+ const struct bxt_ddi_phy_info *phy_info;
uint32_t val;
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- val &= ~GT_DISPLAY_POWER_ON(phy);
+ val &= ~phy_info->pwron_mask;
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ const struct bxt_ddi_phy_info *phy_info =
+ bxt_get_phy_info(dev_priv, phy);
enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
bool was_enabled;
@@ -461,10 +533,12 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- const struct bxt_ddi_phy_info *phy_info = &bxt_ddi_phy_info[phy];
+ const struct bxt_ddi_phy_info *phy_info;
uint32_t mask;
bool ok;
+ phy_info = bxt_get_phy_info(dev_priv, phy);
+
#define _CHK(reg, mask, exp, fmt, ...) \
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
@@ -540,7 +614,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
enum dpio_channel ch;
int lane;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
@@ -568,7 +642,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
int lane;
uint8_t mask;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
mask = 0;
for (lane = 0; lane < 4; lane++) {
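The intel_dpio_phy.c hunks above replace direct indexing of bxt_ddi_phy_info[] and the GT_DISPLAY_POWER_ON(phy) macro with a per-platform lookup through bxt_get_phy_info() and a pwron_mask field. A minimal sketch of how such a lookup can be structured follows; the glk_ddi_phy_info table name and the dual_channel field are assumptions, only rcomp_phy, reset_delay and pwron_mask are visible in the hunks.

struct bxt_ddi_phy_info {
	bool dual_channel;		/* assumed field, not shown above */
	enum dpio_phy rcomp_phy;	/* PHY to copy the GRC value from, or -1 if none */
	int reset_delay;		/* extra udelay() before deasserting reset, 0 if none */
	u32 pwron_mask;			/* bit(s) to set in BXT_P_CR_GT_DISP_PWRON */
};

static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = { /* per-PHY entries */ };
static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = { /* per-PHY entries */ };

static const struct bxt_ddi_phy_info *
bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
	/* Pick the platform specific table, then index it by PHY. */
	const struct bxt_ddi_phy_info *table =
		IS_GEMINILAKE(dev_priv) ? glk_ddi_phy_info : bxt_ddi_phy_info;

	return &table[phy];
}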
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index a2f0e070d38d..e59e43a9f3a6 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -23,6 +23,25 @@
#include "intel_drv.h"
+/**
+ * DOC: Display PLLs
+ *
+ * Display PLLs used for driving outputs vary by platform. While some have
+ * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
+ * from a pool. In the latter scenario, it is possible that multiple pipes
+ * share a PLL if their configurations match.
+ *
+ * This file provides an abstraction over display PLLs. The function
+ * intel_shared_dpll_init() initializes the PLLs for the given platform. The
+ * users of a PLL are tracked and that tracking is integrated with the atomic
+ * modeset interface. During an atomic operation, a PLL can be requested for a
+ * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
+ * a previously used PLL can be released with intel_release_shared_dpll().
+ * Changes to the users are first staged in the atomic state, and then made
+ * effective by calling intel_shared_dpll_swap_state() during the atomic
+ * commit phase.
+ */
+
struct intel_shared_dpll *
skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
{
@@ -38,11 +57,11 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
pll = &dev_priv->shared_dplls[i];
/* Only want to check enabled timings first */
- if (pll->config.crtc_mask == 0)
+ if (pll->state.crtc_mask == 0)
continue;
- if (memcmp(&dpll_hw_state, &pll->config.hw_state,
- sizeof(pll->config.hw_state)) == 0) {
+ if (memcmp(&dpll_hw_state, &pll->state.hw_state,
+ sizeof(pll->state.hw_state)) == 0) {
found = true;
break;
}
@@ -52,8 +71,8 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
for (i = DPLL_ID_SKL_DPLL1;
((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) {
pll = &dev_priv->shared_dplls[i];
- if (pll->config.crtc_mask == 0) {
- pll->config.hw_state = dpll_hw_state;
+ if (pll->state.crtc_mask == 0) {
+ pll->state.hw_state = dpll_hw_state;
break;
}
}
@@ -61,6 +80,45 @@ skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
return pll;
}
+static void
+intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll_state *shared_dpll)
+{
+ enum intel_dpll_id i;
+
+ /* Copy shared dpll state */
+ for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+ shared_dpll[i] = pll->state;
+ }
+}
+
+static struct intel_shared_dpll_state *
+intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(s);
+
+ WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
+
+ if (!state->dpll_set) {
+ state->dpll_set = true;
+
+ intel_atomic_duplicate_dpll_state(to_i915(s->dev),
+ state->shared_dpll);
+ }
+
+ return state->shared_dpll;
+}
+
+/**
+ * intel_get_shared_dpll_by_id - get a DPLL given its id
+ * @dev_priv: i915 device instance
+ * @id: pll id
+ *
+ * Returns:
+ * A pointer to the DPLL with @id
+ */
struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
@@ -68,6 +126,14 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
return &dev_priv->shared_dplls[id];
}
+/**
+ * intel_get_shared_dpll_id - get the id of a DPLL
+ * @dev_priv: i915 device instance
+ * @pll: the DPLL
+ *
+ * Returns:
+ * The id of @pll
+ */
enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
@@ -79,28 +145,6 @@ intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
}
-void
-intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
-
- config[id].crtc_mask |= 1 << crtc->pipe;
-}
-
-void
-intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll);
-
- config[id].crtc_mask &= ~(1 << crtc->pipe);
-}
-
/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
@@ -118,6 +162,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
pll->name, onoff(state), onoff(cur_state));
}
+/**
+ * intel_prepare_shared_dpll - call a dpll's prepare hook
+ * @crtc: CRTC which has a shared dpll
+ *
+ * This calls the PLL's prepare hook if it has one and if the PLL is not
+ * already enabled. The prepare hook is platform specific.
+ */
void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -128,24 +179,22 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
return;
mutex_lock(&dev_priv->dpll_lock);
- WARN_ON(!pll->config.crtc_mask);
+ WARN_ON(!pll->state.crtc_mask);
if (!pll->active_mask) {
DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
- pll->funcs.mode_set(dev_priv, pll);
+ pll->funcs.prepare(dev_priv, pll);
}
mutex_unlock(&dev_priv->dpll_lock);
}
/**
- * intel_enable_shared_dpll - enable PCH PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
+ * intel_enable_shared_dpll - enable a CRTC's shared DPLL
+ * @crtc: CRTC which has a shared DPLL
*
- * The PCH PLL needs to be enabled before the PCH transcoder, since it
- * drives the transcoder clock.
+ * Enable the shared DPLL used by @crtc.
*/
void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
@@ -161,7 +210,7 @@ void intel_enable_shared_dpll(struct intel_crtc *crtc)
mutex_lock(&dev_priv->dpll_lock);
old_mask = pll->active_mask;
- if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) ||
+ if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
WARN_ON(pll->active_mask & crtc_mask))
goto out;
@@ -186,6 +235,12 @@ out:
mutex_unlock(&dev_priv->dpll_lock);
}
+/**
+ * intel_disable_shared_dpll - disable a CRTC's shared DPLL
+ * @crtc: CRTC which has a shared DPLL
+ *
+ * Disable the shared DPLL used by @crtc.
+ */
void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -230,7 +285,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
- struct intel_shared_dpll_config *shared_dpll;
+ struct intel_shared_dpll_state *shared_dpll;
enum intel_dpll_id i;
shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
@@ -270,7 +325,7 @@ static void
intel_reference_shared_dpll(struct intel_shared_dpll *pll,
struct intel_crtc_state *crtc_state)
{
- struct intel_shared_dpll_config *shared_dpll;
+ struct intel_shared_dpll_state *shared_dpll;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum intel_dpll_id i = pll->id;
@@ -284,13 +339,24 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
pipe_name(crtc->pipe));
- intel_shared_dpll_config_get(shared_dpll, pll, crtc);
+ shared_dpll[pll->id].crtc_mask |= 1 << crtc->pipe;
}
-void intel_shared_dpll_commit(struct drm_atomic_state *state)
+/**
+ * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
+ * @state: atomic state
+ *
+ * This is the dpll version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * For consistency with atomic helpers this function does a complete swap,
+ * i.e. it also puts the current state into @state, even though there is no
+ * need for that at this moment.
+ */
+void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_shared_dpll_config *shared_dpll;
+ struct intel_shared_dpll_state *shared_dpll;
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
@@ -299,8 +365,13 @@ void intel_shared_dpll_commit(struct drm_atomic_state *state)
shared_dpll = to_intel_atomic_state(state)->shared_dpll;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ struct intel_shared_dpll_state tmp;
+
pll = &dev_priv->shared_dplls[i];
- pll->config = shared_dpll[i];
+
+ tmp = pll->state;
+ pll->state = shared_dpll[i];
+ shared_dpll[i] = tmp;
}
}
@@ -323,11 +394,11 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
return val & DPLL_VCO_ENABLE;
}
-static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
{
- I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
- I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
+ I915_WRITE(PCH_FP0(pll->id), pll->state.hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->state.hw_state.fp1);
}
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
@@ -349,7 +420,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
- I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pll->id));
@@ -360,7 +431,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
+ I915_WRITE(PCH_DPLL(pll->id), pll->state.hw_state.dpll);
POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -412,8 +483,19 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
+static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
+}
+
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
- .mode_set = ibx_pch_dpll_mode_set,
+ .prepare = ibx_pch_dpll_prepare,
.enable = ibx_pch_dpll_enable,
.disable = ibx_pch_dpll_disable,
.get_hw_state = ibx_pch_dpll_get_hw_state,
@@ -422,7 +504,7 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
+ I915_WRITE(WRPLL_CTL(pll->id), pll->state.hw_state.wrpll);
POSTING_READ(WRPLL_CTL(pll->id));
udelay(20);
}
@@ -430,7 +512,7 @@ static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+ I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
POSTING_READ(SPLL_CTL);
udelay(20);
}
@@ -798,6 +880,13 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
+static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ hw_state->wrpll, hw_state->spll);
+}
+
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
@@ -873,7 +962,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) |
DPLL_CTRL1_LINK_RATE_MASK(pll->id));
- val |= pll->config.hw_state.ctrl1 << (pll->id * 6);
+ val |= pll->state.hw_state.ctrl1 << (pll->id * 6);
I915_WRITE(DPLL_CTRL1, val);
POSTING_READ(DPLL_CTRL1);
@@ -886,8 +975,8 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
skl_ddi_pll_write_ctrl1(dev_priv, pll);
- I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
- I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
+ I915_WRITE(regs[pll->id].cfgcr1, pll->state.hw_state.cfgcr1);
+ I915_WRITE(regs[pll->id].cfgcr2, pll->state.hw_state.cfgcr2);
POSTING_READ(regs[pll->id].cfgcr1);
POSTING_READ(regs[pll->id].cfgcr2);
@@ -1353,6 +1442,16 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
+static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: "
+ "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
+ hw_state->ctrl1,
+ hw_state->cfgcr1,
+ hw_state->cfgcr2);
+}
+
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
@@ -1373,13 +1472,23 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
enum dpio_phy phy;
enum dpio_channel ch;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Non-SSC reference */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_REF_SEL;
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ if (IS_GEMINILAKE(dev_priv)) {
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp |= PORT_PLL_POWER_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+ if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_POWER_STATE), 200))
+ DRM_ERROR("Power state not set for PLL:%d\n", port);
+ }
+
/* Disable 10 bit clock */
temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
@@ -1388,31 +1497,31 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
/* Write P1 & P2 */
temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
- temp |= pll->config.hw_state.ebb0;
+ temp |= pll->state.hw_state.ebb0;
I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
- temp |= pll->config.hw_state.pll0;
+ temp |= pll->state.hw_state.pll0;
I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
- temp |= pll->config.hw_state.pll1;
+ temp |= pll->state.hw_state.pll1;
I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
- temp |= pll->config.hw_state.pll2;
+ temp |= pll->state.hw_state.pll2;
I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
- temp |= pll->config.hw_state.pll3;
+ temp |= pll->state.hw_state.pll3;
I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
@@ -1420,24 +1529,24 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
- temp |= pll->config.hw_state.pll6;
+ temp |= pll->state.hw_state.pll6;
I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
- temp |= pll->config.hw_state.pll8;
+ temp |= pll->state.hw_state.pll8;
I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
- temp |= pll->config.hw_state.pll9;
+ temp |= pll->state.hw_state.pll9;
I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
- temp |= pll->config.hw_state.pll10;
+ temp |= pll->state.hw_state.pll10;
I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
@@ -1445,7 +1554,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp |= PORT_PLL_RECALIBRATE;
I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- temp |= pll->config.hw_state.ebb4;
+ temp |= pll->state.hw_state.ebb4;
I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
@@ -1458,6 +1567,12 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
200))
DRM_ERROR("PLL %d not locked\n", port);
+ if (IS_GEMINILAKE(dev_priv)) {
+ temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
+ temp |= DCC_DELAY_RANGE_2;
+ I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+ }
+
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
@@ -1465,7 +1580,7 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
- temp |= pll->config.hw_state.pcsdw12;
+ temp |= pll->state.hw_state.pcsdw12;
I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
@@ -1479,6 +1594,16 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
temp &= ~PORT_PLL_ENABLE;
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp &= ~PORT_PLL_POWER_ENABLE;
+ I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+
+ if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_POWER_STATE), 200))
+ DRM_ERROR("Power state not reset for PLL:%d\n", port);
+ }
}
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -1491,7 +1616,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy;
enum dpio_channel ch;
- bxt_port_to_phy_channel(port, &phy, &ch);
+ bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
@@ -1759,6 +1884,25 @@ bxt_get_dpll(struct intel_crtc *crtc,
return pll;
}
+static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+ hw_state->ebb0,
+ hw_state->ebb4,
+ hw_state->pll0,
+ hw_state->pll1,
+ hw_state->pll2,
+ hw_state->pll3,
+ hw_state->pll6,
+ hw_state->pll8,
+ hw_state->pll9,
+ hw_state->pll10,
+ hw_state->pcsdw12);
+}
+
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
@@ -1799,6 +1943,9 @@ struct intel_dpll_mgr {
struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
+
+ void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state);
};
static const struct dpll_info pch_plls[] = {
@@ -1810,6 +1957,7 @@ static const struct dpll_info pch_plls[] = {
static const struct intel_dpll_mgr pch_pll_mgr = {
.dpll_info = pch_plls,
.get_dpll = ibx_get_dpll,
+ .dump_hw_state = ibx_dump_hw_state,
};
static const struct dpll_info hsw_plls[] = {
@@ -1825,6 +1973,7 @@ static const struct dpll_info hsw_plls[] = {
static const struct intel_dpll_mgr hsw_pll_mgr = {
.dpll_info = hsw_plls,
.get_dpll = hsw_get_dpll,
+ .dump_hw_state = hsw_dump_hw_state,
};
static const struct dpll_info skl_plls[] = {
@@ -1838,6 +1987,7 @@ static const struct dpll_info skl_plls[] = {
static const struct intel_dpll_mgr skl_pll_mgr = {
.dpll_info = skl_plls,
.get_dpll = skl_get_dpll,
+ .dump_hw_state = skl_dump_hw_state,
};
static const struct dpll_info bxt_plls[] = {
@@ -1850,8 +2000,15 @@ static const struct dpll_info bxt_plls[] = {
static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
.get_dpll = bxt_get_dpll,
+ .dump_hw_state = bxt_dump_hw_state,
};
+/**
+ * intel_shared_dpll_init - Initialize shared DPLLs
+ * @dev: drm device
+ *
+ * Initialize shared DPLLs for @dev.
+ */
void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -1861,7 +2018,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
dpll_mgr = &skl_pll_mgr;
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
dpll_mgr = &bxt_pll_mgr;
else if (HAS_DDI(dev_priv))
dpll_mgr = &hsw_pll_mgr;
@@ -1895,6 +2052,21 @@ void intel_shared_dpll_init(struct drm_device *dev)
intel_ddi_pll_init(dev);
}
+/**
+ * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
+ * @crtc: CRTC
+ * @crtc_state: atomic state for @crtc
+ * @encoder: encoder
+ *
+ * Find an appropriate DPLL for the given CRTC and encoder combination. A
+ * reference from the @crtc to the returned pll is registered in the atomic
+ * state. That configuration is made effective by calling
+ * intel_shared_dpll_swap_state(). The reference should be released by calling
+ * intel_release_shared_dpll().
+ *
+ * Returns:
+ * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
+ */
struct intel_shared_dpll *
intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
@@ -1908,3 +2080,48 @@ intel_get_shared_dpll(struct intel_crtc *crtc,
return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
}
+
+/**
+ * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
+ * @dpll: dpll in use by @crtc
+ * @crtc: crtc
+ * @state: atomic state
+ *
+ * This function releases the reference from @crtc to @dpll from the
+ * atomic @state. The new configuration is made effective by calling
+ * intel_shared_dpll_swap_state().
+ */
+void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
+ struct intel_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct intel_shared_dpll_state *shared_dpll_state;
+
+ shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
+ shared_dpll_state[dpll->id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+/**
+ * intel_dpll_dump_hw_state - write hw_state to dmesg
+ * @dev_priv: i915 drm device
+ * @hw_state: hw state to be written to the log
+ *
+ * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
+ */
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state)
+{
+ if (dev_priv->dpll_mgr) {
+ dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+ } else {
+ /* fallback for platforms that don't use the shared dpll
+ * infrastructure
+ */
+ DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
+ }
+}
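The new DOC and kernel-doc comments above describe a check/commit split: a PLL is reserved during atomic check with intel_get_shared_dpll(), the staged configuration is made effective during commit with intel_shared_dpll_swap_state(), and a reference is dropped with intel_release_shared_dpll(). Below is a minimal sketch of a caller following that flow, assuming a shared_dpll pointer on intel_crtc_state; the surrounding helper is illustrative and not part of this patch.

static int example_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state,
				      struct intel_encoder *encoder)
{
	struct intel_shared_dpll *pll;

	/* Atomic check: stage a reference from this CRTC to a suitable PLL. */
	pll = intel_get_shared_dpll(crtc, crtc_state, encoder);
	if (!pll) {
		DRM_DEBUG_KMS("no shared DPLL found for pipe %c\n",
			      pipe_name(crtc->pipe));
		return -EINVAL;
	}

	crtc_state->shared_dpll = pll;	/* assumed field on intel_crtc_state */
	return 0;
}

/*
 * During the atomic commit phase the staged DPLL configuration is swapped
 * into place (and the previous one preserved in the atomic state):
 *
 *	intel_shared_dpll_swap_state(state);
 *
 * A CRTC that stops using a PLL stages the release in the same state:
 *
 *	intel_release_shared_dpll(pll, crtc, state);
 */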
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index f4385353bc11..af1497eb4f9c 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -40,32 +40,72 @@ struct intel_encoder;
struct intel_shared_dpll;
struct intel_dpll_mgr;
+/**
+ * enum intel_dpll_id - possible DPLL ids
+ *
+ * Enumeration of possible IDs for a DPLL. Real shared dpll ids must be >= 0.
+ */
enum intel_dpll_id {
- DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
- /* real shared dpll ids must be >= 0 */
+ /**
+ * @DPLL_ID_PRIVATE: non-shared dpll in use
+ */
+ DPLL_ID_PRIVATE = -1,
+
+ /**
+ * @DPLL_ID_PCH_PLL_A: DPLL A in ILK, SNB and IVB
+ */
DPLL_ID_PCH_PLL_A = 0,
+ /**
+ * @DPLL_ID_PCH_PLL_B: DPLL B in ILK, SNB and IVB
+ */
DPLL_ID_PCH_PLL_B = 1,
- /* hsw/bdw */
+
+
+ /**
+ * @DPLL_ID_WRPLL1: HSW and BDW WRPLL1
+ */
DPLL_ID_WRPLL1 = 0,
+ /**
+ * @DPLL_ID_WRPLL2: HSW and BDW WRPLL2
+ */
DPLL_ID_WRPLL2 = 1,
+ /**
+ * @DPLL_ID_SPLL: HSW and BDW SPLL
+ */
DPLL_ID_SPLL = 2,
+ /**
+ * @DPLL_ID_LCPLL_810: HSW and BDW 0.81 GHz LCPLL
+ */
DPLL_ID_LCPLL_810 = 3,
+ /**
+ * @DPLL_ID_LCPLL_1350: HSW and BDW 1.35 GHz LCPLL
+ */
DPLL_ID_LCPLL_1350 = 4,
+ /**
+ * @DPLL_ID_LCPLL_2700: HSW and BDW 2.7 GHz LCPLL
+ */
DPLL_ID_LCPLL_2700 = 5,
- /* skl */
+
+ /**
+ * @DPLL_ID_SKL_DPLL0: SKL and later DPLL0
+ */
DPLL_ID_SKL_DPLL0 = 0,
+ /**
+ * @DPLL_ID_SKL_DPLL1: SKL and later DPLL1
+ */
DPLL_ID_SKL_DPLL1 = 1,
+ /**
+ * @DPLL_ID_SKL_DPLL2: SKL and later DPLL2
+ */
DPLL_ID_SKL_DPLL2 = 2,
+ /**
+ * @DPLL_ID_SKL_DPLL3: SKL and later DPLL3
+ */
DPLL_ID_SKL_DPLL3 = 3,
};
#define I915_NUM_PLLS 6
-/** Inform the state checker that the DPLL is kept enabled even if not
- * in use by any crtc.
- */
-#define INTEL_DPLL_ALWAYS_ON (1 << 0)
-
struct intel_dpll_hw_state {
/* i9xx, pch plls */
uint32_t dpll;
@@ -93,36 +133,120 @@ struct intel_dpll_hw_state {
pcsdw12;
};
-struct intel_shared_dpll_config {
- unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
+/**
+ * struct intel_shared_dpll_state - hold the DPLL atomic state
+ *
+ * This structure holds an atomic state for the DPLL, which can represent
+ * either its current state (in struct &intel_shared_dpll) or a desired
+ * future state which would be applied by an atomic mode set (stored in
+ * a struct &intel_atomic_state).
+ *
+ * See also intel_get_shared_dpll() and intel_release_shared_dpll().
+ */
+struct intel_shared_dpll_state {
+ /**
+ * @crtc_mask: mask of CRTCs using this DPLL, active or not
+ */
+ unsigned crtc_mask;
+
+ /**
+ * @hw_state: hardware configuration for the DPLL stored in
+ * struct &intel_dpll_hw_state.
+ */
struct intel_dpll_hw_state hw_state;
};
+/**
+ * struct intel_shared_dpll_funcs - platform specific hooks for managing DPLLs
+ */
struct intel_shared_dpll_funcs {
- /* The mode_set hook is optional and should be used together with the
- * intel_prepare_shared_dpll function. */
- void (*mode_set)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
+ /**
+ * @prepare:
+ *
+ * Optional hook to perform operations prior to enabling the PLL.
+ * Called from intel_prepare_shared_dpll() function unless the PLL
+ * is already enabled.
+ */
+ void (*prepare)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
+
+ /**
+ * @enable:
+ *
+ * Hook for enabling the pll, called from intel_enable_shared_dpll()
+ * if the pll is not already enabled.
+ */
void (*enable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
+
+ /**
+ * @disable:
+ *
+ * Hook for disabling the pll, called from intel_disable_shared_dpll()
+ * only when it is safe to disable the pll, i.e., there are no more
+ * tracked users for it.
+ */
void (*disable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
+
+ /**
+ * @get_hw_state:
+ *
+ * Hook for reading the values currently programmed to the DPLL
+ * registers. This is used for initial hw state readout and state
+ * verification after a mode set.
+ */
bool (*get_hw_state)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
};
+/**
+ * struct intel_shared_dpll - display PLL with tracked state and users
+ */
struct intel_shared_dpll {
- struct intel_shared_dpll_config config;
+ /**
+ * @state:
+ *
+ * Store the state for the pll, including its hw state
+ * and CRTCs using it.
+ */
+ struct intel_shared_dpll_state state;
- unsigned active_mask; /* mask of active CRTCs (i.e. DPMS on) */
- bool on; /* is the PLL actually active? Disabled during modeset */
+ /**
+ * @active_mask: mask of active CRTCs (i.e. DPMS on) using this DPLL
+ */
+ unsigned active_mask;
+
+ /**
+ * @on: is the PLL actually active? Disabled during modeset
+ */
+ bool on;
+
+ /**
+ * @name: DPLL name; used for logging
+ */
const char *name;
- /* should match the index in the dev_priv->shared_dplls array */
+
+ /**
+ * @id: unique identifier for this DPLL; should match the index in the
+ * dev_priv->shared_dplls array
+ */
enum intel_dpll_id id;
+ /**
+ * @funcs: platform specific hooks
+ */
struct intel_shared_dpll_funcs funcs;
+#define INTEL_DPLL_ALWAYS_ON (1 << 0)
+ /**
+ * @flags:
+ *
+ * INTEL_DPLL_ALWAYS_ON
+ * Inform the state checker that the DPLL is kept enabled even if
+ * not in use by any CRTC.
+ */
uint32_t flags;
};
@@ -138,14 +262,6 @@ intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
-void
-intel_shared_dpll_config_get(struct intel_shared_dpll_config *config,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc);
-void
-intel_shared_dpll_config_put(struct intel_shared_dpll_config *config,
- struct intel_shared_dpll *pll,
- struct intel_crtc *crtc);
void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
bool state);
@@ -154,12 +270,18 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *state,
struct intel_encoder *encoder);
+void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
+ struct intel_crtc *crtc,
+ struct drm_atomic_state *state);
void intel_prepare_shared_dpll(struct intel_crtc *crtc);
void intel_enable_shared_dpll(struct intel_crtc *crtc);
void intel_disable_shared_dpll(struct intel_crtc *crtc);
-void intel_shared_dpll_commit(struct drm_atomic_state *state);
+void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *hw_state);
+
/* BXT dpll related functions */
bool bxt_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state);
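The header changes above keep two masks per PLL: state.crtc_mask tracks every CRTC referencing the PLL (committed via intel_shared_dpll_swap_state()), while active_mask tracks the CRTCs actually driving it at runtime. A small sketch of the intended invariant between them; the helper itself is illustrative only.

static bool example_dpll_in_use(const struct intel_shared_dpll *pll)
{
	/* A CRTC may only be active on a PLL it also references. */
	WARN_ON(pll->active_mask & ~pll->state.crtc_mask);

	/* The PLL hardware can be switched off once no CRTC is active on it. */
	return pll->active_mask != 0;
}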
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ba2323f1b92b..b9cde116dab3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
@@ -358,7 +359,7 @@ struct intel_atomic_state {
/* SKL/KBL Only */
unsigned int cdclk_pll_vco;
- struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
+ struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
/*
* Current watermarks can't be trusted during hardware readout, so
@@ -694,8 +695,9 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
- unsigned long enabled_power_domains;
bool lowfreq_avail;
+ u8 plane_ids_mask;
+ unsigned long enabled_power_domains;
struct intel_overlay *overlay;
struct intel_flip_work *flip_work;
@@ -769,7 +771,8 @@ struct intel_plane_wm_parameters {
struct intel_plane {
struct drm_plane base;
- int plane;
+ u8 plane;
+ enum plane_id id;
enum pipe pipe;
bool can_scale;
int max_downscale;
@@ -843,11 +846,13 @@ struct intel_hdmi {
enum hdmi_picture_aspect aspect_ratio;
struct intel_connector *attached_connector;
void (*write_infoframe)(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode);
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool (*infoframe_enabled)(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@@ -883,6 +888,16 @@ struct intel_dp_desc {
u8 sw_minor_rev;
} __packed;
+struct intel_dp_compliance_data {
+ unsigned long edid;
+};
+
+struct intel_dp_compliance {
+ unsigned long test_type;
+ struct intel_dp_compliance_data test_data;
+ bool test_active;
+};
+
struct intel_dp {
i915_reg_t output_reg;
i915_reg_t aux_ch_ctl_reg;
@@ -905,6 +920,10 @@ struct intel_dp {
/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
uint8_t num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
+ /* Max lane count for the sink as per DPCD registers */
+ uint8_t max_sink_lane_count;
+ /* Max link BW for the sink as per DPCD registers */
+ int max_sink_link_bw;
/* sink or branch descriptor */
struct intel_dp_desc desc;
struct drm_dp_aux aux;
@@ -928,6 +947,12 @@ struct intel_dp {
*/
enum pipe pps_pipe;
/*
+ * Pipe currently driving the port. Used for preventing
+ * the use of the PPS for any pipe currently driving
+ * external DP as that will mess things up on VLV.
+ */
+ enum pipe active_pipe;
+ /*
* Set if the sequencer may be reset due to a power transition,
* requiring a reinitialization. Only relevant on BXT.
*/
@@ -958,9 +983,7 @@ struct intel_dp {
void (*prepare_link_retrain)(struct intel_dp *intel_dp);
/* Displayport compliance testing */
- unsigned long compliance_test_type;
- unsigned long compliance_test_data;
- bool compliance_test_active;
+ struct intel_dp_compliance compliance;
};
struct intel_lspcon {
@@ -1093,6 +1116,12 @@ dp_to_dig_port(struct intel_dp *intel_dp)
return container_of(intel_dp, struct intel_digital_port, dp);
}
+static inline struct intel_lspcon *
+dp_to_lspcon(struct intel_dp *intel_dp)
+{
+ return &dp_to_dig_port(intel_dp)->lspcon;
+}
+
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
@@ -1145,7 +1174,7 @@ void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
/* intel_crt.c */
-void intel_crt_init(struct drm_device *dev);
+void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
@@ -1156,7 +1185,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
struct drm_connector_state *old_conn_state);
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct drm_crtc *crtc);
-void intel_ddi_init(struct drm_device *dev, enum port port);
+void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
@@ -1169,6 +1198,8 @@ bool intel_ddi_pll_select(struct intel_crtc *crtc,
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
struct intel_encoder *
@@ -1215,7 +1246,7 @@ unsigned int intel_fb_xy_to_linear(int x, int y,
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
-bool intel_has_pending_fb_unpin(struct drm_device *dev);
+bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
void intel_mark_busy(struct drm_i915_private *dev_priv);
void intel_mark_idle(struct drm_i915_private *dev_priv);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
@@ -1386,12 +1417,15 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
-bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
+ enum port port);
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count,
bool link_mst);
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, uint8_t lane_count);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
@@ -1453,6 +1487,10 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
bool __intel_dp_read_desc(struct intel_dp *intel_dp,
struct intel_dp_desc *desc);
bool intel_dp_read_desc(struct intel_dp *intel_dp);
+int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port);
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1461,13 +1499,13 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* intel_dsi.c */
-void intel_dsi_init(struct drm_device *dev);
+void intel_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
/* intel_dvo.c */
-void intel_dvo_init(struct drm_device *dev);
+void intel_dvo_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
@@ -1531,7 +1569,8 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
/* intel_hdmi.c */
-void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port);
+void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
+ enum port port);
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
@@ -1542,7 +1581,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
/* intel_lvds.c */
-void intel_lvds_init(struct drm_device *dev);
+void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
@@ -1587,9 +1626,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
-enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
extern struct drm_display_mode *intel_find_panel_downclock(
- struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector);
@@ -1615,7 +1654,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
-void intel_psr_init(struct drm_device *dev);
+void intel_psr_init(struct drm_i915_private *dev_priv);
void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
@@ -1719,7 +1758,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_device *dev);
+void intel_pm_setup(struct drm_i915_private *dev_priv);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
void intel_gpu_ips_teardown(void);
void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
@@ -1760,7 +1799,7 @@ static inline int intel_enable_rc6(void)
}
/* intel_sdvo.c */
-bool intel_sdvo_init(struct drm_device *dev,
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
i915_reg_t reg, enum port port);
@@ -1775,7 +1814,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc);
void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
/* intel_tv.c */
-void intel_tv_init(struct drm_device *dev);
+void intel_tv_init(struct drm_i915_private *dev_priv);
/* intel_atomic.c */
int intel_connector_atomic_get_property(struct drm_connector *connector,
@@ -1787,8 +1826,6 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
void intel_atomic_state_clear(struct drm_atomic_state *);
-struct intel_shared_dpll_config *
-intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s);
static inline struct intel_crtc_state *
intel_atomic_get_crtc_state(struct drm_atomic_state *state,
@@ -1802,6 +1839,20 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
+static inline struct intel_crtc_state *
+intel_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state, &crtc->base);
+
+ if (crtc_state)
+ return to_intel_crtc_state(crtc_state);
+ else
+ return NULL;
+}
+
static inline struct intel_plane_state *
intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
struct intel_plane *plane)
@@ -1823,6 +1874,8 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *intel_state);
/* intel_color.c */
void intel_color_init(struct drm_crtc *crtc);
@@ -1833,4 +1886,16 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
/* intel_lspcon.c */
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+
+/* intel_pipe_crc.c */
+int intel_pipe_crc_create(struct drm_minor *minor);
+void intel_pipe_crc_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt);
+#else
+#define intel_crtc_set_crc_source NULL
+#endif
+extern const struct file_operations i915_display_crc_ctl_fops;
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 5b72c50d6f76..16732e7bc08e 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -340,7 +340,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
/* Dual link goes to DSI transcoder A. */
if (intel_dsi->ports == BIT(PORT_C))
pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
@@ -379,7 +379,8 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
- usleep_range(2, 3);
+ /* at least 2us - relaxed for hrtimer subsystem optimization */
+ usleep_range(10, 50);
/* 3. Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
@@ -441,7 +442,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_dsi_device_ready(encoder);
}
@@ -464,7 +465,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -476,7 +477,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
- temp |= intel_crtc->pipe ?
+ if (IS_BROXTON(dev_priv))
+ temp |= LANE_CONFIGURATION_DUAL_LINK_A;
+ else
+ temp |= intel_crtc->pipe ?
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
@@ -494,7 +498,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
@@ -663,7 +667,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
@@ -695,8 +699,6 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
-
- intel_disable_dsi_pll(encoder);
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
@@ -712,6 +714,8 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
intel_dsi_clear_device_ready(encoder);
+ intel_disable_dsi_pll(encoder);
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
@@ -755,12 +759,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
* configuration, otherwise accessing DSI registers will hang the
* machine. See BSpec North Display Engine registers/MIPI[BXT].
*/
- if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
+ if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
goto out_put_power;
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
+ i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
@@ -785,7 +789,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
continue;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
u32 tmp = I915_READ(MIPI_CTRL(port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
@@ -973,7 +977,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk;
DRM_DEBUG_KMS("\n");
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@@ -1065,7 +1069,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) {
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
@@ -1152,7 +1156,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
tmp = I915_READ(MIPI_CTRL(port));
@@ -1190,7 +1194,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
if (intel_dsi->clock_stop)
tmp |= CLOCKSTOP;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
tmp |= BXT_DPHY_DEFEATURE_EN;
if (!is_cmd_mode(intel_dsi))
tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
@@ -1241,7 +1245,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100));
- if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
+ if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
@@ -1424,15 +1428,15 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
}
}
-void intel_dsi_init(struct drm_device *dev)
+void intel_dsi_init(struct drm_i915_private *dev_priv)
{
+ struct drm_device *dev = &dev_priv->drm;
struct intel_dsi *intel_dsi;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_display_mode *scan, *fixed_mode = NULL;
- struct drm_i915_private *dev_priv = to_i915(dev);
enum port port;
unsigned int i;
@@ -1444,7 +1448,7 @@ void intel_dsi_init(struct drm_device *dev)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
} else {
DRM_ERROR("Unsupported Mipi device to reg base");
@@ -1485,7 +1489,7 @@ void intel_dsi_init(struct drm_device *dev)
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
*/
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
else if (port == PORT_A)
intel_encoder->crtc_mask = BIT(PIPE_A);
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index 47cd1b20fb3e..8f683b8b1816 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -29,6 +29,7 @@
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <drm/drm_panel.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
#include <asm/intel-mid.h>
@@ -305,19 +306,44 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->sb_lock);
}
+static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ /* XXX: this table is a quick ugly hack. */
+ static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
+ struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
+
+ if (!gpio_desc) {
+ gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
+ "panel", gpio_index,
+ value ? GPIOD_OUT_LOW :
+ GPIOD_OUT_HIGH);
+
+ if (IS_ERR_OR_NULL(gpio_desc)) {
+ DRM_ERROR("GPIO index %u request failed (%ld)\n",
+ gpio_index, PTR_ERR(gpio_desc));
+ return;
+ }
+
+ bxt_gpio_table[gpio_index] = gpio_desc;
+ }
+
+ gpiod_set_value(gpio_desc, value);
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- u8 gpio_source, gpio_index;
+ u8 gpio_source, gpio_index = 0, gpio_number;
bool value;
DRM_DEBUG_KMS("\n");
if (dev_priv->vbt.dsi.seq_version >= 3)
- data++;
+ gpio_index = *data++;
- gpio_index = *data++;
+ gpio_number = *data++;
/* gpio source in sequence v2 only */
if (dev_priv->vbt.dsi.seq_version == 2)
@@ -329,11 +355,11 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
value = *data++ & 1;
if (IS_VALLEYVIEW(dev_priv))
- vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
else if (IS_CHERRYVIEW(dev_priv))
- chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
else
- DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
+ bxt_exec_gpio(dev_priv, gpio_source, gpio_index, value);
return data;
}
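The mipi_exec_gpio() change above separates the sequence v3 gpio_index byte (consumed by bxt_exec_gpio() as a gpiolib lookup index) from the legacy gpio_number byte still used on VLV/CHV. A sketch of the element layout as parsed by the new code; the structs are illustrative only, since the VBT data is actually parsed byte by byte.

/* Sequence v3 and later: index byte, number byte, flags byte. */
struct example_mipi_gpio_v3 {
	u8 gpio_index;	/* gpiolib index, used by bxt_exec_gpio() */
	u8 gpio_number;	/* legacy GPIO number, used on VLV/CHV */
	u8 flags;	/* bit 0: value to drive */
};

/* Sequence v2: no index byte; the flags byte also encodes the gpio source. */
struct example_mipi_gpio_v2 {
	u8 gpio_number;
	u8 flags;	/* bit 0: value to drive; source bits not shown above */
};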
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 56eff6004bc0..61440e5c2563 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -156,8 +156,10 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
- /* wait at least 0.5 us after ungating before enabling VCO */
- usleep_range(1, 10);
+ /* wait at least 0.5 us after ungating before enabling VCO,
+ * allow hrtimer subsystem optimization by relaxing timing
+ */
+ usleep_range(10, 50);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
@@ -351,7 +353,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
- if (IS_BROXTON(to_i915(encoder->base.dev)))
+ if (IS_GEN9_LP(to_i915(encoder->base.dev)))
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
@@ -504,7 +506,7 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
{
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return bxt_dsi_pll_is_enabled(dev_priv);
MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -519,7 +521,7 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
return bxt_compute_dsi_pll(encoder, config);
return -ENODEV;
@@ -532,7 +534,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_enable_dsi_pll(encoder, config);
}
@@ -542,7 +544,7 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_disable_dsi_pll(encoder);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_disable_dsi_pll(encoder);
}
@@ -566,7 +568,7 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 708645443046..50da89dcb92b 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -422,9 +422,8 @@ static enum port intel_dvo_port(i915_reg_t dvo_reg)
return PORT_C;
}
-void intel_dvo_init(struct drm_device *dev)
+void intel_dvo_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_dvo *intel_dvo;
struct intel_connector *intel_connector;
@@ -511,7 +510,7 @@ void intel_dvo_init(struct drm_device *dev)
continue;
port = intel_dvo_port(dvo->dvo_reg);
- drm_encoder_init(dev, &intel_encoder->base,
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type,
"DVO %c", port_name(port));
@@ -523,14 +522,14 @@ void intel_dvo_init(struct drm_device *dev)
case INTEL_DVO_CHIP_TMDS:
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
(1 << INTEL_OUTPUT_DVO);
- drm_connector_init(dev, connector,
+ drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
intel_encoder->cloneable = 0;
- drm_connector_init(dev, connector,
+ drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
encoder_type = DRM_MODE_ENCODER_LVDS;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 3da4d466e332..371acf109e34 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -111,13 +111,12 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/**
* intel_engines_init() - allocate, populate and init the Engine Command Streamers
- * @dev: DRM device.
+ * @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init(struct drm_device *dev)
+int intel_engines_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
@@ -257,7 +256,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
WARN_ON(engine->scratch);
- obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
+ obj = i915_gem_object_create_stolen(engine->i915, size);
if (!obj)
obj = i915_gem_object_create_internal(engine->i915, size);
if (IS_ERR(obj)) {
@@ -265,7 +264,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj);
}
- vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unref;
@@ -305,15 +304,30 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
{
int ret;
- ret = intel_engine_init_breadcrumbs(engine);
+ /* We may need to do things with the shrinker which
+ * require us to immediately switch back to the default
+ * context. This can cause a problem as pinning the
+ * default context also requires GTT space which may not
+ * be available. To avoid this we always pin the default
+ * context.
+ */
+ ret = engine->context_pin(engine, engine->i915->kernel_context);
if (ret)
return ret;
+ ret = intel_engine_init_breadcrumbs(engine);
+ if (ret)
+ goto err_unpin;
+
ret = i915_gem_render_state_init(engine);
if (ret)
- return ret;
+ goto err_unpin;
return 0;
+
+err_unpin:
+ engine->context_unpin(engine, engine->i915->kernel_context);
+ return ret;
}
/**
@@ -331,6 +345,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
intel_engine_fini_breadcrumbs(engine);
intel_engine_cleanup_cmd_parser(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+
+ engine->context_unpin(engine, engine->i915->kernel_context);
}
u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index f3a1d6a5cabe..89fe5c8464df 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -188,7 +188,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
- if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
+ if (params->fb.format->cpp[0] == 2)
dpfc_ctl |= DPFC_CTL_LIMIT_2X;
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
@@ -235,7 +235,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
int threshold = dev_priv->fbc.threshold;
dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
- if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
+ if (params->fb.format->cpp[0] == 2)
threshold++;
switch (threshold) {
@@ -305,7 +305,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
- if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
+ if (params->fb.format->cpp[0] == 2)
threshold++;
switch (threshold) {
@@ -541,7 +541,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
end = ggtt->stolen_size - 8 * 1024 * 1024;
else
- end = ggtt->stolen_usable_size;
+ end = U64_MAX;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
@@ -584,7 +584,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
- fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);
+ fb_cpp = fbc->state_cache.fb.format->cpp[0];
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
@@ -754,7 +754,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
if (!cache->plane.visible)
return;
- cache->fb.pixel_format = fb->pixel_format;
+ cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
cache->vma = plane_state->vma;
@@ -812,7 +812,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
+ if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
fbc->no_fbc_reason = "pixel format is invalid";
return false;
}
@@ -883,7 +883,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->crtc.plane = crtc->plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
- params->fb.pixel_format = cache->fb.pixel_format;
+ params->fb.format = cache->fb.format;
params->fb.stride = cache->fb.stride;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
@@ -1284,7 +1284,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
for_each_intel_crtc(&dev_priv->drm, crtc)
if (intel_crtc_active(crtc) &&
- to_intel_plane_state(crtc->base.primary->state)->base.visible)
+ crtc->base.primary->state->visible)
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
}
@@ -1305,7 +1305,7 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
if (!HAS_FBC(dev_priv))
return 0;
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
return 1;
return 0;
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f4a8c4fc57c4..1b8ba2e77539 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -145,9 +145,9 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
* important and we should probably use that space with FBC or other
* features. */
if (size * 2 < ggtt->stolen_usable_size)
- obj = i915_gem_object_create_stolen(dev, size);
+ obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
- obj = i915_gem_object_create(dev, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = PTR_ERR(obj);
@@ -261,7 +261,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* This driver doesn't need a VT switch to restore the mode on resume */
info->skip_vt_switch = true;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
/* If the object is shmemfs backed, it will have given us zeroed pages.
@@ -447,7 +447,7 @@ retry:
connector->name);
/* go for command line mode first */
- modes[i] = drm_pick_cmdline_mode(fb_conn, width, height);
+ modes[i] = drm_pick_cmdline_mode(fb_conn);
/* try for preferred next */
if (!modes[i]) {
@@ -621,7 +621,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* rather than the current pipe's, since they differ.
*/
cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
- cur_size = cur_size * fb->base.bits_per_pixel / 8;
+ cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
pipe_name(intel_crtc->pipe),
@@ -632,14 +632,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(dev, cur_size,
- fb->base.pixel_format,
+ fb->base.format->format,
fb->base.modifier);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
- fb->base.bits_per_pixel,
+ fb->base.format->cpp[0] * 8,
cur_size);
if (cur_size > max_size) {
@@ -660,7 +660,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
goto out;
}
- ifbdev->preferred_bpp = fb->base.bits_per_pixel;
+ ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8;
ifbdev->fb = fb;
drm_framebuffer_reference(&ifbdev->fb->base);
@@ -713,8 +713,7 @@ int intel_fbdev_init(struct drm_device *dev)
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
- ret = drm_fb_helper_init(dev, &ifbdev->helper,
- INTEL_INFO(dev_priv)->num_pipes, 4);
+ ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
if (ret) {
kfree(ifbdev);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 324ea902558b..25691f0e4c50 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -23,15 +23,6 @@
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H
-/*
- * This file is partially autogenerated, although currently with some manual
- * fixups afterwards. In future, it should be entirely autogenerated, in order
- * to ensure that the definitions herein remain in sync with those used by the
- * GuC's own firmware.
- *
- * EDITING THIS FILE IS THEREFORE NOT RECOMMENDED - YOUR CHANGES MAY BE LOST.
- */
-
#define GFXCORE_FAMILY_GEN9 12
#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff
@@ -154,7 +145,7 @@
* The GuC firmware layout looks like this:
*
* +-------------------------------+
- * | guc_css_header |
+ * | uc_css_header |
* | |
* | contains major/minor version |
* +-------------------------------+
@@ -181,9 +172,16 @@
* 3. Length info of each component can be found in header, in dwords.
* 4. Modulus and exponent key are not required by driver. They may not appear
* in fw. So driver will load a truncated firmware in this case.
+ *
+ * HuC firmware layout is the same as the GuC firmware layout.
+ *
+ * The HuC firmware css header differs only in where the version information
+ * is saved. The uc_css_header is unified to support both: the driver should
+ * read the HuC version from uc_css_header.huc.sw_version, and the GuC
+ * version from uc_css_header.guc.sw_version.
*/
-struct guc_css_header {
+struct uc_css_header {
uint32_t module_type;
/* header_size includes all non-uCode bits, including css_header, rsa
* key, modulus key and exponent data. */
@@ -214,8 +212,16 @@ struct guc_css_header {
char username[8];
char buildnumber[12];
- uint32_t device_id;
- uint32_t guc_sw_version;
+ union {
+ struct {
+ uint32_t branch_client_version;
+ uint32_t sw_version;
+ } guc;
+ struct {
+ uint32_t sw_version;
+ uint32_t reserved;
+ } huc;
+ };
uint32_t prod_preprod_fw;
uint32_t reserved[12];
uint32_t header_info;
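As an aside, a minimal sketch (not part of the patch) of how the packed sw_version words from the union above are split into major/minor halves; the same >> 16 / & 0xFFFF arithmetic is applied in intel_guc_loader.c below, and the helper name here is hypothetical:

static inline void uc_unpack_version(uint32_t sw_version,
                                     uint16_t *major, uint16_t *minor)
{
        /* Major and minor are two bytes each, packed into one 32-bit word. */
        *major = sw_version >> 16;
        *minor = sw_version & 0xFFFF;
}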
@@ -489,18 +495,19 @@ union guc_log_control {
} __packed;
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
-enum host2guc_action {
- HOST2GUC_ACTION_DEFAULT = 0x0,
- HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
- HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
- HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
- HOST2GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
- HOST2GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
- HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
- HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
- HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
- HOST2GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
- HOST2GUC_ACTION_LIMIT
+enum intel_guc_action {
+ INTEL_GUC_ACTION_DEFAULT = 0x0,
+ INTEL_GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
+ INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
+ INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30,
+ INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
+ INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
+ INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
+ INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
+ INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
+ INTEL_GUC_ACTION_LIMIT
};
/*
@@ -509,22 +516,22 @@ enum host2guc_action {
* by the fact that all the MASK bits are set. The remaining bits
* give more detail.
*/
-#define GUC2HOST_RESPONSE_MASK ((u32)0xF0000000)
-#define GUC2HOST_IS_RESPONSE(x) ((u32)(x) >= GUC2HOST_RESPONSE_MASK)
-#define GUC2HOST_STATUS(x) (GUC2HOST_RESPONSE_MASK | (x))
+#define INTEL_GUC_RECV_MASK ((u32)0xF0000000)
+#define INTEL_GUC_RECV_IS_RESPONSE(x) ((u32)(x) >= INTEL_GUC_RECV_MASK)
+#define INTEL_GUC_RECV_STATUS(x) (INTEL_GUC_RECV_MASK | (x))
/* GUC will return status back to SOFT_SCRATCH_O_REG */
-enum guc2host_status {
- GUC2HOST_STATUS_SUCCESS = GUC2HOST_STATUS(0x0),
- GUC2HOST_STATUS_ALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x10),
- GUC2HOST_STATUS_DEALLOCATE_DOORBELL_FAIL = GUC2HOST_STATUS(0x20),
- GUC2HOST_STATUS_GENERIC_FAIL = GUC2HOST_STATUS(0x0000F000)
+enum intel_guc_status {
+ INTEL_GUC_STATUS_SUCCESS = INTEL_GUC_RECV_STATUS(0x0),
+ INTEL_GUC_STATUS_ALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x10),
+ INTEL_GUC_STATUS_DEALLOCATE_DOORBELL_FAIL = INTEL_GUC_RECV_STATUS(0x20),
+ INTEL_GUC_STATUS_GENERIC_FAIL = INTEL_GUC_RECV_STATUS(0x0000F000)
};
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
-enum guc2host_message {
- GUC2HOST_MSG_CRASH_DUMP_POSTED = (1 << 1),
- GUC2HOST_MSG_FLUSH_LOG_BUFFER = (1 << 3)
+enum intel_guc_recv_message {
+ INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
+ INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER = BIT(3)
};
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 34d6ad2cf7c1..2f1cf9aea04e 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -28,7 +28,7 @@
*/
#include <linux/firmware.h>
#include "i915_drv.h"
-#include "intel_guc.h"
+#include "intel_uc.h"
/**
* DOC: GuC-specific firmware loader
@@ -51,12 +51,6 @@
* 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
* used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
*
- * Firmware log:
- * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
- * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
- * i915_guc_load_status will print out firmware loading status and scratch
- * registers value.
- *
*/
#define SKL_FW_MAJOR 6
@@ -81,16 +75,16 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
/* User-friendly representation of an enum */
-const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
{
switch (status) {
- case GUC_FIRMWARE_FAIL:
+ case INTEL_UC_FIRMWARE_FAIL:
return "FAIL";
- case GUC_FIRMWARE_NONE:
+ case INTEL_UC_FIRMWARE_NONE:
return "NONE";
- case GUC_FIRMWARE_PENDING:
+ case INTEL_UC_FIRMWARE_PENDING:
return "PENDING";
- case GUC_FIRMWARE_SUCCESS:
+ case INTEL_UC_FIRMWARE_SUCCESS:
return "SUCCESS";
default:
return "UNKNOWN!";
@@ -220,14 +214,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
if (guc->ads_vma) {
- u32 ads = i915_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+ u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
}
/* If GuC submission is enabled, set up additional parameters here */
if (i915.enable_guc_submission) {
- u32 pgs = i915_ggtt_offset(dev_priv->guc.ctx_pool_vma);
+ u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
pgs >>= PAGE_SHIFT;
@@ -278,7 +272,7 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
struct i915_vma *vma)
{
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
unsigned long offset;
struct sg_table *sg = vma->pages;
u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
@@ -297,7 +291,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
/* Set the source address for the new blob */
- offset = i915_ggtt_offset(vma) + guc_fw->header_offset;
+ offset = guc_ggtt_offset(vma) + guc_fw->header_offset;
I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
@@ -334,12 +328,12 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
return ret;
}
-static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
{
u32 wopcm_size = GUC_WOPCM_TOP;
/* On BXT, the top of WOPCM is reserved for RC6 context */
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
return wopcm_size;
@@ -350,29 +344,27 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
*/
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
{
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
struct i915_vma *vma;
int ret;
- ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
+ ret = i915_gem_object_set_to_gtt_domain(guc_fw->obj, false);
if (ret) {
DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
return ret;
}
- vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (IS_ERR(vma)) {
DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
return PTR_ERR(vma);
}
- /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
- I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* init WOPCM */
- I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
+ I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
/* Enable MIA caching. GuC clock gating is disabled. */
@@ -388,7 +380,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
@@ -437,7 +429,7 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv)
/**
* intel_guc_setup() - finish preparing the GuC for activity
- * @dev: drm device
+ * @dev_priv: i915 device private
*
* Called from gem_init_hw() during driver loading and also after a GPU reset.
*
@@ -448,17 +440,16 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv)
*
* Return: non-zero code on error
*/
-int intel_guc_setup(struct drm_device *dev)
+int intel_guc_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
- const char *fw_path = guc_fw->guc_fw_path;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ const char *fw_path = guc_fw->path;
int retries, ret, err;
DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
fw_path,
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
- intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+ intel_uc_fw_status_repr(guc_fw->fetch_status),
+ intel_uc_fw_status_repr(guc_fw->load_status));
/* Loading forbidden, or no firmware to load? */
if (!i915.enable_guc_loading) {
@@ -476,10 +467,10 @@ int intel_guc_setup(struct drm_device *dev)
}
/* Fetch failed, or already fetched but failed to load? */
- if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
+ if (guc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) {
err = -EIO;
goto fail;
- } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
+ } else if (guc_fw->load_status == INTEL_UC_FIRMWARE_FAIL) {
err = -ENOEXEC;
goto fail;
}
@@ -487,11 +478,14 @@ int intel_guc_setup(struct drm_device *dev)
guc_interrupts_release(dev_priv);
gen9_reset_guc_interrupts(dev_priv);
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+ /* We need to notify the guc whenever we change the GGTT */
+ i915_ggtt_enable_guc(dev_priv);
+
+ guc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
- intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+ intel_uc_fw_status_repr(guc_fw->fetch_status),
+ intel_uc_fw_status_repr(guc_fw->load_status));
err = i915_guc_submission_init(dev_priv);
if (err)
@@ -512,6 +506,7 @@ int intel_guc_setup(struct drm_device *dev)
if (err)
goto fail;
+ intel_huc_load(dev_priv);
err = guc_ucode_xfer(dev_priv);
if (!err)
break;
@@ -523,11 +518,13 @@ int intel_guc_setup(struct drm_device *dev)
"retry %d more time(s)\n", err, retries);
}
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
+ guc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
- intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+ intel_uc_fw_status_repr(guc_fw->fetch_status),
+ intel_uc_fw_status_repr(guc_fw->load_status));
+
+ intel_guc_auth_huc(dev_priv);
if (i915.enable_guc_submission) {
if (i915.guc_log_level >= 0)
@@ -542,12 +539,13 @@ int intel_guc_setup(struct drm_device *dev)
return 0;
fail:
- if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
+ if (guc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
+ guc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
+ i915_ggtt_disable_guc(dev_priv);
/*
* We've failed to load the firmware :(
@@ -588,141 +586,156 @@ fail:
return ret;
}
-static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
struct drm_i915_gem_object *obj;
- const struct firmware *fw;
- struct guc_css_header *css;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
size_t size;
int err;
- DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
- intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+ DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
- err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
+ err = request_firmware(&fw, uc_fw->path, &pdev->dev);
if (err)
goto fail;
if (!fw)
goto fail;
- DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
- guc_fw->guc_fw_path, fw);
+ DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
+ uc_fw->path, fw);
/* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct guc_css_header)) {
+ if (fw->size < sizeof(struct uc_css_header)) {
DRM_NOTE("Firmware header is missing\n");
goto fail;
}
- css = (struct guc_css_header *)fw->data;
+ css = (struct uc_css_header *)fw->data;
/* Firmware bits always start from header */
- guc_fw->header_offset = 0;
- guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+ uc_fw->header_offset = 0;
+ uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
- if (guc_fw->header_size != sizeof(struct guc_css_header)) {
+ if (uc_fw->header_size != sizeof(struct uc_css_header)) {
DRM_NOTE("CSS header definition mismatch\n");
goto fail;
}
/* then, uCode */
- guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
- guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+ uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
DRM_NOTE("RSA key size is bad\n");
goto fail;
}
- guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
- guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+ uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
/* At least, it should have header, uCode and RSA. Size of all three. */
- size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
+ size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
if (fw->size < size) {
DRM_NOTE("Missing firmware components\n");
goto fail;
}
- /* Header and uCode will be loaded to WOPCM. Size of the two. */
- size = guc_fw->header_size + guc_fw->ucode_size;
- if (size > guc_wopcm_size(to_i915(dev))) {
- DRM_NOTE("Firmware is too large to fit in WOPCM\n");
- goto fail;
- }
-
/*
* The GuC firmware image has the version number embedded at a well-known
* offset within the firmware blob; note that major / minor version are
* TWO bytes each (i.e. u16), although all pointers and offsets are defined
* in terms of bytes (u8).
*/
- guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
- guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
-
- if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
- guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
- DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
- guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
- guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+ switch (uc_fw->fw) {
+ case INTEL_UC_FW_TYPE_GUC:
+ /* Header and uCode will be loaded to WOPCM. Size of the two. */
+ size = uc_fw->header_size + uc_fw->ucode_size;
+
+ /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
+ if (size > intel_guc_wopcm_size(dev_priv)) {
+ DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+ goto fail;
+ }
+ uc_fw->major_ver_found = css->guc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = css->huc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ break;
+
+ default:
+ DRM_ERROR("Unknown firmware type %d\n", uc_fw->fw);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
err = -ENOEXEC;
goto fail;
}
DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
- guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
- guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- mutex_lock(&dev->struct_mutex);
- obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
- mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
if (IS_ERR_OR_NULL(obj)) {
err = obj ? PTR_ERR(obj) : -ENOMEM;
goto fail;
}
- guc_fw->guc_fw_obj = obj;
- guc_fw->guc_fw_size = fw->size;
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
- DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
- guc_fw->guc_fw_obj);
+ DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
+ uc_fw->obj);
release_firmware(fw);
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
return;
fail:
- DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
- guc_fw->guc_fw_path, err);
- DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
- err, fw, guc_fw->guc_fw_obj);
+ DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
+ uc_fw->path, err);
+ DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+ err, fw, uc_fw->obj);
- mutex_lock(&dev->struct_mutex);
- obj = guc_fw->guc_fw_obj;
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ obj = uc_fw->obj;
if (obj)
i915_gem_object_put(obj);
- guc_fw->guc_fw_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
+ uc_fw->obj = NULL;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
release_firmware(fw); /* OK even if fw is NULL */
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
}
/**
* intel_guc_init() - define parameters and fetch firmware
- * @dev: drm device
+ * @dev_priv: i915 device private
*
* Called early during driver load, but after GEM is initialised.
*
* The firmware will be transferred to the GuC's memory later,
* when intel_guc_setup() is called.
*/
-void intel_guc_init(struct drm_device *dev)
+void intel_guc_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
const char *fw_path;
if (!HAS_GUC(dev_priv)) {
@@ -740,24 +753,23 @@ void intel_guc_init(struct drm_device *dev)
fw_path = NULL;
} else if (IS_SKYLAKE(dev_priv)) {
fw_path = I915_SKL_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
- guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
+ guc_fw->major_ver_wanted = SKL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
fw_path = I915_BXT_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
- guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
+ guc_fw->major_ver_wanted = BXT_FW_MAJOR;
+ guc_fw->minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv)) {
fw_path = I915_KBL_GUC_UCODE;
- guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
- guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
+ guc_fw->major_ver_wanted = KBL_FW_MAJOR;
+ guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else {
fw_path = ""; /* unknown device */
}
- guc_fw->guc_dev = dev;
- guc_fw->guc_fw_path = fw_path;
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
- guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
+ guc_fw->path = fw_path;
+ guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+ guc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
/* Early (and silent) return if GuC loading is disabled */
if (!i915.enable_guc_loading)
@@ -767,30 +779,29 @@ void intel_guc_init(struct drm_device *dev)
if (*fw_path == '\0')
return;
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
+ guc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
- guc_fw_fetch(dev, guc_fw);
+ intel_uc_fw_fetch(dev_priv, guc_fw);
/* status must now be FAIL or SUCCESS */
}
/**
* intel_guc_fini() - clean up all allocated resources
- * @dev: drm device
+ * @dev_priv: i915 device private
*/
-void intel_guc_fini(struct drm_device *dev)
+void intel_guc_fini(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+ struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&dev_priv->drm.struct_mutex);
guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
- if (guc_fw->guc_fw_obj)
- i915_gem_object_put(guc_fw->guc_fw_obj);
- guc_fw->guc_fw_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
+ if (guc_fw->obj)
+ i915_gem_object_put(guc_fw->obj);
+ guc_fw->obj = NULL;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
+ guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
new file mode 100644
index 000000000000..5c0f9a49da0e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -0,0 +1,658 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include "i915_drv.h"
+
+static void guc_log_capture_logs(struct intel_guc *guc);
+
+/**
+ * DOC: GuC firmware log
+ *
+ * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
+ * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
+ * i915_guc_load_status will print out firmware loading status and scratch
+ * registers value.
+ *
+ */
+
+static int guc_log_flush_complete(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static int guc_log_flush(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
+ 0
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static int guc_log_control(struct intel_guc *guc, u32 control_val)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
+ control_val
+ };
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+
+/*
+ * Sub buffer switch callback. Called whenever relay has to switch to a new
+ * sub buffer, relay stays on the same sub buffer if 0 is returned.
+ */
+static int subbuf_start_callback(struct rchan_buf *buf,
+ void *subbuf,
+ void *prev_subbuf,
+ size_t prev_padding)
+{
+ /* Use no-overwrite mode by default, where relay will stop accepting
+ * new data if there are no empty sub buffers left.
+ * There is no strict synchronization enforced by relay between Consumer
+ * and Producer. In overwrite mode, there is a possibility of getting
+ * inconsistent/garbled data, as the producer could be writing onto the
+ * same sub buffer from which Consumer is reading. This can't be avoided
+ * unless Consumer is fast enough and can always run in tandem with
+ * Producer.
+ */
+ if (relay_buf_full(buf))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * file_create() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_callback(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ /* This is to enable the use of a single buffer for the relay channel and
+ * correspondingly have a single file exposed to User, through which
+ * it can collect the logs in order without any post-processing.
+ * Need to set 'is_global' even if parent is NULL for early logging.
+ */
+ *is_global = 1;
+
+ if (!parent)
+ return NULL;
+
+ /* Not using the channel filename passed as an argument, since for each
+ * channel relay appends the corresponding CPU number to the filename
+ * passed in relay_open(). This should be fine as relay just needs a
+ * dentry of the file associated with the channel buffer and that file's
+ * name need not be the same as the filename passed as an argument.
+ */
+ buf_file = debugfs_create_file("guc_log", mode,
+ parent, buf, &relay_file_operations);
+ return buf_file;
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_callback(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+ return 0;
+}
+
+/* relay channel callbacks */
+static struct rchan_callbacks relay_callbacks = {
+ .subbuf_start = subbuf_start_callback,
+ .create_buf_file = create_buf_file_callback,
+ .remove_buf_file = remove_buf_file_callback,
+};
+
+static void guc_log_remove_relay_file(struct intel_guc *guc)
+{
+ relay_close(guc->log.relay_chan);
+}
+
+static int guc_log_create_relay_channel(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+
+ /* Keep the size of sub buffers same as shared log buffer */
+ subbuf_size = guc->log.vma->obj->base.size;
+
+ /* Store up to 8 snapshots, which is large enough to buffer sufficient
+ * boot time logs and provides enough leeway to User, in terms of
+ * latency, for consuming the logs from relay. Also doesn't take
+ * up too much memory.
+ */
+ n_subbufs = 8;
+
+ guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+ n_subbufs, &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+ return -ENOMEM;
+ }
+
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ guc->log.relay_chan = guc_log_relay_chan;
+ return 0;
+}
+
+static int guc_log_create_relay_file(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct dentry *log_dir;
+ int ret;
+
+ /* For now create the log file in /sys/kernel/debug/dri/0 dir */
+ log_dir = dev_priv->drm.primary->debugfs_root;
+
+ /* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs
+ * is not mounted and so we can't create the relay file.
+ * The relay API seems to fit well with debugfs only; to make use of
+ * relay there are 3 requirements which can be met cleanly only for a
+ * debugfs file :-
+ * i) Need the associated dentry pointer of the file, while opening the
+ * relay channel.
+ * ii) Should be able to use 'relay_file_operations' fops for the file.
+ * iii) Set the 'i_private' field of file's inode to the pointer of
+ * relay channel buffer.
+ */
+ if (!log_dir) {
+ DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
+ return -ENODEV;
+ }
+
+ ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
+ if (ret) {
+ DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void guc_move_to_next_buf(struct intel_guc *guc)
+{
+ /* Make sure the updates made in the sub buffer are visible when
+ * Consumer sees the following update to offset inside the sub buffer.
+ */
+ smp_wmb();
+
+ /* All data has been written, so now move the offset of sub buffer. */
+ relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+
+ /* Switch to the next sub buffer */
+ relay_flush(guc->log.relay_chan);
+}
+
+static void *guc_get_write_buffer(struct intel_guc *guc)
+{
+ if (!guc->log.relay_chan)
+ return NULL;
+
+ /* Just get the base address of a new sub buffer and copy data into it
+ * ourselves. NULL will be returned in no-overwrite mode, if all sub
+ * buffers are full. Could have used the relay_write() to indirectly
+ * copy the data, but that would have been bit convoluted, as we need to
+ * write to only certain locations inside a sub buffer which cannot be
+ * done without using relay_reserve() along with relay_write(). So its
+ * better to use relay_reserve() alone.
+ */
+ return relay_reserve(guc->log.relay_chan, 0);
+}
+
+static bool guc_check_log_buf_overflow(struct intel_guc *guc,
+ enum guc_log_buffer_type type,
+ unsigned int full_cnt)
+{
+ unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+ bool overflow = false;
+
+ if (full_cnt != prev_full_cnt) {
+ overflow = true;
+
+ guc->log.prev_overflow_count[type] = full_cnt;
+ guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+
+ if (full_cnt < prev_full_cnt) {
+ /* buffer_full_cnt is a 4 bit counter */
+ guc->log.total_overflow_count[type] += 16;
+ }
+ DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+ }
+
+ return overflow;
+}
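Because buffer_full_cnt is only a 4-bit hardware counter, the accounting above has to allow for wrap-around. A small worked sketch of the same arithmetic (standalone, values chosen purely for illustration):

unsigned int prev_full_cnt = 14;  /* value sampled last time */
unsigned int full_cnt = 2;        /* counter wrapped past 15 back to 2 */
unsigned int delta = full_cnt - prev_full_cnt;  /* wraps in unsigned math */

if (full_cnt < prev_full_cnt)
        delta += 16;              /* 4-bit counter: add one full wrap */

/* delta == 4: the buffer overflowed at counts 15, 0, 1 and 2 */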
+
+static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+{
+ switch (type) {
+ case GUC_ISR_LOG_BUFFER:
+ return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+ case GUC_DPC_LOG_BUFFER:
+ return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+ case GUC_CRASH_DUMP_LOG_BUFFER:
+ return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+ default:
+ MISSING_CASE(type);
+ }
+
+ return 0;
+}
+
+static void guc_read_update_log_buffer(struct intel_guc *guc)
+{
+ unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
+ struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
+ struct guc_log_buffer_state log_buf_state_local;
+ enum guc_log_buffer_type type;
+ void *src_data, *dst_data;
+ bool new_overflow;
+
+ if (WARN_ON(!guc->log.buf_addr))
+ return;
+
+ /* Get the pointer to shared GuC log buffer */
+ log_buf_state = src_data = guc->log.buf_addr;
+
+ /* Get the pointer to local buffer to store the logs */
+ log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+
+ /* Actual logs are present from the 2nd page */
+ src_data += PAGE_SIZE;
+ dst_data += PAGE_SIZE;
+
+ for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+ /* Make a copy of the state structure, inside GuC log buffer
+ * (which is uncached mapped), on the stack to avoid reading
+ * from it multiple times.
+ */
+ memcpy(&log_buf_state_local, log_buf_state,
+ sizeof(struct guc_log_buffer_state));
+ buffer_size = guc_get_log_buffer_size(type);
+ read_offset = log_buf_state_local.read_ptr;
+ write_offset = log_buf_state_local.sampled_write_ptr;
+ full_cnt = log_buf_state_local.buffer_full_cnt;
+
+ /* Bookkeeping stuff */
+ guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
+ new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+
+ /* Update the state of shared log buffer */
+ log_buf_state->read_ptr = write_offset;
+ log_buf_state->flush_to_file = 0;
+ log_buf_state++;
+
+ if (unlikely(!log_buf_snapshot_state))
+ continue;
+
+ /* First copy the state structure in snapshot buffer */
+ memcpy(log_buf_snapshot_state, &log_buf_state_local,
+ sizeof(struct guc_log_buffer_state));
+
+ /* The write pointer could have been updated by the GuC firmware
+ * after sending the flush interrupt to the host, so for consistency
+ * set the write pointer in the snapshot buffer to the same value
+ * as sampled_write_ptr.
+ */
+ log_buf_snapshot_state->write_ptr = write_offset;
+ log_buf_snapshot_state++;
+
+ /* Now copy the actual logs. */
+ if (unlikely(new_overflow)) {
+ /* copy the whole buffer in case of overflow */
+ read_offset = 0;
+ write_offset = buffer_size;
+ } else if (unlikely((read_offset > buffer_size) ||
+ (write_offset > buffer_size))) {
+ DRM_ERROR("invalid log buffer state\n");
+ /* copy whole buffer as offsets are unreliable */
+ read_offset = 0;
+ write_offset = buffer_size;
+ }
+
+ /* Just copy the newly written data */
+ if (read_offset > write_offset) {
+ i915_memcpy_from_wc(dst_data, src_data, write_offset);
+ bytes_to_copy = buffer_size - read_offset;
+ } else {
+ bytes_to_copy = write_offset - read_offset;
+ }
+ i915_memcpy_from_wc(dst_data + read_offset,
+ src_data + read_offset, bytes_to_copy);
+
+ src_data += buffer_size;
+ dst_data += buffer_size;
+ }
+
+ if (log_buf_snapshot_state)
+ guc_move_to_next_buf(guc);
+ else {
+ /* Use a rate-limited message to avoid a deluge, as logs might be
+ * getting consumed by User at a slow rate.
+ */
+ DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+ guc->log.capture_miss_count++;
+ }
+}
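The copy above treats the shared log as a circular buffer: when the read offset is ahead of the write offset, the newly written data wraps around and is copied in two segments. A standalone sketch of that split, using plain memcpy() in place of i915_memcpy_from_wc() (function and parameter names here are illustrative only):

#include <string.h>

static void copy_new_log_data(unsigned char *dst, const unsigned char *src,
                              unsigned int buffer_size,
                              unsigned int read_offset,
                              unsigned int write_offset)
{
        if (read_offset > write_offset) {
                /* Wrapped: head segment [0, write) ... */
                memcpy(dst, src, write_offset);
                /* ... plus tail segment [read, end of buffer) */
                memcpy(dst + read_offset, src + read_offset,
                       buffer_size - read_offset);
        } else {
                /* Linear: single segment [read, write) */
                memcpy(dst + read_offset, src + read_offset,
                       write_offset - read_offset);
        }
}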
+
+static void guc_log_cleanup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* First disable the flush interrupt */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ if (guc->log.flush_wq)
+ destroy_workqueue(guc->log.flush_wq);
+
+ guc->log.flush_wq = NULL;
+
+ if (guc->log.relay_chan)
+ guc_log_remove_relay_file(guc);
+
+ guc->log.relay_chan = NULL;
+
+ if (guc->log.buf_addr)
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+
+ guc->log.buf_addr = NULL;
+}
+
+static void capture_logs_work(struct work_struct *work)
+{
+ struct intel_guc *guc =
+ container_of(work, struct intel_guc, log.flush_work);
+
+ guc_log_capture_logs(guc);
+}
+
+static int guc_log_create_extras(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ void *vaddr;
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ /* Nothing to do */
+ if (i915.guc_log_level < 0)
+ return 0;
+
+ if (!guc->log.buf_addr) {
+ /* Create a WC (Uncached for read) vmalloc mapping of log
+ * buffer pages, so that we can directly get the data
+ * (up-to-date) from memory.
+ */
+ vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ return ret;
+ }
+
+ guc->log.buf_addr = vaddr;
+ }
+
+ if (!guc->log.relay_chan) {
+ /* Create a relay channel, so that we have buffers for storing
+ * the GuC firmware logs, the channel will be linked with a file
+ * later on when debugfs is registered.
+ */
+ ret = guc_log_create_relay_channel(guc);
+ if (ret)
+ return ret;
+ }
+
+ if (!guc->log.flush_wq) {
+ INIT_WORK(&guc->log.flush_work, capture_logs_work);
+
+ /*
+ * The GuC log buffer flush work item has to do register access
+ * to send the ack to GuC, and if this work item is not synced
+ * before suspend it can potentially get executed after the GFX
+ * device is suspended.
+ * By marking the WQ as freezable, we don't have to bother about
+ * flushing this work item from the suspend hooks; the pending
+ * work item, if any, will either be executed before the suspend
+ * or scheduled later on resume. This way the handling of the
+ * work item can be kept the same between system & rpm suspend.
+ */
+ guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (guc->log.flush_wq == NULL) {
+ DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void intel_guc_log_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ unsigned long offset;
+ uint32_t size, flags;
+
+ if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+ i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+
+ /* The first page is to save log buffer state. Allocate one
+ * extra page for others in case of overlap */
+ size = (1 + GUC_LOG_DPC_PAGES + 1 +
+ GUC_LOG_ISR_PAGES + 1 +
+ GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+
+ vma = guc->log.vma;
+ if (!vma) {
+ /* We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC based
+ * submissions.
+ */
+ if (WARN_ON(!i915_has_memcpy_from_wc())) {
+ /* logging will not be enabled */
+ i915.guc_log_level = -1;
+ return;
+ }
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma)) {
+ /* logging will be off */
+ i915.guc_log_level = -1;
+ return;
+ }
+
+ guc->log.vma = vma;
+
+ if (guc_log_create_extras(guc)) {
+ guc_log_cleanup(guc);
+ i915_vma_unpin_and_release(&guc->log.vma);
+ i915.guc_log_level = -1;
+ return;
+ }
+ }
+
+ /* each allocated unit is a page */
+ flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
+ (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
+ (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
+ (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+
+ offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+ guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
+
+static int guc_log_late_setup(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (i915.guc_log_level < 0)
+ return -EINVAL;
+
+ /* If log_level was set to -1 at boot time, then the setup needed to
+ * handle log buffer flush interrupts would not have been done yet,
+ * so do that now.
+ */
+ ret = guc_log_create_extras(guc);
+ if (ret)
+ goto err;
+
+ ret = guc_log_create_relay_file(guc);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ guc_log_cleanup(guc);
+ /* logging will remain off */
+ i915.guc_log_level = -1;
+ return ret;
+}
+
+static void guc_log_capture_logs(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ guc_read_update_log_buffer(guc);
+
+ /* Generally the device is expected to be active only at this
+ * time, so get/put should be really quick.
+ */
+ intel_runtime_pm_get(dev_priv);
+ guc_log_flush_complete(guc);
+ intel_runtime_pm_put(dev_priv);
+}
+
+static void guc_flush_logs(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+ return;
+
+ /* First disable the interrupts, they will be re-enabled afterwards */
+ gen9_disable_guc_interrupts(dev_priv);
+
+ /* Before initiating the forceful flush, wait for any pending/ongoing
+ * flush to complete otherwise forceful flush may not actually happen.
+ */
+ flush_work(&guc->log.flush_work);
+
+ /* Ask GuC to update the log buffer state */
+ guc_log_flush(guc);
+
+ /* GuC would have updated log buffer by now, so capture it */
+ guc_log_capture_logs(guc);
+}
+
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+
+ union guc_log_control log_param;
+ int ret;
+
+ log_param.value = control_val;
+
+ if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
+ log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+ return -EINVAL;
+
+ /* This combination doesn't make sense & won't have any effect */
+ if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+ return 0;
+
+ ret = guc_log_control(guc, log_param.value);
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
+ return ret;
+ }
+
+ i915.guc_log_level = log_param.verbosity;
+
+ /* If log_level was set to -1 at boot time, then the relay channel file
+ * wouldn't have been created by now and interrupts also would not have
+ * been enabled.
+ */
+ if (!dev_priv->guc.log.relay_chan) {
+ ret = guc_log_late_setup(guc);
+ if (!ret)
+ gen9_enable_guc_interrupts(dev_priv);
+ } else if (!log_param.logging_enabled) {
+ /* Once logging is disabled, GuC won't generate logs or send an
+ * interrupt. But there could be some data in the log buffer
+ * which is yet to be captured. So request GuC to update the log
+ * buffer state and then collect the leftover logs.
+ */
+ guc_flush_logs(guc);
+
+ /* As logging is disabled, update log level to reflect that */
+ i915.guc_log_level = -1;
+ } else {
+ /* In case interrupts were disabled, enable them now */
+ gen9_enable_guc_interrupts(dev_priv);
+ }
+
+ return ret;
+}
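For reference, a hedged sketch of how a caller (for example a debugfs write handler) could build the control word consumed by i915_guc_log_control(), using the guc_log_control union fields referenced above; the function name and chosen verbosity value are illustrative only:

static int example_enable_guc_logging(struct drm_i915_private *dev_priv)
{
        union guc_log_control param = { };

        param.logging_enabled = 1;                /* turn GuC logging on */
        param.verbosity = GUC_LOG_VERBOSITY_MIN;  /* any value in MIN..MAX */

        return i915_guc_log_control(dev_priv, param.value);
}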
+
+void i915_guc_log_register(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_late_setup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_submission)
+ return;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ guc_log_cleanup(&dev_priv->guc);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+}
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 290384e86c63..d23c0fcff751 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -67,6 +67,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return 0;
}
+ if (intel_vgpu_active(dev_priv)) {
+ DRM_DEBUG_DRIVER("GVT-g is disabled for guest\n");
+ goto bail;
+ }
+
if (!is_supported_device(dev_priv)) {
DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
goto bail;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 53df5b11bff4..f05971f5586f 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -236,13 +236,13 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
memset(&engine->hangcheck.instdone, 0,
sizeof(engine->hangcheck.instdone));
- return HANGCHECK_ACTIVE;
+ return ENGINE_ACTIVE_HEAD;
}
if (!subunits_stuck(engine))
- return HANGCHECK_ACTIVE;
+ return ENGINE_ACTIVE_SUBUNITS;
- return HANGCHECK_HUNG;
+ return ENGINE_DEAD;
}
static enum intel_engine_hangcheck_action
@@ -253,11 +253,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
u32 tmp;
ha = head_stuck(engine, acthd);
- if (ha != HANGCHECK_HUNG)
+ if (ha != ENGINE_DEAD)
return ha;
if (IS_GEN2(dev_priv))
- return HANGCHECK_HUNG;
+ return ENGINE_DEAD;
/* Is the chip hanging on a WAIT_FOR_EVENT?
* If so we can simply poke the RB_WAIT bit
@@ -270,25 +270,144 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
"Kicking stuck wait on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
+ return ENGINE_WAIT_KICK;
}
if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(engine)) {
default:
- return HANGCHECK_HUNG;
+ return ENGINE_DEAD;
case 1:
i915_handle_error(dev_priv, 0,
"Kicking stuck semaphore on %s",
engine->name);
I915_WRITE_CTL(engine, tmp);
- return HANGCHECK_KICK;
+ return ENGINE_WAIT_KICK;
case 0:
- return HANGCHECK_WAIT;
+ return ENGINE_WAIT;
}
}
- return HANGCHECK_HUNG;
+ return ENGINE_DEAD;
+}
+
+static void hangcheck_load_sample(struct intel_engine_cs *engine,
+ struct intel_engine_hangcheck *hc)
+{
+ /* We don't strictly need an irq-barrier here, as we are not
+ * serving an interrupt request, be paranoid in case the
+ * barrier has side-effects (such as preventing a broken
+ * cacheline snoop) and so be sure that we can see the seqno
+ * advance. If the seqno should stick, due to a stale
+ * cacheline, we would erroneously declare the GPU hung.
+ */
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
+
+ hc->acthd = intel_engine_get_active_head(engine);
+ hc->seqno = intel_engine_get_seqno(engine);
+}
+
+static void hangcheck_store_sample(struct intel_engine_cs *engine,
+ const struct intel_engine_hangcheck *hc)
+{
+ engine->hangcheck.acthd = hc->acthd;
+ engine->hangcheck.seqno = hc->seqno;
+ engine->hangcheck.action = hc->action;
+ engine->hangcheck.stalled = hc->stalled;
+}
+
+static enum intel_engine_hangcheck_action
+hangcheck_get_action(struct intel_engine_cs *engine,
+ const struct intel_engine_hangcheck *hc)
+{
+ if (engine->hangcheck.seqno != hc->seqno)
+ return ENGINE_ACTIVE_SEQNO;
+
+ if (i915_seqno_passed(hc->seqno, intel_engine_last_submit(engine)))
+ return ENGINE_IDLE;
+
+ return engine_stuck(engine, hc->acthd);
+}
+
+static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
+ struct intel_engine_hangcheck *hc)
+{
+ unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
+
+ hc->action = hangcheck_get_action(engine, hc);
+
+ /* We always increment the progress
+ * if the engine is busy and still processing
+ * the same request, so that no single request
+ * can run indefinitely (such as a chain of
+ * batches). The only time we do not increment
+ * the hangcheck score on this engine is if it
+ * is in a legitimate wait for another engine.
+ * In that case the waiting engine is a victim
+ * and we want to be sure we catch the right
+ * culprit. Then every time we do kick the
+ * ring, treat it as progress, as the seqno
+ * advancement should follow; if it does not,
+ * hangcheck will catch the hanging engine.
+ */
+
+ switch (hc->action) {
+ case ENGINE_IDLE:
+ case ENGINE_ACTIVE_SEQNO:
+ /* Clear head and subunit states on seqno movement */
+ hc->acthd = 0;
+
+ memset(&engine->hangcheck.instdone, 0,
+ sizeof(engine->hangcheck.instdone));
+
+ /* Intentional fall through */
+ case ENGINE_WAIT_KICK:
+ case ENGINE_WAIT:
+ engine->hangcheck.action_timestamp = jiffies;
+ break;
+
+ case ENGINE_ACTIVE_HEAD:
+ case ENGINE_ACTIVE_SUBUNITS:
+ /* Seqno stuck with still active engine gets leeway,
+ * in hopes that it is just a long shader.
+ */
+ timeout = I915_SEQNO_DEAD_TIMEOUT;
+ break;
+
+ case ENGINE_DEAD:
+ break;
+
+ default:
+ MISSING_CASE(hc->action);
+ }
+
+ hc->stalled = time_after(jiffies,
+ engine->hangcheck.action_timestamp + timeout);
+}
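The stalled decision above boils down to a timestamp comparison with a per-action timeout. A compact restatement as a standalone predicate, as a sketch only, reusing the timeout and action names introduced by this patch:

static bool engine_seems_stalled(unsigned long action_timestamp,
                                 enum intel_engine_hangcheck_action action)
{
        unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;

        /* An engine whose head or subunits still move gets extra leeway. */
        if (action == ENGINE_ACTIVE_HEAD || action == ENGINE_ACTIVE_SUBUNITS)
                timeout = I915_SEQNO_DEAD_TIMEOUT;

        return time_after(jiffies, action_timestamp + timeout);
}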
+
+static void hangcheck_declare_hang(struct drm_i915_private *i915,
+ unsigned int hung,
+ unsigned int stuck)
+{
+ struct intel_engine_cs *engine;
+ char msg[80];
+ unsigned int tmp;
+ int len;
+
+ /* If some rings hung but others were still busy, only
+ * blame the hanging rings in the synopsis.
+ */
+ if (stuck != hung)
+ hung &= ~stuck;
+ len = scnprintf(msg, sizeof(msg),
+ "%s on ", stuck == hung ? "No progress" : "Hang");
+ for_each_engine_masked(engine, i915, hung, tmp)
+ len += scnprintf(msg + len, sizeof(msg) - len,
+ "%s, ", engine->name);
+ msg[len-2] = '\0';
+
+ return i915_handle_error(i915, hung, msg);
}
/*
@@ -308,10 +427,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
enum intel_engine_id id;
unsigned int hung = 0, stuck = 0;
int busy_count = 0;
-#define BUSY 1
-#define KICK 5
-#define HUNG 20
-#define ACTIVE_DECAY 15
if (!i915.enable_hangcheck)
return;
@@ -319,6 +434,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return;
+
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
@@ -326,112 +444,26 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
- bool busy = intel_engine_has_waiter(engine);
- u64 acthd;
- u32 seqno;
- u32 submit;
+ struct intel_engine_hangcheck cur_state, *hc = &cur_state;
+ const bool busy = intel_engine_has_waiter(engine);
semaphore_clear_deadlocks(dev_priv);
- /* We don't strictly need an irq-barrier here, as we are not
- * serving an interrupt request, be paranoid in case the
- * barrier has side-effects (such as preventing a broken
- * cacheline snoop) and so be sure that we can see the seqno
- * advance. If the seqno should stick, due to a stale
- * cacheline, we would erroneously declare the GPU hung.
- */
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
- acthd = intel_engine_get_active_head(engine);
- seqno = intel_engine_get_seqno(engine);
- submit = intel_engine_last_submit(engine);
-
- if (engine->hangcheck.seqno == seqno) {
- if (i915_seqno_passed(seqno, submit)) {
- engine->hangcheck.action = HANGCHECK_IDLE;
- } else {
- /* We always increment the hangcheck score
- * if the engine is busy and still processing
- * the same request, so that no single request
- * can run indefinitely (such as a chain of
- * batches). The only time we do not increment
- * the hangcheck score on this ring, if this
- * engine is in a legitimate wait for another
- * engine. In that case the waiting engine is a
- * victim and we want to be sure we catch the
- * right culprit. Then every time we do kick
- * the ring, add a small increment to the
- * score so that we can catch a batch that is
- * being repeatedly kicked and so responsible
- * for stalling the machine.
- */
- engine->hangcheck.action =
- engine_stuck(engine, acthd);
-
- switch (engine->hangcheck.action) {
- case HANGCHECK_IDLE:
- case HANGCHECK_WAIT:
- break;
- case HANGCHECK_ACTIVE:
- engine->hangcheck.score += BUSY;
- break;
- case HANGCHECK_KICK:
- engine->hangcheck.score += KICK;
- break;
- case HANGCHECK_HUNG:
- engine->hangcheck.score += HUNG;
- break;
- }
- }
-
- if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
- hung |= intel_engine_flag(engine);
- if (engine->hangcheck.action != HANGCHECK_HUNG)
- stuck |= intel_engine_flag(engine);
- }
- } else {
- engine->hangcheck.action = HANGCHECK_ACTIVE;
-
- /* Gradually reduce the count so that we catch DoS
- * attempts across multiple batches.
- */
- if (engine->hangcheck.score > 0)
- engine->hangcheck.score -= ACTIVE_DECAY;
- if (engine->hangcheck.score < 0)
- engine->hangcheck.score = 0;
-
- /* Clear head and subunit states on seqno movement */
- acthd = 0;
-
- memset(&engine->hangcheck.instdone, 0,
- sizeof(engine->hangcheck.instdone));
+ hangcheck_load_sample(engine, hc);
+ hangcheck_accumulate_sample(engine, hc);
+ hangcheck_store_sample(engine, hc);
+
+ if (engine->hangcheck.stalled) {
+ hung |= intel_engine_flag(engine);
+ if (hc->action != ENGINE_DEAD)
+ stuck |= intel_engine_flag(engine);
}
- engine->hangcheck.seqno = seqno;
- engine->hangcheck.acthd = acthd;
busy_count += busy;
}
- if (hung) {
- char msg[80];
- unsigned int tmp;
- int len;
-
- /* If some rings hung but others were still busy, only
- * blame the hanging rings in the synopsis.
- */
- if (stuck != hung)
- hung &= ~stuck;
- len = scnprintf(msg, sizeof(msg),
- "%s on ", stuck == hung ? "No progress" : "Hang");
- for_each_engine_masked(engine, dev_priv, hung, tmp)
- len += scnprintf(msg + len, sizeof(msg) - len,
- "%s, ", engine->name);
- msg[len-2] = '\0';
-
- return i915_handle_error(dev_priv, hung, msg);
- }
+ if (hung)
+ hangcheck_declare_hang(dev_priv, hung, stuck);
/* Reset timer in case GPU hangs without another request being added */
if (busy_count)
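For orientation, a condensed sketch (not part of the patch; the names are taken from the hunks above) of how the refactored hangcheck pass fits together: sample each engine, classify the result, and declare a hang once an engine has stalled past its timeout.

	/* Condensed view of the per-engine loop added above; illustrative only. */
	for_each_engine(engine, dev_priv, id) {
		struct intel_engine_hangcheck sample;

		hangcheck_load_sample(engine, &sample);       /* snapshot acthd/seqno */
		hangcheck_accumulate_sample(engine, &sample); /* classify and stamp time */
		hangcheck_store_sample(engine, &sample);      /* commit to engine state */

		if (engine->hangcheck.stalled)
			hung |= intel_engine_flag(engine);
	}

	if (hung)
		hangcheck_declare_hang(dev_priv, hung, stuck);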
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 02d50e334ac6..ebae2bd83918 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -134,6 +134,7 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
static void g4x_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
@@ -188,13 +189,14 @@ static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -247,13 +249,14 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -304,13 +307,14 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
int i;
@@ -362,14 +366,14 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len)
{
const uint32_t *data = frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
i915_reg_t data_reg;
int i;
@@ -426,6 +430,7 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
* bytes by one.
*/
static void intel_write_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
union hdmi_infoframe *frame)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -444,14 +449,15 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
buffer[3] = 0;
len++;
- intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
+ intel_hdmi->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
union hdmi_infoframe frame;
int ret;
@@ -462,19 +468,17 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
return;
}
- if (intel_hdmi->rgb_quant_range_selectable) {
- if (intel_crtc->config->limited_color_range)
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_LIMITED;
- else
- frame.avi.quantization_range =
- HDMI_QUANTIZATION_RANGE_FULL;
- }
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi, adjusted_mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL,
+ intel_hdmi->rgb_quant_range_selectable);
- intel_write_infoframe(encoder, &frame);
+ intel_write_infoframe(encoder, crtc_state, &frame);
}
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
+static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
union hdmi_infoframe frame;
int ret;
@@ -487,27 +491,28 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
frame.spd.sdi = HDMI_SPD_SDI_PC;
- intel_write_infoframe(encoder, &frame);
+ intel_write_infoframe(encoder, crtc_state, &frame);
}
static void
intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state)
{
union hdmi_infoframe frame;
int ret;
ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
- adjusted_mode);
+ &crtc_state->base.adjusted_mode);
if (ret < 0)
return;
- intel_write_infoframe(encoder, &frame);
+ intel_write_infoframe(encoder, crtc_state, &frame);
}
static void g4x_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -561,28 +566,22 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
-static bool hdmi_sink_is_deep_color(struct drm_encoder *encoder)
+static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
{
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ struct drm_connector *connector = conn_state->connector;
/*
* HDMI cloning is only supported on g4x which doesn't
* support deep color or GCP infoframes anyway so no
* need to worry about multiple HDMI sinks here.
*/
- list_for_each_entry(connector, &dev->mode_config.connector_list, head)
- if (connector->encoder == encoder)
- return connector->display_info.bpc > 8;
- return false;
+ return connector->display_info.bpc > 8;
}
/*
@@ -628,15 +627,17 @@ static bool gcp_default_phase_possible(int pipe_bpp,
mode->crtc_htotal/2 % pixels_per_group == 0);
}
-static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
+static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg;
u32 val = 0;
if (HAS_DDI(dev_priv))
- reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
+ reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -645,12 +646,12 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
return false;
/* Indicate color depth whenever the sink supports deep color */
- if (hdmi_sink_is_deep_color(encoder))
+ if (hdmi_sink_is_deep_color(conn_state))
val |= GCP_COLOR_INDICATION;
/* Enable default_phase whenever the display mode is suitably aligned */
- if (gcp_default_phase_possible(crtc->config->pipe_bpp,
- &crtc->config->base.adjusted_mode))
+ if (gcp_default_phase_possible(crtc_state->pipe_bpp,
+ &crtc_state->base.adjusted_mode))
val |= GCP_DEFAULT_PHASE_ENABLE;
I915_WRITE(reg, val);
@@ -660,10 +661,11 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
static void ibx_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -699,23 +701,24 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- if (intel_hdmi_set_gcp_infoframe(encoder))
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -741,24 +744,25 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- if (intel_hdmi_set_gcp_infoframe(encoder))
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -793,25 +797,25 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- if (intel_hdmi_set_gcp_infoframe(encoder))
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
bool enable,
- const struct drm_display_mode *adjusted_mode)
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
- i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
u32 val = I915_READ(reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -826,15 +830,15 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
return;
}
- if (intel_hdmi_set_gcp_infoframe(encoder))
+ if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP_HSW;
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
- intel_hdmi_set_spd_infoframe(encoder);
- intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
+ intel_hdmi_set_avi_infoframe(encoder, crtc_state);
+ intel_hdmi_set_spd_infoframe(encoder, crtc_state);
+ intel_hdmi_set_hdmi_infoframe(encoder, crtc_state);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -853,31 +857,32 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
adapter, enable);
}
-static void intel_hdmi_prepare(struct intel_encoder *encoder)
+static void intel_hdmi_prepare(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
u32 hdmi_val;
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
hdmi_val = SDVO_ENCODING_HDMI;
- if (!HAS_PCH_SPLIT(dev_priv) && crtc->config->limited_color_range)
+ if (!HAS_PCH_SPLIT(dev_priv) && crtc_state->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
hdmi_val |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
- if (crtc->config->pipe_bpp > 24)
+ if (crtc_state->pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
- if (crtc->config->has_hdmi_sink)
+ if (crtc_state->has_hdmi_sink)
hdmi_val |= HDMI_MODE_SELECT_HDMI;
if (HAS_PCH_CPT(dev_priv))
@@ -980,9 +985,9 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- WARN_ON(!crtc->config->has_hdmi_sink);
+ WARN_ON(!pipe_config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
@@ -1016,14 +1021,13 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (crtc->config->has_audio)
+ if (pipe_config->has_audio)
temp |= SDVO_AUDIO_ENABLE;
/*
@@ -1067,7 +1071,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum pipe pipe = crtc->pipe;
u32 temp;
@@ -1129,7 +1133,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;
temp = I915_READ(intel_hdmi->hdmi_reg);
@@ -1171,7 +1175,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- intel_hdmi->set_infoframes(&encoder->base, false, NULL);
+ intel_hdmi->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
@@ -1247,7 +1251,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
- if (IS_BROXTON(dev_priv) && clock > 223333 && clock < 240000)
+ if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
return MODE_CLOCK_RANGE;
/* CHV DPLL can't generate 216-240 MHz */
@@ -1326,7 +1330,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
/* See CEA-861-E - 5.1 Default Encoding Parameters */
pipe_config->limited_color_range =
pipe_config->has_hdmi_sink &&
- drm_match_cea_mode(adjusted_mode) > 1;
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
} else {
pipe_config->limited_color_range =
intel_hdmi->limited_color_range;
@@ -1643,13 +1648,12 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- intel_hdmi_prepare(encoder);
+ intel_hdmi_prepare(encoder, pipe_config);
intel_hdmi->set_infoframes(&encoder->base,
pipe_config->has_hdmi_sink,
- adjusted_mode);
+ pipe_config, conn_state);
}
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
@@ -1660,7 +1664,6 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
vlv_phy_pre_encoder_enable(encoder);
@@ -1670,7 +1673,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
intel_hdmi->set_infoframes(&encoder->base,
pipe_config->has_hdmi_sink,
- adjusted_mode);
+ pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1681,7 +1684,7 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- intel_hdmi_prepare(encoder);
+ intel_hdmi_prepare(encoder, pipe_config);
vlv_phy_pre_pll_enable(encoder);
}
@@ -1690,7 +1693,7 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- intel_hdmi_prepare(encoder);
+ intel_hdmi_prepare(encoder, pipe_config);
chv_phy_pre_pll_enable(encoder);
}
@@ -1733,9 +1736,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
chv_phy_pre_encoder_enable(encoder);
@@ -1744,8 +1744,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
chv_set_phy_signal_level(encoder, 128, 102, false);
intel_hdmi->set_infoframes(&encoder->base,
- intel_crtc->config->has_hdmi_sink,
- adjusted_mode);
+ pipe_config->has_hdmi_sink,
+ pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@@ -1810,13 +1810,13 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
switch (port) {
case PORT_B:
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ddc_pin = GMBUS_PIN_1_BXT;
else
ddc_pin = GMBUS_PIN_DPB;
break;
case PORT_C:
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ddc_pin = GMBUS_PIN_2_BXT;
else
ddc_pin = GMBUS_PIN_DPC;
@@ -1934,10 +1934,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
}
}
-void intel_hdmi_init(struct drm_device *dev,
+void intel_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
@@ -1954,8 +1953,9 @@ void intel_hdmi_init(struct drm_device *dev,
intel_encoder = &intel_dig_port->base;
- drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+ "HDMI %c", port_name(port));
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 3d546c019de0..b62e3f8ad415 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_enable_locked(dev);
+ drm_kms_helper_poll_enable(dev);
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
@@ -511,7 +511,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
}
if (enabled)
- drm_kms_helper_poll_enable_locked(dev);
+ drm_kms_helper_poll_enable(dev);
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
new file mode 100644
index 000000000000..c144609425f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright © 2016-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "i915_drv.h"
+#include "intel_uc.h"
+
+/**
+ * DOC: HuC Firmware
+ *
+ * Motivation:
+ * GEN9 introduces a new dedicated firmware for use in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can make use of its
+ * capabilities by adding HuC-specific commands to batch buffers.
+ *
+ * Implementation:
+ * The same firmware loader is used as for the GuC. However, the actual
+ * loading to hardware is deferred until GEM initialization is done.
+ *
+ * Note that HuC firmware loading must be done before GuC loading.
+ */
+
+#define BXT_HUC_FW_MAJOR 01
+#define BXT_HUC_FW_MINOR 07
+#define BXT_BLD_NUM 1398
+
+#define SKL_HUC_FW_MAJOR 01
+#define SKL_HUC_FW_MINOR 07
+#define SKL_BLD_NUM 1398
+
+#define KBL_HUC_FW_MAJOR 02
+#define KBL_HUC_FW_MINOR 00
+#define KBL_BLD_NUM 1810
+
+#define HUC_FW_PATH(platform, major, minor, bld_num) \
+ "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
+ __stringify(minor) "_" __stringify(bld_num) ".bin"
+
+#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
+ SKL_HUC_FW_MINOR, SKL_BLD_NUM)
+MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
+
+#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
+ BXT_HUC_FW_MINOR, BXT_BLD_NUM)
+MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
+
+#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
+ KBL_HUC_FW_MINOR, KBL_BLD_NUM)
+MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
+
+/**
+ * huc_ucode_xfer() - DMA's the firmware
+ * @dev_priv: the drm_i915_private device
+ *
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
+{
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct i915_vma *vma;
+ unsigned long offset = 0;
+ u32 size;
+ int ret;
+
+ ret = i915_gem_object_set_to_gtt_domain(huc_fw->obj, false);
+ if (ret) {
+ DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
+ return ret;
+ }
+
+ vma = i915_gem_object_ggtt_pin(huc_fw->obj, NULL, 0, 0,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
+ return PTR_ERR(vma);
+ }
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ /* init WOPCM */
+ I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
+ I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE |
+ HUC_LOADING_AGENT_GUC);
+
+ /* Set the source address for the uCode */
+ offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
+ I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+ I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+ /* Hardware doesn't look at destination address for HuC. Set it to 0,
+ * but still program the correct address space.
+ */
+ I915_WRITE(DMA_ADDR_1_LOW, 0);
+ I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ size = huc_fw->header_size + huc_fw->ucode_size;
+ I915_WRITE(DMA_COPY_SIZE, size);
+
+ /* Start the DMA */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
+
+ /* Wait for DMA to finish */
+ ret = wait_for((I915_READ(DMA_CTRL) & START_DMA) == 0, 100);
+
+ DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
+
+ /* Disable the bits once DMA is over */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ /*
+ * We keep the object pages for reuse during resume. But we can unpin the
+ * vma now that the DMA has completed, so it doesn't continue to take up
+ * space in the GGTT.
+ */
+ i915_vma_unpin(vma);
+
+ return ret;
+}
+
+/**
+ * intel_huc_init() - initiate HuC firmware loading request
+ * @dev_priv: the drm_i915_private device
+ *
+ * Called early during driver load, but after GEM is initialised. Loading
+ * continues only if the driver explicitly specifies a firmware name and
+ * version for the platform; all other cases are left as
+ * INTEL_UC_FIRMWARE_NONE, either because the hardware is not capable or
+ * because the driver does not yet support it, and no error message is
+ * printed for those cases.
+ *
+ * The DMA copy to hardware is done later, when intel_huc_load() is called.
+ */
+void intel_huc_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_huc *huc = &dev_priv->huc;
+ struct intel_uc_fw *huc_fw = &huc->fw;
+ const char *fw_path = NULL;
+
+ huc_fw->path = NULL;
+ huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+ huc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
+ huc_fw->fw = INTEL_UC_FW_TYPE_HUC;
+
+ if (!HAS_HUC_UCODE(dev_priv))
+ return;
+
+ if (IS_SKYLAKE(dev_priv)) {
+ fw_path = I915_SKL_HUC_UCODE;
+ huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
+ } else if (IS_BROXTON(dev_priv)) {
+ fw_path = I915_BXT_HUC_UCODE;
+ huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
+ } else if (IS_KABYLAKE(dev_priv)) {
+ fw_path = I915_KBL_HUC_UCODE;
+ huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
+ huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
+ }
+
+ huc_fw->path = fw_path;
+ huc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+
+ DRM_DEBUG_DRIVER("HuC firmware pending, path %s\n", fw_path);
+
+ WARN(huc_fw->path == NULL, "HuC present but no fw path\n");
+
+ intel_uc_fw_fetch(dev_priv, huc_fw);
+}
+
+/**
+ * intel_huc_load() - load HuC uCode to device
+ * @dev_priv: the drm_i915_private device
+ *
+ * Called from guc_setup() during driver loading and also after a GPU reset.
+ * Note that HuC loading must be done before GuC loading.
+ *
+ * The firmware image should have already been fetched into memory by the
+ * earlier call to intel_huc_init(), so here we need only check that the
+ * fetch succeeded, and then transfer the image to the hardware.
+ *
+ * Return: non-zero code on error
+ */
+int intel_huc_load(struct drm_i915_private *dev_priv)
+{
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ int err;
+
+ if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_NONE)
+ return 0;
+
+ DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
+ huc_fw->path,
+ intel_uc_fw_status_repr(huc_fw->fetch_status),
+ intel_uc_fw_status_repr(huc_fw->load_status));
+
+ if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
+ huc_fw->load_status == INTEL_UC_FIRMWARE_FAIL)
+ return -ENOEXEC;
+
+ huc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+
+ switch (huc_fw->fetch_status) {
+ case INTEL_UC_FIRMWARE_FAIL:
+ /* something went wrong :( */
+ err = -EIO;
+ goto fail;
+
+ case INTEL_UC_FIRMWARE_NONE:
+ case INTEL_UC_FIRMWARE_PENDING:
+ default:
+ /* "can't happen" */
+ WARN_ONCE(1, "HuC fw %s invalid fetch_status %s [%d]\n",
+ huc_fw->path,
+ intel_uc_fw_status_repr(huc_fw->fetch_status),
+ huc_fw->fetch_status);
+ err = -ENXIO;
+ goto fail;
+
+ case INTEL_UC_FIRMWARE_SUCCESS:
+ break;
+ }
+
+ err = huc_ucode_xfer(dev_priv);
+ if (err)
+ goto fail;
+
+ huc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+
+ DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
+ huc_fw->path,
+ intel_uc_fw_status_repr(huc_fw->fetch_status),
+ intel_uc_fw_status_repr(huc_fw->load_status));
+
+ return 0;
+
+fail:
+ if (huc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
+ huc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
+
+ DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
+
+ return err;
+}
+
+/**
+ * intel_huc_fini() - clean up resources allocated for HuC
+ * @dev_priv: the drm_i915_private device
+ *
+ * Cleans up by releasing the huc firmware GEM obj.
+ */
+void intel_huc_fini(struct drm_i915_private *dev_priv)
+{
+ struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ if (huc_fw->obj)
+ i915_gem_object_put(huc_fw->obj);
+ huc_fw->obj = NULL;
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
+
+/**
+ * intel_guc_auth_huc() - authenticate ucode
+ * @dev_priv: the drm_i915_device
+ *
+ * Triggers a HuC firmware authentication request to the GuC via the
+ * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface.
+ */
+void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
+{
+ struct intel_guc *guc = &dev_priv->guc;
+ struct intel_huc *huc = &dev_priv->huc;
+ struct i915_vma *vma;
+ int ret;
+ u32 data[2];
+
+ if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return;
+
+ vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ if (IS_ERR(vma)) {
+ DRM_ERROR("failed to pin huc fw object %d\n",
+ (int)PTR_ERR(vma));
+ return;
+ }
+
+ /* Specify auth action and where public signature is. */
+ data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC;
+ data[1] = guc_ggtt_offset(vma) + huc->fw.rsa_offset;
+
+ ret = intel_guc_send(guc, data, ARRAY_SIZE(data));
+ if (ret) {
+ DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
+ goto out;
+ }
+
+ /* Check authentication status, it should be done by now */
+ ret = intel_wait_for_register(dev_priv,
+ HUC_STATUS2,
+ HUC_FW_VERIFIED,
+ HUC_FW_VERIFIED,
+ 50);
+
+ if (ret) {
+ DRM_ERROR("HuC: Authentication failed %d\n", ret);
+ goto out;
+ }
+
+out:
+ i915_vma_unpin(vma);
+}
+
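A minimal ordering sketch (illustrative, not part of the patch) of how the HuC entry points above are meant to be used around GuC setup, per the kerneldoc: fetch the image early, DMA it to hardware before the GuC is loaded, then have the GuC authenticate it.

	/* Illustrative only; the real call sites are in the driver-load and
	 * GuC setup paths described in the kerneldoc above.
	 */
	int err;

	intel_huc_init(dev_priv);		/* fetch the HuC firmware image */

	err = intel_huc_load(dev_priv);		/* DMA image to WOPCM, before GuC load */
	/* ... GuC firmware is loaded here ... */
	if (err == 0)
		intel_guc_auth_huc(dev_priv);	/* GuC verifies the HuC RSA signature */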
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 83f260bb4eef..bce1ba80f277 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -72,7 +72,7 @@ static const struct gmbus_pin gmbus_pins_bxt[] = {
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
return &gmbus_pins_bxt[pin];
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return &gmbus_pins_skl[pin];
@@ -87,7 +87,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
@@ -111,10 +111,8 @@ to_intel_gmbus(struct i2c_adapter *i2c)
}
void
-intel_i2c_reset(struct drm_device *dev)
+intel_i2c_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(GMBUS0, 0);
I915_WRITE(GMBUS4, 0);
}
@@ -141,7 +139,7 @@ static u32 get_reserved(struct intel_gmbus *bus)
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
- if (!IS_I830(dev_priv) && !IS_845G(dev_priv))
+ if (!IS_I830(dev_priv) && !IS_I845G(dev_priv))
reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
@@ -211,7 +209,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- intel_i2c_reset(&dev_priv->drm);
+ intel_i2c_reset(dev_priv);
intel_i2c_quirk_set(dev_priv, true);
set_data(bus, 1);
set_clock(bus, 1);
@@ -617,11 +615,10 @@ static const struct i2c_algorithm gmbus_algorithm = {
/**
* intel_gmbus_setup - instantiate all Intel i2c GMBuses
- * @dev: DRM device
+ * @dev_priv: i915 device private
*/
-int intel_setup_gmbus(struct drm_device *dev)
+int intel_setup_gmbus(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_gmbus *bus;
unsigned int pin;
@@ -678,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
goto err;
}
- intel_i2c_reset(&dev_priv->drm);
+ intel_i2c_reset(dev_priv);
return 0;
@@ -724,9 +721,8 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
mutex_unlock(&dev_priv->gmbus_mutex);
}
-void intel_teardown_gmbus(struct drm_device *dev)
+void intel_teardown_gmbus(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_gmbus *bus;
unsigned int pin;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index beabc17e7c8a..ebf8023d21e6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -230,8 +230,6 @@ enum {
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
-static int intel_lr_context_pin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
@@ -362,7 +360,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct drm_i915_gem_request *rq)
{
struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
- struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+ struct i915_hw_ppgtt *ppgtt =
+ rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = rq->tail;
@@ -415,7 +414,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
{
return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
- ctx->execlists_force_single_submission);
+ i915_gem_context_force_single_submission(ctx));
}
static bool can_merge_ctx(const struct i915_gem_context *prev,
@@ -514,15 +513,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
RB_CLEAR_NODE(&cursor->priotree.node);
cursor->priotree.priority = INT_MAX;
- /* We keep the previous context alive until we retire the
- * following request. This ensures that any the context object
- * is still pinned for any residual writes the HW makes into it
- * on the context switch into the next object following the
- * breadcrumb. Otherwise, we may retire the context too early.
- */
- cursor->previous_context = engine->last_context;
- engine->last_context = cursor->ctx;
-
__i915_gem_request_submit(cursor);
last = cursor;
submit = true;
@@ -695,7 +685,6 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
{
- static DEFINE_MUTEX(lock);
struct intel_engine_cs *engine = NULL;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
@@ -704,8 +693,8 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
if (prio <= READ_ONCE(request->priotree.priority))
return;
- /* Need global lock to use the temporary link inside i915_dependency */
- mutex_lock(&lock);
+ /* Need BKL in order to use the temporary link inside i915_dependency */
+ lockdep_assert_held(&request->i915->drm.struct_mutex);
stack.signaler = &request->priotree;
list_add(&stack.dfs_link, &dfs);
@@ -734,7 +723,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
if (prio > READ_ONCE(p->signaler->priority))
list_move_tail(&p->dfs_link, &dfs);
- p = list_next_entry(dep, dfs_link);
+ list_safe_reset_next(dep, p, dfs_link);
if (!RB_EMPTY_NODE(&pt->node))
continue;
@@ -772,80 +761,14 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
if (engine)
spin_unlock_irq(&engine->timeline->lock);
- mutex_unlock(&lock);
-
/* XXX Do we need to preempt to make room for us and our deps? */
}
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
-{
- struct intel_engine_cs *engine = request->engine;
- struct intel_context *ce = &request->ctx->engine[engine->id];
- int ret;
-
- /* Flush enough space to reduce the likelihood of waiting after
- * we start building the request - in which case we will just
- * have to repeat work.
- */
- request->reserved_space += EXECLISTS_REQUEST_SIZE;
-
- if (!ce->state) {
- ret = execlists_context_deferred_alloc(request->ctx, engine);
- if (ret)
- return ret;
- }
-
- request->ring = ce->ring;
-
- ret = intel_lr_context_pin(request->ctx, engine);
- if (ret)
- return ret;
-
- if (i915.enable_guc_submission) {
- /*
- * Check that the GuC has space for the request before
- * going any further, as the i915_add_request() call
- * later on mustn't fail ...
- */
- ret = i915_guc_wq_reserve(request);
- if (ret)
- goto err_unpin;
- }
-
- ret = intel_ring_begin(request, 0);
- if (ret)
- goto err_unreserve;
-
- if (!ce->initialised) {
- ret = engine->init_context(request);
- if (ret)
- goto err_unreserve;
-
- ce->initialised = true;
- }
-
- /* Note that after this point, we have committed to using
- * this request as it is being used to both track the
- * state of engine initialisation and liveness of the
- * golden renderstate above. Think twice before you try
- * to cancel/unwind this request now.
- */
-
- request->reserved_space -= EXECLISTS_REQUEST_SIZE;
- return 0;
-
-err_unreserve:
- if (i915.enable_guc_submission)
- i915_guc_wq_unreserve(request);
-err_unpin:
- intel_lr_context_unpin(request->ctx, engine);
- return ret;
-}
-
-static int intel_lr_context_pin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
+ unsigned int flags;
void *vaddr;
int ret;
@@ -854,8 +777,20 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
if (ce->pin_count++)
return 0;
- ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP | PIN_GLOBAL);
+ if (!ce->state) {
+ ret = execlists_context_deferred_alloc(ctx, engine);
+ if (ret)
+ goto err;
+ }
+ GEM_BUG_ON(!ce->state);
+
+ flags = PIN_GLOBAL;
+ if (ctx->ggtt_offset_bias)
+ flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
+ if (i915_gem_context_is_kernel(ctx))
+ flags |= PIN_HIGH;
+
+ ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
if (ret)
goto err;
@@ -865,7 +800,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
goto unpin_vma;
}
- ret = intel_ring_pin(ce->ring);
+ ret = intel_ring_pin(ce->ring, ctx->ggtt_offset_bias);
if (ret)
goto unpin_map;
@@ -877,12 +812,6 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
ce->state->obj->mm.dirty = true;
- /* Invalidate GuC TLB. */
- if (i915.enable_guc_submission) {
- struct drm_i915_private *dev_priv = ctx->i915;
- I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- }
-
i915_gem_context_get(ctx);
return 0;
@@ -895,8 +824,8 @@ err:
return ret;
}
-void intel_lr_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void execlists_context_unpin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
@@ -914,6 +843,63 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
i915_gem_context_put(ctx);
}
+static int execlists_request_alloc(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_context *ce = &request->ctx->engine[engine->id];
+ int ret;
+
+ GEM_BUG_ON(!ce->pin_count);
+
+ /* Flush enough space to reduce the likelihood of waiting after
+ * we start building the request - in which case we will just
+ * have to repeat work.
+ */
+ request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+ GEM_BUG_ON(!ce->ring);
+ request->ring = ce->ring;
+
+ if (i915.enable_guc_submission) {
+ /*
+ * Check that the GuC has space for the request before
+ * going any further, as the i915_add_request() call
+ * later on mustn't fail ...
+ */
+ ret = i915_guc_wq_reserve(request);
+ if (ret)
+ goto err;
+ }
+
+ ret = intel_ring_begin(request, 0);
+ if (ret)
+ goto err_unreserve;
+
+ if (!ce->initialised) {
+ ret = engine->init_context(request);
+ if (ret)
+ goto err_unreserve;
+
+ ce->initialised = true;
+ }
+
+ /* Note that after this point, we have committed to using
+ * this request as it is being used to both track the
+ * state of engine initialisation and liveness of the
+ * golden renderstate above. Think twice before you try
+ * to cancel/unwind this request now.
+ */
+
+ request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+ return 0;
+
+err_unreserve:
+ if (i915.enable_guc_submission)
+ i915_guc_wq_unreserve(request);
+err:
+ return ret;
+}
+
static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
int ret, i;
@@ -1236,11 +1222,11 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create(&engine->i915->drm, PAGE_ALIGN(size));
+ obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1334,15 +1320,6 @@ out:
return ret;
}
-static void lrc_init_hws(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE(RING_HWS_PGA(engine->mmio_base),
- engine->status_page.ggtt_offset);
- POSTING_READ(RING_HWS_PGA(engine->mmio_base));
-}
-
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1352,20 +1329,19 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- lrc_init_hws(engine);
-
intel_engine_reset_breadcrumbs(engine);
+ intel_engine_init_hangcheck(engine);
I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
-
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ I915_WRITE(RING_HWS_PGA(engine->mmio_base),
+ engine->status_page.ggtt_offset);
+ POSTING_READ(RING_HWS_PGA(engine->mmio_base));
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
- intel_engine_init_hangcheck(engine);
-
/* After a GPU reset, we may have requests to replay */
if (!execlists_elsp_idle(engine)) {
engine->execlist_port[0].count = 0;
@@ -1414,7 +1390,20 @@ static void reset_common_ring(struct intel_engine_cs *engine,
{
struct drm_i915_private *dev_priv = engine->i915;
struct execlist_port *port = engine->execlist_port;
- struct intel_context *ce = &request->ctx->engine[engine->id];
+ struct intel_context *ce;
+
+ /* If the request was innocent, we leave the request in the ELSP
+ * and will try to replay it on restarting. The context image may
+ * have been corrupted by the reset, in which case we may have
+ * to service a new GPU hang, but more likely we can continue on
+ * without impact.
+ *
+ * If the request was guilty, we presume the context is corrupt
+ * and have to at least restore the RING register in the context
+ * image back to the expected values to skip over the guilty request.
+ */
+ if (!request || request->fence.error != -EIO)
+ return;
/* We want a simple context + ring to execute the breadcrumb update.
* We cannot rely on the context being intact across the GPU hang,
@@ -1423,6 +1412,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
+ ce = &request->ctx->engine[engine->id];
execlists_init_reg_state(ce->lrc_reg_state,
request->ctx, engine, ce->ring);
@@ -1784,13 +1774,12 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
if (engine->cleanup)
engine->cleanup(engine);
- intel_engine_cleanup_common(engine);
-
if (engine->status_page.vma) {
i915_gem_object_unpin_map(engine->status_page.vma->obj);
engine->status_page.vma = NULL;
}
- intel_lr_context_unpin(dev_priv->kernel_context, engine);
+
+ intel_engine_cleanup_common(engine);
lrc_destroy_wa_ctx_obj(engine);
engine->i915 = NULL;
@@ -1815,6 +1804,12 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
/* Default vfuncs which can be overriden by each engine. */
engine->init_hw = gen8_init_common_ring;
engine->reset_hw = reset_common_ring;
+
+ engine->context_pin = execlists_context_pin;
+ engine->context_unpin = execlists_context_unpin;
+
+ engine->request_alloc = execlists_request_alloc;
+
engine->emit_flush = gen8_emit_flush;
engine->emit_breadcrumb = gen8_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
@@ -1897,18 +1892,6 @@ logical_ring_init(struct intel_engine_cs *engine)
if (ret)
goto error;
- ret = execlists_context_deferred_alloc(dctx, engine);
- if (ret)
- goto error;
-
- /* As this is the default context, always pin it */
- ret = intel_lr_context_pin(dctx, engine);
- if (ret) {
- DRM_ERROR("Failed to pin context for %s: %d\n",
- engine->name, ret);
- goto error;
- }
-
/* And setup the hardware status page. */
ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
if (ret) {
@@ -1943,7 +1926,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
- ret = intel_engine_create_scratch(engine, 4096);
+ ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
return ret;
@@ -2119,19 +2102,12 @@ static void execlists_init_reg_state(u32 *reg_state,
ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
0);
- if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+ if (ppgtt && USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
ASSIGN_CTX_PML4(ppgtt, reg_state);
- } else {
- /* 32b PPGTT
- * PDP*_DESCRIPTOR contains the base address of space supported.
- * With dynamic page allocation, PDPs may not be allocated at
- * this point. Point the unallocated PDPs to the scratch page
- */
- execlists_update_context_pdps(ppgtt, reg_state);
}
if (engine->id == RCS) {
@@ -2225,18 +2201,19 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
WARN_ON(ce->state);
- context_size = round_up(intel_lr_context_size(engine), 4096);
+ context_size = round_up(intel_lr_context_size(engine),
+ I915_GTT_PAGE_SIZE);
/* One extra page as the sharing data between driver and GuC */
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
- ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
+ ctx_obj = i915_gem_object_create(ctx->i915, context_size);
if (IS_ERR(ctx_obj)) {
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
return PTR_ERR(ctx_obj);
}
- vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index c1f546180ba2..0c852c024227 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -26,7 +26,7 @@
#include "intel_ringbuffer.h"
-#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
/* Execlists regs */
#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
@@ -63,14 +63,12 @@ enum {
};
/* Logical Rings */
-int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
-int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int logical_render_ring_init(struct intel_engine_cs *engine);
int logical_xcs_ring_init(struct intel_engine_cs *engine);
-int intel_engines_init(struct drm_device *dev);
+int intel_engines_init(struct drm_i915_private *dev_priv);
/* Logical Ring Contexts */
@@ -79,13 +77,10 @@ int intel_engines_init(struct drm_device *dev);
#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
+struct drm_i915_private;
struct i915_gem_context;
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
-void intel_lr_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
-
-struct drm_i915_private;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index daa523410953..c300647ef604 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -35,21 +35,59 @@ static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
return &dig_port->dp;
}
+static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
+{
+ switch (mode) {
+ case DRM_LSPCON_MODE_PCON:
+ return "PCON";
+ case DRM_LSPCON_MODE_LS:
+ return "LS";
+ case DRM_LSPCON_MODE_INVALID:
+ return "INVALID";
+ default:
+ MISSING_CASE(mode);
+ return "INVALID";
+ }
+}
+
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
- enum drm_lspcon_mode current_mode = DRM_LSPCON_MODE_INVALID;
+ enum drm_lspcon_mode current_mode;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
- if (drm_lspcon_get_mode(adapter, &current_mode))
+ if (drm_lspcon_get_mode(adapter, &current_mode)) {
DRM_ERROR("Error reading LSPCON mode\n");
- else
- DRM_DEBUG_KMS("Current LSPCON mode %s\n",
- current_mode == DRM_LSPCON_MODE_PCON ? "PCON" : "LS");
+ return DRM_LSPCON_MODE_INVALID;
+ }
+ return current_mode;
+}
+
+static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
+ enum drm_lspcon_mode mode)
+{
+ enum drm_lspcon_mode current_mode;
+
+ current_mode = lspcon_get_current_mode(lspcon);
+ if (current_mode == mode || current_mode == DRM_LSPCON_MODE_INVALID)
+ goto out;
+
+ DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
+ lspcon_mode_name(mode));
+
+ wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode ||
+ current_mode == DRM_LSPCON_MODE_INVALID, 100);
+ if (current_mode != mode)
+ DRM_DEBUG_KMS("LSPCON mode hasn't settled\n");
+
+out:
+ DRM_DEBUG_KMS("Current LSPCON mode %s\n",
+ lspcon_mode_name(current_mode));
+
return current_mode;
}
static int lspcon_change_mode(struct intel_lspcon *lspcon,
- enum drm_lspcon_mode mode, bool force)
+ enum drm_lspcon_mode mode)
{
int err;
enum drm_lspcon_mode current_mode;
@@ -77,10 +115,30 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
return 0;
}
+static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
+{
+ uint8_t rev;
+
+ if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
+ &rev) != 1) {
+ DRM_DEBUG_KMS("Native AUX CH down\n");
+ return false;
+ }
+
+ DRM_DEBUG_KMS("Native AUX CH up, DPCD version: %d.%d\n",
+ rev >> 4, rev & 0xf);
+
+ return true;
+}
+
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
enum drm_dp_dual_mode_type adaptor_type;
struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+ enum drm_lspcon_mode expected_mode;
+
+ expected_mode = lspcon_wake_native_aux_ch(lspcon) ?
+ DRM_LSPCON_MODE_PCON : DRM_LSPCON_MODE_LS;
 /* Let's probe the adaptor and check its type */
adaptor_type = drm_dp_dual_mode_detect(adapter);
@@ -92,7 +150,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
/* Yay ... got a LSPCON device */
DRM_DEBUG_KMS("LSPCON detected\n");
- lspcon->mode = lspcon_get_current_mode(lspcon);
+ lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
lspcon->active = true;
return true;
}
@@ -100,6 +158,8 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
unsigned long start = jiffies;
if (!lspcon->desc_valid)
@@ -115,7 +175,8 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
if (!__intel_dp_read_desc(intel_dp, &desc))
return;
- if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
+ if (intel_digital_port_connected(dev_priv, dig_port) &&
+ !memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
jiffies_to_msecs(jiffies - start));
return;
@@ -132,14 +193,29 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
void lspcon_resume(struct intel_lspcon *lspcon)
{
- lspcon_resume_in_pcon_wa(lspcon);
+ enum drm_lspcon_mode expected_mode;
+
+ if (lspcon_wake_native_aux_ch(lspcon)) {
+ expected_mode = DRM_LSPCON_MODE_PCON;
+ lspcon_resume_in_pcon_wa(lspcon);
+ } else {
+ expected_mode = DRM_LSPCON_MODE_LS;
+ }
+
+ if (lspcon_wait_mode(lspcon, expected_mode) == DRM_LSPCON_MODE_PCON)
+ return;
- if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON, true))
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON))
DRM_ERROR("LSPCON resume failed\n");
else
DRM_DEBUG_KMS("LSPCON resume success\n");
}
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon)
+{
+ lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON);
+}
+
bool lspcon_init(struct intel_digital_port *intel_dig_port)
{
struct intel_dp *dp = &intel_dig_port->dp;
@@ -166,8 +242,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
* 2.0 sinks.
*/
if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
- if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON,
- true) < 0) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
DRM_ERROR("LSPCON mode change to PCON failed\n");
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d12ef0047d49..9ca4dc4d2378 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -460,13 +460,13 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
static enum drm_connector_status
intel_lvds_detect(struct drm_connector *connector, bool force)
{
- struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
enum drm_connector_status status;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- status = intel_panel_detect(dev);
+ status = intel_panel_detect(dev_priv);
if (status != connector_status_unknown)
return status;
@@ -971,9 +971,9 @@ static bool intel_lvds_supported(struct drm_i915_private *dev_priv)
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_device *dev)
+void intel_lvds_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
struct intel_lvds_connector *lvds_connector;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 80bb9247ce66..c787fc4e6eb9 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -182,7 +182,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->table = broxton_mocs_table;
result = true;
@@ -380,7 +380,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
/**
* intel_mocs_init_l3cc_table() - program the mocs control table
- * @dev: The the device to be programmed.
+ * @dev_priv: i915 device private
*
* This function simply programs the mocs registers for the given table
* starting at the given address. This register set is programmed in pairs.
@@ -392,9 +392,8 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
*
* Return: Nothing.
*/
-void intel_mocs_init_l3cc_table(struct drm_device *dev)
+void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_mocs_table table;
unsigned int i;
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index a8bd9f7bfece..ce4a5dfa5f94 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -53,7 +53,7 @@
#include "i915_drv.h"
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
-void intel_mocs_init_l3cc_table(struct drm_device *dev);
+void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
int intel_mocs_init_engine(struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f4429f67a4e3..4a862a358c70 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -982,7 +982,18 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
opregion->vbt_size = vbt_size;
} else {
vbt = base + OPREGION_VBT_OFFSET;
- vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET;
+ /*
+ * The VBT specification says that if the ASLE ext
+ * mailbox is not used its area is reserved, but
+ * on some CHT boards the VBT extends into the
+ * ASLE ext area. Allow this even though it is
+ * against the spec, so we do not end up rejecting
+ * the VBT on those boards (and end up not finding the
+ * LCD panel because of this).
+ */
+ vbt_size = (mboxes & MBOX_ASLE_EXT) ?
+ OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
+ vbt_size -= OPREGION_VBT_OFFSET;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index e589e17876dc..0608fad7f593 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -187,6 +187,29 @@ struct intel_overlay {
struct i915_gem_active last_flip;
};
+static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ u8 val;
+
+ /* WA_OVERLAY_CLKGATE:alm */
+ if (enable)
+ I915_WRITE(DSPCLK_GATE_D, 0);
+ else
+ I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+
+ /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
+ pci_bus_read_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
+ if (enable)
+ val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ else
+ val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ pci_bus_write_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
+}
+
static struct overlay_registers __iomem *
intel_overlay_map_regs(struct intel_overlay *overlay)
{
@@ -262,6 +285,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = true;
+ if (IS_I830(dev_priv))
+ i830_overlay_clock_gating(dev_priv, false);
+
ring = req->ring;
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
@@ -272,8 +298,30 @@ static int intel_overlay_on(struct intel_overlay *overlay)
return intel_overlay_do_wait_request(overlay, req, NULL);
}
+static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
+ struct i915_vma *vma)
+{
+ enum pipe pipe = overlay->crtc->pipe;
+
+ WARN_ON(overlay->old_vma);
+
+ i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
+ vma ? vma->obj : NULL,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ intel_frontbuffer_flip_prepare(overlay->i915,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+ overlay->old_vma = overlay->vma;
+ if (vma)
+ overlay->vma = i915_vma_get(vma);
+ else
+ overlay->vma = NULL;
+}
+
/* overlay needs to be enabled in OCMD reg */
static int intel_overlay_continue(struct intel_overlay *overlay,
+ struct i915_vma *vma,
bool load_polyphase_filter)
{
struct drm_i915_private *dev_priv = overlay->i915;
@@ -308,53 +356,57 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
intel_ring_emit(ring, flip_addr);
intel_ring_advance(ring);
+ intel_overlay_flip_prepare(overlay, vma);
+
intel_overlay_submit_request(overlay, req, NULL);
return 0;
}
-static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
- struct drm_i915_gem_request *req)
+static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
struct i915_vma *vma;
vma = fetch_and_zero(&overlay->old_vma);
if (WARN_ON(!vma))
return;
- i915_gem_track_fb(vma->obj, NULL,
- INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip_complete(overlay->i915,
+ INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_gem_object_unpin_from_display_plane(vma);
i915_vma_put(vma);
}
+static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
+ struct drm_i915_gem_request *req)
+{
+ struct intel_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+
+ intel_overlay_release_old_vma(overlay);
+}
+
static void intel_overlay_off_tail(struct i915_gem_active *active,
struct drm_i915_gem_request *req)
{
struct intel_overlay *overlay =
container_of(active, typeof(*overlay), last_flip);
- struct i915_vma *vma;
-
- /* never have the overlay hw on without showing a frame */
- vma = fetch_and_zero(&overlay->vma);
- if (WARN_ON(!vma))
- return;
+ struct drm_i915_private *dev_priv = overlay->i915;
- i915_gem_object_unpin_from_display_plane(vma);
- i915_vma_put(vma);
+ intel_overlay_release_old_vma(overlay);
overlay->crtc->overlay = NULL;
overlay->crtc = NULL;
overlay->active = false;
+
+ if (IS_I830(dev_priv))
+ i830_overlay_clock_gating(dev_priv, true);
}
/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{
- struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
@@ -379,25 +431,21 @@ static int intel_overlay_off(struct intel_overlay *overlay)
}
ring = req->ring;
+
/* wait for overlay to go idle */
intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
intel_ring_emit(ring, flip_addr);
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+
/* turn overlay off */
- if (IS_I830(dev_priv)) {
- /* Workaround: Don't disable the overlay fully, since otherwise
- * it dies on the next OVERLAY_ON cmd. */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_NOOP);
- } else {
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring,
- MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- }
+ intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+ intel_ring_emit(ring, flip_addr);
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+
intel_ring_advance(ring);
+ intel_overlay_flip_prepare(overlay, NULL);
+
return intel_overlay_do_wait_request(overlay, req,
intel_overlay_off_tail);
}
@@ -542,51 +590,57 @@ static int uv_vsubsampling(u32 format)
static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
{
- u32 mask, shift, ret;
- if (IS_GEN2(dev_priv)) {
- mask = 0x1f;
- shift = 5;
- } else {
- mask = 0x3f;
- shift = 6;
- }
- ret = ((offset + width + mask) >> shift) - (offset >> shift);
- if (!IS_GEN2(dev_priv))
- ret <<= 1;
- ret -= 1;
- return ret << 2;
+ u32 sw;
+
+ if (IS_GEN2(dev_priv))
+ sw = ALIGN((offset & 31) + width, 32);
+ else
+ sw = ALIGN((offset & 63) + width, 64);
+
+ if (sw == 0)
+ return 0;
+
+ return (sw - 32) >> 3;
}
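For reference, the rewritten calc_swidthsw() reduces to a couple of alignment steps. A standalone sketch of the non-GEN2 arithmetic (64-byte granularity), using the usual power-of-two ALIGN() expression and made-up offset/width values:

	#include <stdio.h>
	#include <stdint.h>

	/* Power-of-two alignment, same shape as the kernel's ALIGN() macro. */
	#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

	/* Mirror of the rewritten helper, non-GEN2 path only. */
	static uint32_t calc_swidthsw(uint32_t offset, uint32_t width)
	{
		uint32_t sw = ALIGN_POW2((offset & 63) + width, 64);

		return sw ? (sw - 32) >> 3 : 0;
	}

	int main(void)
	{
		/* Hypothetical surface: offset 0x1010 (16 bytes into a
		 * 64-byte unit), width 736 rounds up to 768, giving 92. */
		printf("SWIDTHSW field: %u\n", calc_swidthsw(0x1010, 720));
		return 0;
	}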
-static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
- 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
- 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
- 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
- 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
- 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
- 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
- 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
- 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
- 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
- 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
- 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
- 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
- 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
- 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
- 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
- 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
- 0xb000, 0x3000, 0x0800, 0x3000, 0xb000
+static const u16 y_static_hcoeffs[N_PHASES][N_HORIZ_Y_TAPS] = {
+ [ 0] = { 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0, },
+ [ 1] = { 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440, },
+ [ 2] = { 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0, },
+ [ 3] = { 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380, },
+ [ 4] = { 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320, },
+ [ 5] = { 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0, },
+ [ 6] = { 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260, },
+ [ 7] = { 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200, },
+ [ 8] = { 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0, },
+ [ 9] = { 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160, },
+ [10] = { 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120, },
+ [11] = { 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0, },
+ [12] = { 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0, },
+ [13] = { 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060, },
+ [14] = { 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040, },
+ [15] = { 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020, },
+ [16] = { 0xb000, 0x3000, 0x0800, 0x3000, 0xb000, },
};
-static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
- 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
- 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
- 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
- 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
- 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
- 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
- 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
- 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
- 0x3000, 0x0800, 0x3000
+static const u16 uv_static_hcoeffs[N_PHASES][N_HORIZ_UV_TAPS] = {
+ [ 0] = { 0x3000, 0x1800, 0x1800, },
+ [ 1] = { 0xb000, 0x18d0, 0x2e60, },
+ [ 2] = { 0xb000, 0x1990, 0x2ce0, },
+ [ 3] = { 0xb020, 0x1a68, 0x2b40, },
+ [ 4] = { 0xb040, 0x1b20, 0x29e0, },
+ [ 5] = { 0xb060, 0x1bd8, 0x2880, },
+ [ 6] = { 0xb080, 0x1c88, 0x3e60, },
+ [ 7] = { 0xb0a0, 0x1d28, 0x3c00, },
+ [ 8] = { 0xb0c0, 0x1db8, 0x39e0, },
+ [ 9] = { 0xb0e0, 0x1e40, 0x37e0, },
+ [10] = { 0xb100, 0x1eb8, 0x3620, },
+ [11] = { 0xb100, 0x1f18, 0x34a0, },
+ [12] = { 0xb100, 0x1f68, 0x3360, },
+ [13] = { 0xb0e0, 0x1fa8, 0x3240, },
+ [14] = { 0xb0c0, 0x1fe0, 0x3140, },
+ [15] = { 0xb060, 0x1ff0, 0x30a0, },
+ [16] = { 0x3000, 0x0800, 0x3000, },
};
static void update_polyphase_filter(struct overlay_registers __iomem *regs)
@@ -659,31 +713,32 @@ static bool update_scaling_factors(struct intel_overlay *overlay,
static void update_colorkey(struct intel_overlay *overlay,
struct overlay_registers __iomem *regs)
{
+ const struct intel_plane_state *state =
+ to_intel_plane_state(overlay->crtc->base.primary->state);
u32 key = overlay->color_key;
- u32 flags;
+ u32 format = 0;
+ u32 flags = 0;
- flags = 0;
if (overlay->color_key_enabled)
flags |= DST_KEY_ENABLE;
- switch (overlay->crtc->base.primary->fb->bits_per_pixel) {
- case 8:
+ if (state->base.visible)
+ format = state->base.fb->format->format;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
key = 0;
flags |= CLK_RGB8I_MASK;
break;
-
- case 16:
- if (overlay->crtc->base.primary->fb->depth == 15) {
- key = RGB15_TO_COLORKEY(key);
- flags |= CLK_RGB15_MASK;
- } else {
- key = RGB16_TO_COLORKEY(key);
- flags |= CLK_RGB16_MASK;
- }
+ case DRM_FORMAT_XRGB1555:
+ key = RGB15_TO_COLORKEY(key);
+ flags |= CLK_RGB15_MASK;
break;
-
- case 24:
- case 32:
+ case DRM_FORMAT_RGB565:
+ key = RGB16_TO_COLORKEY(key);
+ flags |= CLK_RGB16_MASK;
+ break;
+ default:
flags |= CLK_RGB24_MASK;
break;
}
@@ -756,8 +811,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
- &i915_ggtt_view_normal);
+ vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -836,18 +890,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
intel_overlay_unmap_regs(overlay, regs);
- ret = intel_overlay_continue(overlay, scale_changed);
+ ret = intel_overlay_continue(overlay, vma, scale_changed);
if (ret)
goto out_unpin;
- i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
- vma->obj, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
- overlay->old_vma = overlay->vma;
- overlay->vma = vma;
-
- intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
return 0;
out_unpin:
@@ -921,12 +967,13 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
static int check_overlay_dst(struct intel_overlay *overlay,
struct drm_intel_overlay_put_image *rec)
{
- struct drm_display_mode *mode = &overlay->crtc->base.mode;
+ const struct intel_crtc_state *pipe_config =
+ overlay->crtc->config;
- if (rec->dst_x < mode->hdisplay &&
- rec->dst_x + rec->dst_width <= mode->hdisplay &&
- rec->dst_y < mode->vdisplay &&
- rec->dst_y + rec->dst_height <= mode->vdisplay)
+ if (rec->dst_x < pipe_config->pipe_src_w &&
+ rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
+ rec->dst_y < pipe_config->pipe_src_h &&
+ rec->dst_y + rec->dst_height <= pipe_config->pipe_src_h)
return 0;
else
return -EINVAL;
@@ -958,7 +1005,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
u32 tmp;
/* check src dimensions */
- if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
+ if (IS_I845G(dev_priv) || IS_I830(dev_priv)) {
if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
return -EINVAL;
@@ -1010,7 +1057,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
return -EINVAL;
/* stride checking */
- if (IS_I830(dev_priv) || IS_845G(dev_priv))
+ if (IS_I830(dev_priv) || IS_I845G(dev_priv))
stride_mask = 255;
else
stride_mask = 63;
@@ -1058,33 +1105,6 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
return 0;
}
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
-{
- u32 pfit_control;
-
- /* i830 doesn't have a panel fitter */
- if (INTEL_GEN(dev_priv) <= 3 &&
- (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
- return -1;
-
- pfit_control = I915_READ(PFIT_CONTROL);
-
- /* See if the panel fitter is in use */
- if ((pfit_control & PFIT_ENABLE) == 0)
- return -1;
-
- /* 965 can place panel fitter on either pipe */
- if (IS_GEN4(dev_priv))
- return (pfit_control >> 29) & 0x3;
-
- /* older chips can only use pipe 1 */
- return 1;
-}
-
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -1146,7 +1166,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
if (overlay->crtc != crtc) {
- struct drm_display_mode *mode = &crtc->base.mode;
ret = intel_overlay_switch_off(overlay);
if (ret != 0)
goto out_unlock;
@@ -1159,8 +1178,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
crtc->overlay = overlay;
/* line too wide, i.e. one-line-mode */
- if (mode->hdisplay > 1024 &&
- intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
+ if (crtc->config->pipe_src_w > 1024 &&
+ crtc->config->gmch_pfit.control & PFIT_ENABLE) {
overlay->pfit_active = true;
update_pfit_vscale_ratio(overlay);
} else
@@ -1215,6 +1234,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
mutex_unlock(&dev->struct_mutex);
drm_modeset_unlock_all(dev);
+ i915_gem_object_put(new_bo);
kfree(params);
@@ -1392,10 +1412,9 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
reg_bo = NULL;
if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
- reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
- PAGE_SIZE);
+ reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
if (reg_bo == NULL)
- reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
+ reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
if (IS_ERR(reg_bo))
goto out_free;
overlay->reg_bo = reg_bo;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 08ab6d762ca4..1a6ff26dea20 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -48,7 +48,7 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
/**
* intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev: drm device
+ * @dev_priv: i915 device instance
 * @fixed_mode: panel native mode
* @connector: LVDS/eDP connector
*
@@ -56,7 +56,7 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
* Find the reduced downclock for LVDS/eDP in EDID.
*/
struct drm_display_mode *
-intel_find_panel_downclock(struct drm_device *dev,
+intel_find_panel_downclock(struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
struct drm_connector *connector)
{
@@ -94,7 +94,7 @@ intel_find_panel_downclock(struct drm_device *dev,
}
if (temp_downclock < fixed_mode->clock)
- return drm_mode_duplicate(dev, tmp_mode);
+ return drm_mode_duplicate(&dev_priv->drm, tmp_mode);
else
return NULL;
}
@@ -375,10 +375,8 @@ out:
}
enum drm_connector_status
-intel_panel_detect(struct drm_device *dev)
+intel_panel_detect(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
/* Assume that the BIOS does not lie through the OpRegion... */
if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
return *dev_priv->opregion.lid_state & 0x1 ?
@@ -1039,10 +1037,7 @@ static void bxt_enable_backlight(struct intel_connector *connector)
enum pipe pipe = intel_get_pipe_from_connector(connector);
u32 pwm_ctl, val;
- /* To use 2nd set of backlight registers, utility pin has to be
- * enabled with PWM mode.
- * The field should only be changed when the utility pin is disabled
- */
+ /* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
val = I915_READ(UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
@@ -1332,8 +1327,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int clock;
if (IS_G4X(dev_priv))
@@ -1608,19 +1602,11 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
- /*
- * For BXT hard coding the Backlight controller to 0.
- * TODO : Read the controller value from VBT and generalize
- */
- panel->backlight.controller = 0;
+ panel->backlight.controller = dev_priv->vbt.backlight.controller;
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- /* Keeping the check if controller 1 is to be programmed.
- * This will come into affect once the VBT parsing
- * is fixed for controller selection, and controller 1 is used
- * for a prticular display configuration.
- */
+ /* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
val = I915_READ(UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
@@ -1756,7 +1742,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
panel->backlight.setup = bxt_setup_backlight;
panel->backlight.enable = bxt_enable_backlight;
panel->backlight.disable = bxt_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
new file mode 100644
index 000000000000..c0b1f99da37b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -0,0 +1,1011 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Author: Damien Lespiau <damien.lespiau@intel.com>
+ *
+ */
+
+#include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include "intel_drv.h"
+
+struct pipe_crc_info {
+ const char *name;
+ struct drm_i915_private *dev_priv;
+ enum pipe pipe;
+};
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release.
+ */
+static int drm_add_fake_info_node(struct drm_minor *minor,
+ struct dentry *ent, const void *key)
+{
+ struct drm_info_node *node;
+
+ node = kmalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL) {
+ debugfs_remove(ent);
+ return -ENOMEM;
+ }
+
+ node->minor = minor;
+ node->dent = ent;
+ node->info_ent = (void *) key;
+
+ mutex_lock(&minor->debugfs_lock);
+ list_add(&node->list, &minor->debugfs_list);
+ mutex_unlock(&minor->debugfs_lock);
+
+ return 0;
+}
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev_priv;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ if (info->pipe >= INTEL_INFO(dev_priv)->num_pipes)
+ return -ENODEV;
+
+ spin_lock_irq(&pipe_crc->lock);
+
+ if (pipe_crc->opened) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EBUSY; /* already open */
+ }
+
+ pipe_crc->opened = true;
+ filep->private_data = inode->i_private;
+
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+ struct pipe_crc_info *info = inode->i_private;
+ struct drm_i915_private *dev_priv = info->dev_priv;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+ spin_lock_irq(&pipe_crc->lock);
+ pipe_crc->opened = false;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN (6 * 8 + 5 + 1)
+/* account for '\0' */
+#define PIPE_CRC_BUFFER_LEN (PIPE_CRC_LINE_LEN + 1)
+
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+ assert_spin_locked(&pipe_crc->lock);
+ return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR);
+}
+
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+ loff_t *pos)
+{
+ struct pipe_crc_info *info = filep->private_data;
+ struct drm_i915_private *dev_priv = info->dev_priv;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+ char buf[PIPE_CRC_BUFFER_LEN];
+ int n_entries;
+ ssize_t bytes_read;
+
+ /*
+ * Don't allow user space to provide buffers not big enough to hold
+ * a line of data.
+ */
+ if (count < PIPE_CRC_LINE_LEN)
+ return -EINVAL;
+
+ if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+ return 0;
+
+ /* nothing to read */
+ spin_lock_irq(&pipe_crc->lock);
+ while (pipe_crc_data_count(pipe_crc) == 0) {
+ int ret;
+
+ if (filep->f_flags & O_NONBLOCK) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return -EAGAIN;
+ }
+
+ ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+ pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+ if (ret) {
+ spin_unlock_irq(&pipe_crc->lock);
+ return ret;
+ }
+ }
+
+ /* We now have one or more entries to read */
+ n_entries = count / PIPE_CRC_LINE_LEN;
+
+ bytes_read = 0;
+ while (n_entries > 0) {
+ struct intel_pipe_crc_entry *entry =
+ &pipe_crc->entries[pipe_crc->tail];
+
+ if (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+ INTEL_PIPE_CRC_ENTRIES_NR) < 1)
+ break;
+
+ BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+ pipe_crc->tail = (pipe_crc->tail + 1) &
+ (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+
+ bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+ "%8u %8x %8x %8x %8x %8x\n",
+ entry->frame, entry->crc[0],
+ entry->crc[1], entry->crc[2],
+ entry->crc[3], entry->crc[4]);
+
+ spin_unlock_irq(&pipe_crc->lock);
+
+ if (copy_to_user(user_buf, buf, PIPE_CRC_LINE_LEN))
+ return -EFAULT;
+
+ user_buf += PIPE_CRC_LINE_LEN;
+ n_entries--;
+
+ spin_lock_irq(&pipe_crc->lock);
+ }
+
+ spin_unlock_irq(&pipe_crc->lock);
+
+ return bytes_read;
+}
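The read side above relies on INTEL_PIPE_CRC_ENTRIES_NR being a power of two, so CIRC_CNT() and the tail advance reduce to simple masking. A toy single-producer sketch of that indexing (names made up; the driver additionally serialises with pipe_crc->lock, and this toy never fills the ring, so the full/empty distinction is left aside):

	#include <stdio.h>

	#define RING_SIZE 8	/* stand-in for INTEL_PIPE_CRC_ENTRIES_NR */

	struct ring {
		unsigned int head, tail;
		int slots[RING_SIZE];
	};

	/* Same expression as the kernel's CIRC_CNT(head, tail, size). */
	static unsigned int ring_count(const struct ring *r)
	{
		return (r->head - r->tail) & (RING_SIZE - 1);
	}

	static void ring_push(struct ring *r, int v)
	{
		r->slots[r->head] = v;
		r->head = (r->head + 1) & (RING_SIZE - 1);
	}

	static int ring_pop(struct ring *r)
	{
		int v = r->slots[r->tail];

		r->tail = (r->tail + 1) & (RING_SIZE - 1);
		return v;
	}

	int main(void)
	{
		struct ring r = { 0 };
		int i;

		for (i = 0; i < 5; i++)
			ring_push(&r, i);
		while (ring_count(&r))
			printf("%d\n", ring_pop(&r));
		return 0;
	}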
+
+static const struct file_operations i915_pipe_crc_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_pipe_crc_open,
+ .read = i915_pipe_crc_read,
+ .release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+ {
+ .name = "i915_pipe_A_crc",
+ .pipe = PIPE_A,
+ },
+ {
+ .name = "i915_pipe_B_crc",
+ .pipe = PIPE_B,
+ },
+ {
+ .name = "i915_pipe_C_crc",
+ .pipe = PIPE_C,
+ },
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+ enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(minor->dev);
+ struct dentry *ent;
+ struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+ info->dev_priv = dev_priv;
+ ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+ &i915_pipe_crc_fops);
+ if (!ent)
+ return -ENOMEM;
+
+ return drm_add_fake_info_node(minor, ent, info);
+}
+
+static const char * const pipe_crc_sources[] = {
+ "none",
+ "plane1",
+ "plane2",
+ "pf",
+ "pipe",
+ "TV",
+ "DP-B",
+ "DP-C",
+ "DP-D",
+ "auto",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+ return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ int i;
+
+ for (i = 0; i < I915_MAX_PIPES; i++)
+ seq_printf(m, "%c %s\n", pipe_name(i),
+ pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+ return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, display_crc_ctl_show, inode->i_private);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ struct intel_digital_port *dig_port;
+ int ret = 0;
+
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_encoder(dev, encoder) {
+ if (!encoder->base.crtc)
+ continue;
+
+ crtc = to_intel_crtc(encoder->base.crtc);
+
+ if (crtc->pipe != pipe)
+ continue;
+
+ switch (encoder->type) {
+ case INTEL_OUTPUT_TVOUT:
+ *source = INTEL_PIPE_CRC_SOURCE_TV;
+ break;
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_EDP:
+ dig_port = enc_to_dig_port(&encoder->base);
+ switch (dig_port->port) {
+ case PORT_B:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_B;
+ break;
+ case PORT_C:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_C;
+ break;
+ case PORT_D:
+ *source = INTEL_PIPE_CRC_SOURCE_DP_D;
+ break;
+ default:
+ WARN(1, "nonexisting DP port %c\n",
+ port_name(dig_port->port));
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ drm_modeset_unlock_all(dev);
+
+ return ret;
+}
+
+static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_CHERRYVIEW(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_VLV;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ tmp |= DC_BALANCE_RESET_VLV;
+ switch (pipe) {
+ case PIPE_A:
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ break;
+ case PIPE_B:
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+ break;
+ case PIPE_C:
+ tmp |= PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return -EINVAL;
+ }
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ bool need_stable_symbols = false;
+
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
+ int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
+ if (ret)
+ return ret;
+ }
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_TV:
+ if (!SUPPORTS_TV(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ if (!IS_G4X(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ if (!IS_G4X(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ if (!IS_G4X(dev_priv))
+ return -EINVAL;
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+ need_stable_symbols = true;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * When the pipe CRC tap point is after the transcoders we need
+ * to tweak symbol-level features to produce a deterministic series of
+ * symbols for a given frame. We need to reset those features only once
+ * a frame (instead of every nth symbol):
+ * - DC-balance: used to ensure a better clock recovery from the data
+ * link (SDVO)
+ * - DisplayPort scrambling: used for EMI reduction
+ */
+ if (need_stable_symbols) {
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ WARN_ON(!IS_G4X(dev_priv));
+
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
+
+ if (pipe == PIPE_A)
+ tmp |= PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp |= PIPE_B_SCRAMBLE_RESET;
+
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+ }
+
+ return 0;
+}
+
+static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ switch (pipe) {
+ case PIPE_A:
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ break;
+ case PIPE_B:
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ break;
+ case PIPE_C:
+ tmp &= ~PIPE_C_SCRAMBLE_RESET;
+ break;
+ default:
+ return;
+ }
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
+ tmp &= ~DC_BALANCE_RESET_VLV;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+
+}
+
+static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+
+ if (pipe == PIPE_A)
+ tmp &= ~PIPE_A_SCRAMBLE_RESET;
+ else
+ tmp &= ~PIPE_B_SCRAMBLE_RESET;
+ I915_WRITE(PORT_DFT2_G4X, tmp);
+
+ if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
+ I915_WRITE(PORT_DFT_I9XX,
+ I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
+ }
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ struct intel_crtc_state *pipe_config;
+ struct drm_atomic_state *state;
+ int ret = 0;
+
+ drm_modeset_lock_all(dev);
+ state = drm_atomic_state_alloc(dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
+ pipe_config = intel_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(pipe_config)) {
+ ret = PTR_ERR(pipe_config);
+ goto put_state;
+ }
+
+ pipe_config->pch_pfit.force_thru = enable;
+ if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
+ pipe_config->pch_pfit.enabled != enable)
+ pipe_config->base.connectors_changed = true;
+
+ ret = drm_atomic_commit(state);
+
+put_state:
+ drm_atomic_state_put(state);
+unlock:
+ WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+ drm_modeset_unlock_all(dev);
+}
+
+static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ uint32_t *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PF;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PF:
+ if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
+
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source, u32 *val)
+{
+ if (IS_GEN2(dev_priv))
+ return i8xx_pipe_crc_ctl_reg(source, val);
+ else if (INTEL_GEN(dev_priv) < 5)
+ return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ return ilk_pipe_crc_ctl_reg(source, val);
+ else
+ return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+}
+
+static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source source)
+{
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ enum intel_display_power_domain power_domain;
+ u32 val = 0; /* shut up gcc */
+ int ret;
+
+ if (pipe_crc->source == source)
+ return 0;
+
+ /* forbid changing the source without going back to 'none' */
+ if (pipe_crc->source && source)
+ return -EINVAL;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ return -EIO;
+ }
+
+ ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
+ if (ret != 0)
+ goto out;
+
+ /* none -> real source transition */
+ if (source) {
+ struct intel_pipe_crc_entry *entries;
+
+ DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+ pipe_name(pipe), pipe_crc_source_name(source));
+
+ entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
+ sizeof(pipe_crc->entries[0]),
+ GFP_KERNEL);
+ if (!entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ hsw_disable_ips(crtc);
+
+ spin_lock_irq(&pipe_crc->lock);
+ kfree(pipe_crc->entries);
+ pipe_crc->entries = entries;
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
+ spin_unlock_irq(&pipe_crc->lock);
+ }
+
+ pipe_crc->source = source;
+
+ I915_WRITE(PIPE_CRC_CTL(pipe), val);
+ POSTING_READ(PIPE_CRC_CTL(pipe));
+
+ /* real source -> none transition */
+ if (!source) {
+ struct intel_pipe_crc_entry *entries;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ pipe);
+
+ DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+ pipe_name(pipe));
+
+ drm_modeset_lock(&crtc->base.mutex, NULL);
+ if (crtc->base.state->active)
+ intel_wait_for_vblank(dev_priv, pipe);
+ drm_modeset_unlock(&crtc->base.mutex);
+
+ spin_lock_irq(&pipe_crc->lock);
+ entries = pipe_crc->entries;
+ pipe_crc->entries = NULL;
+ pipe_crc->head = 0;
+ pipe_crc->tail = 0;
+ spin_unlock_irq(&pipe_crc->lock);
+
+ kfree(entries);
+
+ if (IS_G4X(dev_priv))
+ g4x_undo_pipe_scramble_reset(dev_priv, pipe);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_undo_pipe_scramble_reset(dev_priv, pipe);
+ else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
+
+ hsw_enable_ips(crtc);
+ }
+
+ ret = 0;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ * command: wsp* object wsp+ name wsp+ source wsp*
+ * object: 'pipe'
+ * name: (A | B | C)
+ * source: (none | plane1 | plane2 | pf)
+ * wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * eg.:
+ * "pipe A plane1" -> Start CRC computations on plane1 of pipe A
+ * "pipe A none" -> Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+ int n_words = 0;
+
+ while (*buf) {
+ char *end;
+
+ /* skip leading white space */
+ buf = skip_spaces(buf);
+ if (!*buf)
+ break; /* end of buffer */
+
+ /* find end of word */
+ for (end = buf; *end && !isspace(*end); end++)
+ ;
+
+ if (n_words == max_words) {
+ DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+ max_words);
+ return -EINVAL; /* ran out of words[] before bytes */
+ }
+
+ if (*end)
+ *end++ = '\0';
+ words[n_words++] = buf;
+ buf = end;
+ }
+
+ return n_words;
+}
+
+enum intel_pipe_crc_object {
+ PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+ "pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+ if (!strcmp(buf, pipe_crc_objects[i])) {
+ *o = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+ const char name = buf[0];
+
+ if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+ return -EINVAL;
+
+ *pipe = name - 'A';
+
+ return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+ int i;
+
+ if (!buf) {
+ *s = INTEL_PIPE_CRC_SOURCE_NONE;
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+ if (!strcmp(buf, pipe_crc_sources[i])) {
+ *s = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_i915_private *dev_priv,
+ char *buf, size_t len)
+{
+#define N_WORDS 3
+ int n_words;
+ char *words[N_WORDS];
+ enum pipe pipe;
+ enum intel_pipe_crc_object object;
+ enum intel_pipe_crc_source source;
+
+ n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+ if (n_words != N_WORDS) {
+ DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+ N_WORDS);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+ DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+ DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+ return -EINVAL;
+ }
+
+ if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+ return -EINVAL;
+ }
+
+ return pipe_crc_set_source(dev_priv, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ char *tmpbuf;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ if (len > PAGE_SIZE - 1) {
+ DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+ PAGE_SIZE);
+ return -E2BIG;
+ }
+
+ tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+ if (!tmpbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(tmpbuf, ubuf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ tmpbuf[len] = '\0';
+
+ ret = display_crc_ctl_parse(dev_priv, tmpbuf, len);
+
+out:
+ kfree(tmpbuf);
+ if (ret < 0)
+ return ret;
+
+ *offp += len;
+ return len;
+}
+
+const struct file_operations i915_display_crc_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = display_crc_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = display_crc_ctl_write
+};
+
+void intel_display_crc_init(struct drm_i915_private *dev_priv)
+{
+ enum pipe pipe;
+
+ for_each_pipe(dev_priv, pipe) {
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+
+ pipe_crc->opened = false;
+ spin_lock_init(&pipe_crc->lock);
+ init_waitqueue_head(&pipe_crc->wq);
+ }
+}
+
+int intel_pipe_crc_create(struct drm_minor *minor)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void intel_pipe_crc_cleanup(struct drm_minor *minor)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+ struct drm_info_list *info_list =
+ (struct drm_info_list *)&i915_pipe_crc_data[i];
+
+ drm_debugfs_remove_files(info_list, 1, minor);
+ }
+}
+
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
+{
+ struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+ struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ enum intel_display_power_domain power_domain;
+ enum intel_pipe_crc_source source;
+ u32 val = 0; /* shut up gcc */
+ int ret = 0;
+
+ if (display_crc_ctl_parse_source(source_name, &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ return -EINVAL;
+ }
+
+ power_domain = POWER_DOMAIN_PIPE(crtc->index);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ return -EIO;
+ }
+
+ ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
+ if (ret != 0)
+ goto out;
+
+ if (source) {
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ hsw_disable_ips(intel_crtc);
+ }
+
+ I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
+ POSTING_READ(PIPE_CRC_CTL(crtc->index));
+
+ if (!source) {
+ if (IS_G4X(dev_priv))
+ g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
+ else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
+ hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
+
+ hsw_enable_ips(intel_crtc);
+ }
+
+ pipe_crc->skipped = 0;
+ *values_cnt = 5;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
+}
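Putting the two debugfs nodes together, a hedged userspace sketch of how a CRC capture might be driven, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0; the control strings follow the grammar parsed by display_crc_ctl_parse() and each result line is the six-column format emitted by i915_pipe_crc_read():

	#include <stdio.h>

	int main(void)
	{
		const char *ctl = "/sys/kernel/debug/dri/0/i915_display_crc_ctl";
		const char *crc = "/sys/kernel/debug/dri/0/i915_pipe_A_crc";
		unsigned int frame, res[5];
		FILE *f;
		int i;

		/* Start CRC generation on pipe A from the pipe tap point. */
		f = fopen(ctl, "w");
		if (!f)
			return 1;
		fputs("pipe A pipe\n", f);
		fclose(f);

		/* Each line is "frame crc0..crc4", blocking until data arrives. */
		f = fopen(crc, "r");
		if (!f)
			return 1;
		for (i = 0; i < 10; i++) {
			if (fscanf(f, "%u %x %x %x %x %x", &frame, &res[0],
				   &res[1], &res[2], &res[3], &res[4]) != 6)
				break;
			printf("frame %u crc0 %08x\n", frame, res[0]);
		}
		fclose(f);

		/* Stop CRC generation again. */
		f = fopen(ctl, "w");
		if (f) {
			fputs("pipe A none\n", f);
			fclose(f);
		}
		return 0;
	}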
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ae2c0bb4b2e8..249623d45be0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -312,23 +312,30 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
#define FW_WM(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
-void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
+ bool was_enabled;
u32 val;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
POSTING_READ(FW_BLC_SELF_VLV);
- dev_priv->wm.vlv.cxsr = enable;
- } else if (IS_G4X(dev_priv) || IS_CRESTLINE(dev_priv)) {
+ } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) {
+ was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
POSTING_READ(FW_BLC_SELF);
} else if (IS_PINEVIEW(dev_priv)) {
- val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
- val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+ val = I915_READ(DSPFW3);
+ was_enabled = val & PINEVIEW_SELF_REFRESH_EN;
+ if (enable)
+ val |= PINEVIEW_SELF_REFRESH_EN;
+ else
+ val &= ~PINEVIEW_SELF_REFRESH_EN;
I915_WRITE(DSPFW3, val);
POSTING_READ(DSPFW3);
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) {
+ was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
_MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
I915_WRITE(FW_BLC_SELF, val);
@@ -339,17 +346,33 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
* and yet it does have the related watermark in
* FW_BLC_SELF. What's going on?
*/
+ was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
_MASKED_BIT_DISABLE(INSTPM_SELF_EN);
I915_WRITE(INSTPM, val);
POSTING_READ(INSTPM);
} else {
- return;
+ return false;
}
- DRM_DEBUG_KMS("memory self-refresh is %s\n", enableddisabled(enable));
+ DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
+ enableddisabled(enable),
+ enableddisabled(was_enabled));
+
+ return was_enabled;
}
+bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
+{
+ bool ret;
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ ret = _intel_set_memory_cxsr(dev_priv, enable);
+ dev_priv->wm.vlv.cxsr = enable;
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+
+ return ret;
+}
/*
* Latency for FIFO fetches is dependent on several factors:
@@ -370,12 +393,15 @@ static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
- enum pipe pipe, int plane)
+static int vlv_get_fifo_size(struct intel_plane *plane)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int sprite0_start, sprite1_start, size;
- switch (pipe) {
+ if (plane->id == PLANE_CURSOR)
+ return 63;
+
+ switch (plane->pipe) {
uint32_t dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
@@ -399,24 +425,21 @@ static int vlv_get_fifo_size(struct drm_i915_private *dev_priv,
return 0;
}
- switch (plane) {
- case 0:
+ switch (plane->id) {
+ case PLANE_PRIMARY:
size = sprite0_start;
break;
- case 1:
+ case PLANE_SPRITE0:
size = sprite1_start - sprite0_start;
break;
- case 2:
+ case PLANE_SPRITE1:
size = 512 - 1 - sprite1_start;
break;
default:
return 0;
}
- DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
- pipe_name(pipe), plane == 0 ? "primary" : "sprite",
- plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
- size);
+ DRM_DEBUG_KMS("%s FIFO size: %d\n", plane->base.name, size);
return size;
}
@@ -652,7 +675,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
&crtc->config->base.adjusted_mode;
const struct drm_framebuffer *fb =
crtc->base.primary->state->fb;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = fb->format->cpp[0];
int clock = adjusted_mode->crtc_clock;
/* Display SR */
@@ -727,7 +750,7 @@ static bool g4x_compute_wm0(struct drm_i915_private *dev_priv,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = crtc->config->pipe_src_w;
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
@@ -816,7 +839,7 @@ static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
hdisplay = crtc->config->pipe_src_w;
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
line_time_us = max(htotal * 1000 / clock, 1);
line_count = (latency_ns / line_time_us + 1000) / 1000;
@@ -842,71 +865,77 @@ static bool g4x_compute_srwm(struct drm_i915_private *dev_priv,
#define FW_WM_VLV(value, plane) \
(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)
-static void vlv_write_wm_values(struct intel_crtc *crtc,
+static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
const struct vlv_wm_values *wm)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
+ enum pipe pipe;
- I915_WRITE(VLV_DDL(pipe),
- (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
- (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
- (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
- (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));
+ for_each_pipe(dev_priv, pipe) {
+ I915_WRITE(VLV_DDL(pipe),
+ (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
+ (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) |
+ (wm->ddl[pipe].plane[PLANE_PRIMARY] << DDL_PLANE_SHIFT));
+ }
+
+ /*
+ * Zero the (unused) WM1 watermarks, and also clear all the
+ * high order bits so that there are no out of bounds values
+ * present in the registers during the reprogramming.
+ */
+ I915_WRITE(DSPHOWM, 0);
+ I915_WRITE(DSPHOWM1, 0);
+ I915_WRITE(DSPFW4, 0);
+ I915_WRITE(DSPFW5, 0);
+ I915_WRITE(DSPFW6, 0);
I915_WRITE(DSPFW1,
FW_WM(wm->sr.plane, SR) |
- FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
- FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
- FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA));
I915_WRITE(DSPFW2,
- FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
- FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
- FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) |
+ FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA));
I915_WRITE(DSPFW3,
FW_WM(wm->sr.cursor, CURSOR_SR));
if (IS_CHERRYVIEW(dev_priv)) {
I915_WRITE(DSPFW7_CHV,
- FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
I915_WRITE(DSPFW8_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
- FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) |
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE));
I915_WRITE(DSPFW9_CHV,
- FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
- FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
+ FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC));
I915_WRITE(DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
- FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
- FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
- FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) |
+ FW_WM(wm->pipe[PIPE_C].plane[PLANE_PRIMARY] >> 8, PLANEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
} else {
I915_WRITE(DSPFW7,
- FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
- FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) |
+ FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC));
I915_WRITE(DSPHOWM,
FW_WM(wm->sr.plane >> 9, SR_HI) |
- FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
- FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
- FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
- FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
- FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
- FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) |
+ FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY] >> 8, PLANEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE1] >> 8, SPRITEB_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) |
+ FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI));
}
- /* zero (unused) WM1 watermarks */
- I915_WRITE(DSPFW4, 0);
- I915_WRITE(DSPFW5, 0);
- I915_WRITE(DSPFW6, 0);
- I915_WRITE(DSPHOWM1, 0);
-
POSTING_READ(DSPFW1);
}
@@ -949,24 +978,26 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
-static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
- struct intel_crtc *crtc,
- const struct intel_plane_state *state,
+static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
int level)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int clock, htotal, cpp, width, wm;
if (dev_priv->wm.pri_latency[level] == 0)
return USHRT_MAX;
- if (!state->base.visible)
+ if (!plane_state->base.visible)
return 0;
- cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
- clock = crtc->config->base.adjusted_mode.crtc_clock;
- htotal = crtc->config->base.adjusted_mode.crtc_htotal;
- width = crtc->config->pipe_src_w;
+ cpp = plane_state->base.fb->format->cpp[0];
+ clock = adjusted_mode->crtc_clock;
+ htotal = adjusted_mode->crtc_htotal;
+ width = crtc_state->pipe_src_w;
if (WARN_ON(htotal == 0))
htotal = 1;
@@ -1004,7 +1035,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
if (state->base.visible) {
wm_state->num_active_planes++;
- total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+ total_rate += state->base.fb->format->cpp[0];
}
}
@@ -1023,7 +1054,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
continue;
}
- rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+ rate = state->base.fb->format->cpp[0];
plane->wm.fifo_size = fifo_size * rate / total_rate;
fifo_left -= plane->wm.fifo_size;
}
@@ -1053,48 +1084,45 @@ static void vlv_compute_fifo(struct intel_crtc *crtc)
WARN_ON(fifo_left != 0);
}
+static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
+{
+ if (wm > fifo_size)
+ return USHRT_MAX;
+ else
+ return fifo_size - wm;
+}
+
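The new helper replaces the per-plane-type inversion switch removed from vlv_invert_wms() below: the value actually programmed is the plane's FIFO share minus the computed watermark, and a watermark that exceeds the share saturates to USHRT_MAX instead of wrapping. A self-contained sketch (the numbers in main() are hypothetical, chosen only to show the two cases):

/* Illustration of the inverted-watermark representation; not driver code. */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static uint16_t vlv_invert_wm_value(uint16_t wm, uint16_t fifo_size)
{
	if (wm > fifo_size)
		return USHRT_MAX;	/* does not fit -> saturate */
	else
		return fifo_size - wm;	/* free entries remaining */
}

int main(void)
{
	printf("%u\n", (unsigned)vlv_invert_wm_value(20, 96));   /* 76 */
	printf("%u\n", (unsigned)vlv_invert_wm_value(100, 96));  /* 65535 */
	return 0;
}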
static void vlv_invert_wms(struct intel_crtc *crtc)
{
struct vlv_wm_state *wm_state = &crtc->wm_state;
int level;
for (level = 0; level < wm_state->num_levels; level++) {
- struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const int sr_fifo_size =
- INTEL_INFO(to_i915(dev))->num_pipes * 512 - 1;
+ INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
struct intel_plane *plane;
- wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
- wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;
-
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- switch (plane->base.type) {
- int sprite;
- case DRM_PLANE_TYPE_CURSOR:
- wm_state->wm[level].cursor = plane->wm.fifo_size -
- wm_state->wm[level].cursor;
- break;
- case DRM_PLANE_TYPE_PRIMARY:
- wm_state->wm[level].primary = plane->wm.fifo_size -
- wm_state->wm[level].primary;
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- sprite = plane->plane;
- wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
- wm_state->wm[level].sprite[sprite];
- break;
- }
+ wm_state->sr[level].plane =
+ vlv_invert_wm_value(wm_state->sr[level].plane,
+ sr_fifo_size);
+ wm_state->sr[level].cursor =
+ vlv_invert_wm_value(wm_state->sr[level].cursor,
+ 63);
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ wm_state->wm[level].plane[plane->id] =
+ vlv_invert_wm_value(wm_state->wm[level].plane[plane->id],
+ plane->wm.fifo_size);
}
}
}
static void vlv_compute_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct vlv_wm_state *wm_state = &crtc->wm_state;
struct intel_plane *plane;
- int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
int level;
memset(wm_state, 0, sizeof(*wm_state));
@@ -1109,45 +1137,27 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
if (wm_state->num_active_planes != 1)
wm_state->cxsr = false;
- if (wm_state->cxsr) {
- for (level = 0; level < wm_state->num_levels; level++) {
- wm_state->sr[level].plane = sr_fifo_size;
- wm_state->sr[level].cursor = 63;
- }
- }
-
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
struct intel_plane_state *state =
to_intel_plane_state(plane->base.state);
+ int level;
if (!state->base.visible)
continue;
/* normal watermarks */
for (level = 0; level < wm_state->num_levels; level++) {
- int wm = vlv_compute_wm_level(plane, crtc, state, level);
- int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;
+ int wm = vlv_compute_wm_level(crtc->config, state, level);
+ int max_wm = plane->wm.fifo_size;
/* hack */
if (WARN_ON(level == 0 && wm > max_wm))
wm = max_wm;
- if (wm > plane->wm.fifo_size)
+ if (wm > max_wm)
break;
- switch (plane->base.type) {
- int sprite;
- case DRM_PLANE_TYPE_CURSOR:
- wm_state->wm[level].cursor = wm;
- break;
- case DRM_PLANE_TYPE_PRIMARY:
- wm_state->wm[level].primary = wm;
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- sprite = plane->plane;
- wm_state->wm[level].sprite[sprite] = wm;
- break;
- }
+ wm_state->wm[level].plane[plane->id] = wm;
}
wm_state->num_levels = level;
@@ -1156,26 +1166,15 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
continue;
/* maxfifo watermarks */
- switch (plane->base.type) {
- int sprite, level;
- case DRM_PLANE_TYPE_CURSOR:
+ if (plane->id == PLANE_CURSOR) {
for (level = 0; level < wm_state->num_levels; level++)
wm_state->sr[level].cursor =
- wm_state->wm[level].cursor;
- break;
- case DRM_PLANE_TYPE_PRIMARY:
- for (level = 0; level < wm_state->num_levels; level++)
- wm_state->sr[level].plane =
- min(wm_state->sr[level].plane,
- wm_state->wm[level].primary);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- sprite = plane->plane;
+ wm_state->wm[level].plane[PLANE_CURSOR];
+ } else {
for (level = 0; level < wm_state->num_levels; level++)
wm_state->sr[level].plane =
- min(wm_state->sr[level].plane,
- wm_state->wm[level].sprite[sprite]);
- break;
+ max(wm_state->sr[level].plane,
+ wm_state->wm[level].plane[plane->id]);
}
}
@@ -1199,17 +1198,23 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
- if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
- WARN_ON(plane->wm.fifo_size != 63);
- continue;
- }
-
- if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ switch (plane->id) {
+ case PLANE_PRIMARY:
sprite0_start = plane->wm.fifo_size;
- else if (plane->plane == 0)
+ break;
+ case PLANE_SPRITE0:
sprite1_start = sprite0_start + plane->wm.fifo_size;
- else
+ break;
+ case PLANE_SPRITE1:
fifo_size = sprite1_start + plane->wm.fifo_size;
+ break;
+ case PLANE_CURSOR:
+ WARN_ON(plane->wm.fifo_size != 63);
+ break;
+ default:
+ MISSING_CASE(plane->id);
+ break;
+ }
}
WARN_ON(fifo_size != 512 - 1);
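The rewritten switch keeps the same cumulative layout, just keyed on plane->id: sprite0_start is the end of the primary plane's FIFO share, sprite1_start the end of sprite 0's, and fifo_size the end of sprite 1's, while the fixed 63-entry cursor FIFO is only sanity-checked. With hypothetical shares of 128, 128 and 255 entries for primary, sprite 0 and sprite 1, that gives sprite0_start = 128, sprite1_start = 256 and fifo_size = 511 = 512 - 1, which is exactly what the WARN_ON above expects.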
@@ -1218,6 +1223,8 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
pipe_name(crtc->pipe), sprite0_start,
sprite1_start, fifo_size);
+ spin_lock(&dev_priv->wm.dsparb_lock);
+
switch (crtc->pipe) {
uint32_t dsparb, dsparb2, dsparb3;
case PIPE_A:
@@ -1274,20 +1281,24 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
default:
break;
}
+
+ POSTING_READ(DSPARB);
+
+ spin_unlock(&dev_priv->wm.dsparb_lock);
}
#undef VLV_FIFO
-static void vlv_merge_wm(struct drm_device *dev,
+static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
struct intel_crtc *crtc;
int num_active_crtcs = 0;
- wm->level = to_i915(dev)->wm.max_level;
+ wm->level = dev_priv->wm.max_level;
wm->cxsr = true;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
const struct vlv_wm_state *wm_state = &crtc->wm_state;
if (!crtc->active)
@@ -1306,7 +1317,7 @@ static void vlv_merge_wm(struct drm_device *dev,
if (num_active_crtcs > 1)
wm->level = VLV_WM_LEVEL_PM2;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
struct vlv_wm_state *wm_state = &crtc->wm_state;
enum pipe pipe = crtc->pipe;
@@ -1317,63 +1328,70 @@ static void vlv_merge_wm(struct drm_device *dev,
if (wm->cxsr)
wm->sr = wm_state->sr[wm->level];
- wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
- wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE0] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_SPRITE1] = DDL_PRECISION_HIGH | 2;
+ wm->ddl[pipe].plane[PLANE_CURSOR] = DDL_PRECISION_HIGH | 2;
}
}
+static bool is_disabling(int old, int new, int threshold)
+{
+ return old >= threshold && new < threshold;
+}
+
+static bool is_enabling(int old, int new, int threshold)
+{
+ return old < threshold && new >= threshold;
+}
+
static void vlv_update_wm(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- struct vlv_wm_values wm = {};
+ struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
+ struct vlv_wm_values new_wm = {};
vlv_compute_wm(crtc);
- vlv_merge_wm(dev, &wm);
+ vlv_merge_wm(dev_priv, &new_wm);
- if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) {
/* FIXME should be part of crtc atomic commit */
vlv_pipe_set_fifo_size(crtc);
+
return;
}
- if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
- dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
chv_set_memory_dvfs(dev_priv, false);
- if (wm.level < VLV_WM_LEVEL_PM5 &&
- dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
+ if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
chv_set_memory_pm5(dev_priv, false);
- if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
- intel_set_memory_cxsr(dev_priv, false);
+ if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, false);
/* FIXME should be part of crtc atomic commit */
vlv_pipe_set_fifo_size(crtc);
- vlv_write_wm_values(crtc, &wm);
+ vlv_write_wm_values(dev_priv, &new_wm);
DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
"sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
- pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
- wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
- wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);
+ pipe_name(pipe), new_wm.pipe[pipe].plane[PLANE_PRIMARY], new_wm.pipe[pipe].plane[PLANE_CURSOR],
+ new_wm.pipe[pipe].plane[PLANE_SPRITE0], new_wm.pipe[pipe].plane[PLANE_SPRITE1],
+ new_wm.sr.plane, new_wm.sr.cursor, new_wm.level, new_wm.cxsr);
- if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
- intel_set_memory_cxsr(dev_priv, true);
+ if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
+ _intel_set_memory_cxsr(dev_priv, true);
- if (wm.level >= VLV_WM_LEVEL_PM5 &&
- dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_PM5))
chv_set_memory_pm5(dev_priv, true);
- if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
- dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
+ if (is_enabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
chv_set_memory_dvfs(dev_priv, true);
- dev_priv->wm.vlv = wm;
+ *old_wm = new_wm;
}
#define single_plane_enabled(mask) is_power_of_2(mask)
@@ -1455,7 +1473,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = crtc->config->pipe_src_w;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = fb->format->cpp[0];
unsigned long line_time_us;
int entries;
@@ -1541,7 +1559,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_GEN2(dev_priv))
cpp = 4;
else
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
@@ -1568,7 +1586,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_GEN2(dev_priv))
cpp = 4;
else
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
wm_info, fifo_size, cpp,
@@ -1621,7 +1639,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
cpp = 4;
else
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
line_time_us = max(htotal * 1000 / clock, 1);
@@ -1781,13 +1799,14 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
uint32_t mem_value,
bool is_lp)
{
- int cpp = pstate->base.fb ?
- drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
+ int cpp;
if (!cstate->base.active || !pstate->base.visible)
return 0;
+ cpp = pstate->base.fb->format->cpp[0];
+
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
if (!is_lp)
@@ -1809,13 +1828,14 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
- int cpp = pstate->base.fb ?
- drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
uint32_t method1, method2;
+ int cpp;
if (!cstate->base.active || !pstate->base.visible)
return 0;
+ cpp = pstate->base.fb->format->cpp[0];
+
method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
@@ -1853,12 +1873,13 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t pri_val)
{
- int cpp = pstate->base.fb ?
- drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
+ int cpp;
if (!cstate->base.active || !pstate->base.visible)
return 0;
+ cpp = pstate->base.fb->format->cpp[0];
+
return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
}
@@ -2867,28 +2888,6 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
#define SKL_SAGV_BLOCK_TIME 30 /* µs */
/*
- * Return the index of a plane in the SKL DDB and wm result arrays. Primary
- * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
- * other universal planes are in indices 1..n. Note that this may leave unused
- * indices between the top "sprite" plane and the cursor.
- */
-static int
-skl_wm_plane_id(const struct intel_plane *plane)
-{
- switch (plane->base.type) {
- case DRM_PLANE_TYPE_PRIMARY:
- return 0;
- case DRM_PLANE_TYPE_CURSOR:
- return PLANE_CURSOR;
- case DRM_PLANE_TYPE_OVERLAY:
- return plane->plane + 1;
- default:
- MISSING_CASE(plane->base.type);
- return plane->plane;
- }
-}
-
-/*
 * FIXME: We still don't have the proper code to detect if we need to apply the WA,
* so assume we'll always need it in order to avoid underruns.
*/
@@ -3010,7 +3009,6 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
struct intel_crtc *crtc;
struct intel_plane *plane;
struct intel_crtc_state *cstate;
- struct skl_plane_wm *wm;
enum pipe pipe;
int level, latency;
@@ -3037,7 +3035,8 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
return false;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
- wm = &cstate->wm.skl.optimal.planes[skl_wm_plane_id(plane)];
+ struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane->id];
/* Skip this plane if it's not enabled */
if (!wm->wm[0].plane_en)
@@ -3140,28 +3139,29 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
- enum pipe pipe;
- int plane;
- u32 val;
+ struct intel_crtc *crtc;
memset(ddb, 0, sizeof(*ddb));
- for_each_pipe(dev_priv, pipe) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
enum intel_display_power_domain power_domain;
+ enum plane_id plane_id;
+ enum pipe pipe = crtc->pipe;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
- for_each_universal_plane(dev_priv, pipe, plane) {
- val = I915_READ(PLANE_BUF_CFG(pipe, plane));
- skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
- val);
- }
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ u32 val;
- val = I915_READ(CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
- val);
+ if (plane_id != PLANE_CURSOR)
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ else
+ val = I915_READ(CUR_BUF_CFG(pipe));
+
+ skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane_id], val);
+ }
intel_display_power_put(dev_priv, power_domain);
}
@@ -3213,13 +3213,17 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
int y)
{
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
- struct drm_framebuffer *fb = pstate->fb;
uint32_t down_scale_amount, data_rate;
uint32_t width = 0, height = 0;
- unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
+ struct drm_framebuffer *fb;
+ u32 format;
if (!intel_pstate->base.visible)
return 0;
+
+ fb = pstate->fb;
+ format = fb->format->format;
+
if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
return 0;
if (y && format != DRM_FORMAT_NV12)
@@ -3235,13 +3239,13 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */
data_rate = width * height *
- drm_format_plane_cpp(format, 0);
+ fb->format->cpp[0];
else /* uv-plane data rate */
data_rate = (width / 2) * (height / 2) *
- drm_format_plane_cpp(format, 1);
+ fb->format->cpp[1];
} else {
/* for packed formats */
- data_rate = width * height * drm_format_plane_cpp(format, 0);
+ data_rate = width * height * fb->format->cpp[0];
}
down_scale_amount = skl_plane_downscale_amount(intel_pstate);
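For an NV12 framebuffer the function is called twice per plane, once with y = 1 for the Y plane (which uses cpp[0]) and once with y = 0 for the UV plane (cpp[1], at half width and height). A hypothetical 1920x1080 NV12 source (cpp[0] = 1, cpp[1] = 2) therefore contributes 1920 * 1080 * 1 = 2073600 to the Y-plane rate and (1920 / 2) * (1080 / 2) * 2 = 1036800 to the UV-plane rate, before the downscale adjustment is applied.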
@@ -3262,30 +3266,28 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
struct drm_plane *plane;
- const struct intel_plane *intel_plane;
const struct drm_plane_state *pstate;
- unsigned int rate, total_data_rate = 0;
- int id;
+ unsigned int total_data_rate = 0;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
- id = skl_wm_plane_id(to_intel_plane(plane));
- intel_plane = to_intel_plane(plane);
+ enum plane_id plane_id = to_intel_plane(plane)->id;
+ unsigned int rate;
/* packed/uv */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 0);
- plane_data_rate[id] = rate;
+ plane_data_rate[plane_id] = rate;
total_data_rate += rate;
/* y-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
pstate, 1);
- plane_y_data_rate[id] = rate;
+ plane_y_data_rate[plane_id] = rate;
total_data_rate += rate;
}
@@ -3307,7 +3309,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
return 0;
/* For packed formats, no y-plane, return 0 */
- if (y && fb->pixel_format != DRM_FORMAT_NV12)
+ if (y && fb->format->format != DRM_FORMAT_NV12)
return 0;
/* For Non Y-tile return 8-blocks */
@@ -3322,15 +3324,15 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
swap(src_w, src_h);
/* Halve UV plane width and height for NV12 */
- if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
+ if (fb->format->format == DRM_FORMAT_NV12 && !y) {
src_w /= 2;
src_h /= 2;
}
- if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
- plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
+ if (fb->format->format == DRM_FORMAT_NV12 && !y)
+ plane_bpp = fb->format->cpp[1];
else
- plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ plane_bpp = fb->format->cpp[0];
if (drm_rotation_90_or_270(pstate->rotation)) {
switch (plane_bpp) {
@@ -3364,17 +3366,16 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
struct drm_plane *plane;
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
- struct intel_plane *intel_plane = to_intel_plane(plane);
- int id = skl_wm_plane_id(intel_plane);
+ enum plane_id plane_id = to_intel_plane(plane)->id;
- if (id == PLANE_CURSOR)
+ if (plane_id == PLANE_CURSOR)
continue;
if (!pstate->visible)
continue;
- minimum[id] = skl_ddb_min_alloc(pstate, 0);
- y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
+ minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
+ y_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
}
minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -3394,8 +3395,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t minimum[I915_MAX_PLANES] = {};
uint16_t y_minimum[I915_MAX_PLANES] = {};
unsigned int total_data_rate;
+ enum plane_id plane_id;
int num_active;
- int id, i;
unsigned plane_data_rate[I915_MAX_PLANES] = {};
unsigned plane_y_data_rate[I915_MAX_PLANES] = {};
@@ -3426,9 +3427,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* proportional to the data rate.
*/
- for (i = 0; i < I915_MAX_PLANES; i++) {
- alloc_size -= minimum[i];
- alloc_size -= y_minimum[i];
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ alloc_size -= minimum[plane_id];
+ alloc_size -= y_minimum[plane_id];
}
ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
@@ -3447,28 +3448,28 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
start = alloc->start;
- for (id = 0; id < I915_MAX_PLANES; id++) {
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
unsigned int data_rate, y_data_rate;
uint16_t plane_blocks, y_plane_blocks = 0;
- if (id == PLANE_CURSOR)
+ if (plane_id == PLANE_CURSOR)
continue;
- data_rate = plane_data_rate[id];
+ data_rate = plane_data_rate[plane_id];
/*
* allocation for (packed formats) or (uv-plane part of planar format):
* promote the expression to 64 bits to avoid overflowing, the
* result is < available as data_rate / total_data_rate < 1
*/
- plane_blocks = minimum[id];
+ plane_blocks = minimum[plane_id];
plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
total_data_rate);
/* Leave disabled planes at (0,0) */
if (data_rate) {
- ddb->plane[pipe][id].start = start;
- ddb->plane[pipe][id].end = start + plane_blocks;
+ ddb->plane[pipe][plane_id].start = start;
+ ddb->plane[pipe][plane_id].end = start + plane_blocks;
}
start += plane_blocks;
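As the comment above says, the 64-bit promotion only guards the intermediate product; because data_rate / total_data_rate < 1, each plane's share stays below the remaining alloc_size. With hypothetical numbers, alloc_size = 400 remaining blocks, data_rate = 1000 and total_data_rate = 4000 give this plane minimum[plane_id] + (400 * 1000) / 4000 = minimum[plane_id] + 100 blocks.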
@@ -3476,15 +3477,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/*
* allocation for y_plane part of planar format:
*/
- y_data_rate = plane_y_data_rate[id];
+ y_data_rate = plane_y_data_rate[plane_id];
- y_plane_blocks = y_minimum[id];
+ y_plane_blocks = y_minimum[plane_id];
y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
total_data_rate);
if (y_data_rate) {
- ddb->y_plane[pipe][id].start = start;
- ddb->y_plane[pipe][id].end = start + y_plane_blocks;
+ ddb->y_plane[pipe][plane_id].start = start;
+ ddb->y_plane[pipe][plane_id].end = start + y_plane_blocks;
}
start += y_plane_blocks;
@@ -3499,32 +3500,35 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* should allow pixel_rate up to ~2 GHz which seems sufficient since max
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
-static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
+static uint_fixed_16_16_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp,
+ uint32_t latency)
{
- uint32_t wm_intermediate_val, ret;
+ uint32_t wm_intermediate_val;
+ uint_fixed_16_16_t ret;
if (latency == 0)
- return UINT_MAX;
-
- wm_intermediate_val = latency * pixel_rate * cpp / 512;
- ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
+ return FP_16_16_MAX;
+ wm_intermediate_val = latency * pixel_rate * cpp;
+ ret = fixed_16_16_div_round_up_u64(wm_intermediate_val, 1000 * 512);
return ret;
}
-static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
- uint32_t latency, uint32_t plane_blocks_per_line)
+static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
+ uint32_t pipe_htotal,
+ uint32_t latency,
+ uint_fixed_16_16_t plane_blocks_per_line)
{
- uint32_t ret;
uint32_t wm_intermediate_val;
+ uint_fixed_16_16_t ret;
if (latency == 0)
- return UINT_MAX;
+ return FP_16_16_MAX;
wm_intermediate_val = latency * pixel_rate;
- ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
- plane_blocks_per_line;
-
+ wm_intermediate_val = DIV_ROUND_UP(wm_intermediate_val,
+ pipe_htotal * 1000);
+ ret = mul_u32_fixed_16_16(wm_intermediate_val, plane_blocks_per_line);
return ret;
}
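Both methods now return 16.16 fixed-point block counts instead of integers rounded at every step, so the fractional part of plane_blocks_per_line survives until the final res_blocks/res_lines conversion. The uint_fixed_16_16_t helpers are defined elsewhere in the driver; the sketch below assumes the usual "value scaled by 2^16" layout (consistent with the .val accesses later in this patch) and uses hypothetical inputs:

/*
 * Minimal sketch of the 16.16 fixed-point arithmetic used by
 * skl_wm_method1/2; the real helpers live elsewhere in the driver and may
 * differ in detail. Inputs are hypothetical: latency 4 us, 148500 kHz, cpp 4.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t val; } uint_fixed_16_16_t;

static uint_fixed_16_16_t fixed_16_16_div_round_up_u64(uint32_t a, uint32_t b)
{
	uint_fixed_16_16_t fp;

	/* promote to 64 bits so (a << 16) cannot overflow before dividing */
	fp.val = (uint32_t)((((uint64_t)a << 16) + b - 1) / b);
	return fp;
}

static uint32_t fixed_16_16_to_u32_round_up(uint_fixed_16_16_t fp)
{
	return (fp.val + 0xffff) >> 16;	/* ceil() of the 16.16 value */
}

int main(void)
{
	uint32_t latency = 4, pixel_rate = 148500, cpp = 4;
	uint_fixed_16_16_t m1;

	/* skl_wm_method1: latency * pixel_rate * cpp / (1000 * 512) blocks */
	m1 = fixed_16_16_div_round_up_u64(latency * pixel_rate * cpp, 1000 * 512);

	printf("method1 = %f blocks, rounded up to %u blocks\n",
	       m1.val / 65536.0, (unsigned)fixed_16_16_to_u32_round_up(m1));
	return 0;
}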
@@ -3564,24 +3568,36 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
struct drm_plane_state *pstate = &intel_pstate->base;
struct drm_framebuffer *fb = pstate->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
- uint32_t method1, method2;
- uint32_t plane_bytes_per_line, plane_blocks_per_line;
+ uint_fixed_16_16_t method1, method2;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t selected_result;
+ uint32_t interm_pbpl;
+ uint32_t plane_bytes_per_line;
uint32_t res_blocks, res_lines;
- uint32_t selected_result;
uint8_t cpp;
uint32_t width = 0, height = 0;
uint32_t plane_pixel_rate;
- uint32_t y_tile_minimum, y_min_scanlines;
+ uint_fixed_16_16_t y_tile_minimum;
+ uint32_t y_min_scanlines;
struct intel_atomic_state *state =
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
+ bool y_tiled, x_tiled;
if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
*enabled = false;
return 0;
}
- if (apply_memory_bw_wa && fb->modifier == I915_FORMAT_MOD_X_TILED)
+ y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED;
+ x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
+
+ /* Display WA #1141: kbl. */
+ if (IS_KABYLAKE(dev_priv) && dev_priv->ipc_enabled)
+ latency += 4;
+
+ if (apply_memory_bw_wa && x_tiled)
latency += 15;
width = drm_rect_width(&intel_pstate->base.src) >> 16;
@@ -3590,13 +3606,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
- cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ cpp = fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
if (drm_rotation_90_or_270(pstate->rotation)) {
- int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
- drm_format_plane_cpp(fb->pixel_format, 1) :
- drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = (fb->format->format == DRM_FORMAT_NV12) ?
+ fb->format->cpp[1] :
+ fb->format->cpp[0];
switch (cpp) {
case 1:
@@ -3620,16 +3636,17 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
y_min_scanlines *= 2;
plane_bytes_per_line = width * cpp;
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
+ if (y_tiled) {
+ interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line *
+ y_min_scanlines, 512);
plane_blocks_per_line =
- DIV_ROUND_UP(plane_bytes_per_line * y_min_scanlines, 512);
- plane_blocks_per_line /= y_min_scanlines;
- } else if (fb->modifier == DRM_FORMAT_MOD_NONE) {
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512)
- + 1;
+ fixed_16_16_div_round_up(interm_pbpl, y_min_scanlines);
+ } else if (x_tiled) {
+ interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
} else {
- plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
+ interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
+ plane_blocks_per_line = u32_to_fixed_16_16(interm_pbpl);
}
method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
@@ -3638,28 +3655,29 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
latency,
plane_blocks_per_line);
- y_tile_minimum = plane_blocks_per_line * y_min_scanlines;
+ y_tile_minimum = mul_u32_fixed_16_16(y_min_scanlines,
+ plane_blocks_per_line);
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
- selected_result = max(method2, y_tile_minimum);
+ if (y_tiled) {
+ selected_result = max_fixed_16_16(method2, y_tile_minimum);
} else {
if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) &&
(plane_bytes_per_line / 512 < 1))
selected_result = method2;
- else if ((ddb_allocation / plane_blocks_per_line) >= 1)
- selected_result = min(method1, method2);
+ else if ((ddb_allocation /
+ fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1)
+ selected_result = min_fixed_16_16(method1, method2);
else
selected_result = method1;
}
- res_blocks = selected_result + 1;
- res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);
+ res_blocks = fixed_16_16_to_u32_round_up(selected_result) + 1;
+ res_lines = DIV_ROUND_UP(selected_result.val,
+ plane_blocks_per_line.val);
if (level >= 1 && level <= 7) {
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED) {
- res_blocks += y_tile_minimum;
+ if (y_tiled) {
+ res_blocks += fixed_16_16_to_u32_round_up(y_tile_minimum);
res_lines += y_min_scanlines;
} else {
res_blocks++;
@@ -3676,12 +3694,12 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (level) {
return 0;
} else {
+ struct drm_plane *plane = pstate->plane;
+
DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
- DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
- to_intel_crtc(cstate->base.crtc)->pipe,
- skl_wm_plane_id(to_intel_plane(pstate->plane)),
+ DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
+ plane->base.id, plane->name,
res_blocks, ddb_allocation, res_lines);
-
return -EINVAL;
}
}
@@ -3708,7 +3726,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
uint16_t ddb_blocks;
enum pipe pipe = intel_crtc->pipe;
int ret;
- int i = skl_wm_plane_id(intel_plane);
if (state)
intel_pstate =
@@ -3731,7 +3748,7 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
WARN_ON(!intel_pstate->base.fb);
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
+ ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][intel_plane->id]);
ret = skl_compute_plane_wm(dev_priv,
cstate,
@@ -3750,7 +3767,10 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
+ struct drm_atomic_state *state = cstate->base.state;
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
uint32_t pixel_rate;
+ uint32_t linetime_wm;
if (!cstate->base.active)
return 0;
@@ -3760,8 +3780,14 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
if (WARN_ON(pixel_rate == 0))
return 0;
- return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
- pixel_rate);
+ linetime_wm = DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal *
+ 1000, pixel_rate);
+
+ /* Display WA #1135: bxt. */
+ if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled)
+ linetime_wm = DIV_ROUND_UP(linetime_wm, 2);
+
+ return linetime_wm;
}
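The value computed here is eight times the pipe's line time in microseconds, i.e. the line time in 0.125 µs steps. With a hypothetical crtc_htotal of 2200 and a pixel rate of 148500 kHz (a roughly 14.8 µs line), DIV_ROUND_UP(8 * 2200 * 1000, 148500) = 119, and the Broxton IPC workaround above halves that to DIV_ROUND_UP(119, 2) = 60.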
static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
@@ -3794,7 +3820,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
for_each_intel_plane_mask(&dev_priv->drm,
intel_plane,
cstate->base.plane_mask) {
- wm = &pipe_wm->planes[skl_wm_plane_id(intel_plane)];
+ wm = &pipe_wm->planes[intel_plane->id];
for (level = 0; level <= max_level; level++) {
ret = skl_compute_wm_level(dev_priv, ddb, cstate,
@@ -3838,7 +3864,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
const struct skl_plane_wm *wm,
const struct skl_ddb_allocation *ddb,
- int plane)
+ enum plane_id plane_id)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
@@ -3847,16 +3873,16 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
enum pipe pipe = intel_crtc->pipe;
for (level = 0; level <= max_level; level++) {
- skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane, level),
+ skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
&wm->wm[level]);
}
- skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane),
+ skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane),
- &ddb->plane[pipe][plane]);
- skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane),
- &ddb->y_plane[pipe][plane]);
+ skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
+ &ddb->plane[pipe][plane_id]);
+ skl_ddb_entry_write(dev_priv, PLANE_NV12_BUF_CFG(pipe, plane_id),
+ &ddb->y_plane[pipe][plane_id]);
}
static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
@@ -3961,17 +3987,16 @@ skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
struct drm_plane_state *plane_state;
struct drm_plane *plane;
enum pipe pipe = intel_crtc->pipe;
- int id;
WARN_ON(!drm_atomic_get_existing_crtc_state(state, crtc));
drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
- id = skl_wm_plane_id(to_intel_plane(plane));
+ enum plane_id plane_id = to_intel_plane(plane)->id;
- if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][id],
- &new_ddb->plane[pipe][id]) &&
- skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][id],
- &new_ddb->y_plane[pipe][id]))
+ if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
+ &new_ddb->plane[pipe][plane_id]) &&
+ skl_ddb_entry_equal(&cur_ddb->y_plane[pipe][plane_id],
+ &new_ddb->y_plane[pipe][plane_id]))
continue;
plane_state = drm_atomic_get_plane_state(state, plane);
@@ -4083,7 +4108,6 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
const struct intel_plane *intel_plane;
const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- int id;
int i;
for_each_crtc_in_state(state, crtc, cstate, i) {
@@ -4091,11 +4115,11 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
enum pipe pipe = intel_crtc->pipe;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+ enum plane_id plane_id = intel_plane->id;
const struct skl_ddb_entry *old, *new;
- id = skl_wm_plane_id(intel_plane);
- old = &old_ddb->plane[pipe][id];
- new = &new_ddb->plane[pipe][id];
+ old = &old_ddb->plane[pipe][plane_id];
+ new = &new_ddb->plane[pipe][plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
@@ -4185,17 +4209,21 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
enum pipe pipe = crtc->pipe;
- int plane;
+ enum plane_id plane_id;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
- for_each_universal_plane(dev_priv, pipe, plane)
- skl_write_plane_wm(crtc, &pipe_wm->planes[plane], ddb, plane);
-
- skl_write_cursor_wm(crtc, &pipe_wm->planes[PLANE_CURSOR], ddb);
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id != PLANE_CURSOR)
+ skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
+ ddb, plane_id);
+ else
+ skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
+ ddb);
+ }
}
static void skl_initial_wm(struct intel_atomic_state *state,
@@ -4310,32 +4338,29 @@ static inline void skl_wm_level_from_reg_val(uint32_t val,
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
struct skl_pipe_wm *out)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane;
- struct skl_plane_wm *wm;
enum pipe pipe = intel_crtc->pipe;
- int level, id, max_level;
+ int level, max_level;
+ enum plane_id plane_id;
uint32_t val;
max_level = ilk_wm_max_level(dev_priv);
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- id = skl_wm_plane_id(intel_plane);
- wm = &out->planes[id];
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ struct skl_plane_wm *wm = &out->planes[plane_id];
for (level = 0; level <= max_level; level++) {
- if (id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM(pipe, id, level));
+ if (plane_id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM(pipe, plane_id, level));
else
val = I915_READ(CUR_WM(pipe, level));
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
- if (id != PLANE_CURSOR)
- val = I915_READ(PLANE_WM_TRANS(pipe, id));
+ if (plane_id != PLANE_CURSOR)
+ val = I915_READ(PLANE_WM_TRANS(pipe, plane_id));
else
val = I915_READ(CUR_WM_TRANS(pipe));
@@ -4443,67 +4468,67 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
tmp = I915_READ(VLV_DDL(pipe));
- wm->ddl[pipe].primary =
+ wm->ddl[pipe].plane[PLANE_PRIMARY] =
(tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].cursor =
+ wm->ddl[pipe].plane[PLANE_CURSOR] =
(tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].sprite[0] =
+ wm->ddl[pipe].plane[PLANE_SPRITE0] =
(tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
- wm->ddl[pipe].sprite[1] =
+ wm->ddl[pipe].plane[PLANE_SPRITE1] =
(tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
}
tmp = I915_READ(DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
- wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
- wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
- wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);
+ wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB);
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB);
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA);
tmp = I915_READ(DSPFW2);
- wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
- wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
- wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB);
+ wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA);
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA);
tmp = I915_READ(DSPFW3);
wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);
if (IS_CHERRYVIEW(dev_priv)) {
tmp = I915_READ(DSPFW7_CHV);
- wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
tmp = I915_READ(DSPFW8_CHV);
- wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
- wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF);
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE);
tmp = I915_READ(DSPFW9_CHV);
- wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
- wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC);
+ wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC);
tmp = I915_READ(DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
- wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
- wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
- wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
+ wm->pipe[PIPE_C].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
} else {
tmp = I915_READ(DSPFW7);
- wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
- wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED);
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC);
tmp = I915_READ(DSPHOWM);
wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
- wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
- wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
- wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
- wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
- wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
- wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
+ wm->pipe[PIPE_B].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
+ wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8;
}
}
@@ -4520,21 +4545,8 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
vlv_read_wm_values(dev_priv, wm);
- for_each_intel_plane(dev, plane) {
- switch (plane->base.type) {
- int sprite;
- case DRM_PLANE_TYPE_CURSOR:
- plane->wm.fifo_size = 63;
- break;
- case DRM_PLANE_TYPE_PRIMARY:
- plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, 0);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- sprite = plane->plane;
- plane->wm.fifo_size = vlv_get_fifo_size(dev_priv, plane->pipe, sprite + 1);
- break;
- }
- }
+ for_each_intel_plane(dev, plane)
+ plane->wm.fifo_size = vlv_get_fifo_size(plane);
wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
@@ -4575,8 +4587,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
- pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
- wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);
+ pipe_name(pipe),
+ wm->pipe[pipe].plane[PLANE_PRIMARY],
+ wm->pipe[pipe].plane[PLANE_CURSOR],
+ wm->pipe[pipe].plane[PLANE_SPRITE0],
+ wm->pipe[pipe].plane[PLANE_SPRITE1]);
DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
@@ -4996,8 +5011,18 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
if (dev_priv->rps.cur_freq <= val)
return;
- /* Wake up the media well, as that takes a lot less
- * power than the Render well. */
+ /* The punit delays the write of the frequency and voltage until it
+ * determines the GPU is awake. During normal usage we don't want to
+ * waste power changing the frequency if the GPU is sleeping (rc6).
+ * However, the GPU and driver is now idle and we do not want to delay
+ * switching to minimum voltage (reducing power whilst idle) as we do
+ * not expect to be woken in the near future and so must flush the
+ * change by waking the device.
+ *
+ * We choose to take the media powerwell (either would do to trick the
+ * punit into committing the voltage change) as that takes a lot less
+ * power than the render powerwell.
+ */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
valleyview_set_rps(dev_priv, val);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
@@ -5219,7 +5244,7 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
if (!enable_rc6)
return 0;
- if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
DRM_INFO("RC6 disabled by BIOS\n");
return 0;
}
@@ -5253,7 +5278,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
/* All of these values are in units of 50MHz */
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
@@ -5816,7 +5841,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
int pcbr_offset;
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
- pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
+ pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv,
pcbr_offset,
I915_GTT_OFFSET_NONE,
pctx_size);
@@ -5833,7 +5858,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
* overlap with other ranges, such as the frame buffer, protected
* memory, or any other relevant ranges.
*/
- pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
+ pctx = i915_gem_object_create_stolen(dev_priv, pctx_size);
if (!pctx) {
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
goto out;
@@ -6784,7 +6809,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
goto out;
rcs = dev_priv->engine[RCS];
- if (rcs->last_context)
+ if (rcs->last_retired_context)
goto out;
if (!rcs->init_context)
@@ -7595,8 +7620,6 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv)
static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-
I915_WRITE(MEM_MODE,
_MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
_MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
@@ -7633,7 +7656,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
@@ -7651,9 +7674,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
- else if (IS_CRESTLINE(dev_priv))
+ else if (IS_I965GM(dev_priv))
dev_priv->display.init_clock_gating = crestline_init_clock_gating;
- else if (IS_BROADWATER(dev_priv))
+ else if (IS_I965G(dev_priv))
dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
else if (IS_GEN3(dev_priv))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
@@ -7702,10 +7725,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
}
- } else if (IS_CHERRYVIEW(dev_priv)) {
- vlv_setup_wm_latency(dev_priv);
- dev_priv->display.update_wm = vlv_update_wm;
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
dev_priv->display.update_wm = vlv_update_wm;
} else if (IS_PINEVIEW(dev_priv)) {
@@ -7849,6 +7869,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
}
I915_WRITE_FW(GEN6_PCODE_DATA, val);
+ I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (intel_wait_for_register_fw(dev_priv,
@@ -8041,10 +8062,8 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
queue_work(req->i915->wq, &boost->work);
}
-void intel_pm_setup(struct drm_device *dev)
+void intel_pm_setup(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
mutex_init(&dev_priv->rps.hw_lock);
spin_lock_init(&dev_priv->rps.client_lock);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c6be70686b4a..c3780d0d2baf 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -122,13 +122,26 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
{
struct edp_vsc_psr psr_vsc;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
psr_vsc.sdp_header.HB0 = 0;
psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x3;
- psr_vsc.sdp_header.HB3 = 0xb;
+ if (dev_priv->psr.colorimetry_support &&
+ dev_priv->psr.y_cord_support) {
+ psr_vsc.sdp_header.HB2 = 0x5;
+ psr_vsc.sdp_header.HB3 = 0x13;
+ } else if (dev_priv->psr.y_cord_support) {
+ psr_vsc.sdp_header.HB2 = 0x4;
+ psr_vsc.sdp_header.HB3 = 0xe;
+ } else {
+ psr_vsc.sdp_header.HB2 = 0x3;
+ psr_vsc.sdp_header.HB3 = 0xc;
+ }
+
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
@@ -196,7 +209,11 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux,
DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
DP_AUX_FRAME_SYNC_ENABLE);
-
+ /* Enable ALPM at sink for psr2 */
+ if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE);
if (dev_priv->psr.link_standby)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
@@ -248,7 +265,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
VLV_EDP_PSR_ACTIVE_ENTRY);
}
-static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+static void intel_enable_source_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@@ -299,14 +316,31 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
val |= EDP_PSR_TP1_TP2_SEL;
I915_WRITE(EDP_PSR_CTL, val);
+}
- if (!dev_priv->psr.psr2_support)
- return;
+static void intel_enable_source_psr2(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ /*
+ * Let's respect VBT in case VBT asks a higher idle_frame value.
+ * Let's use 6 as the minimum to cover all known cases including
+ * the off-by-one issue that HW has in some cases. Also there are
+ * cases where sink should be able to train
+ * with the 5 or 6 idle patterns.
+ */
+ uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+ uint32_t val;
+
+ val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
* good enough. */
- val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+ val |= EDP_PSR2_ENABLE |
+ EDP_SU_TRACK_ENABLE |
+ EDP_FRAMES_BEFORE_SU_ENTRY;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
val |= EDP_PSR2_TP2_TIME_2500;
@@ -320,6 +354,19 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR2_CTL, val);
}
+static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ /* PSR1 and PSR2 are mutually exclusive. */
+ if (dev_priv->psr.psr2_support)
+ intel_enable_source_psr2(intel_dp);
+ else
+ intel_enable_source_psr1(intel_dp);
+}
+
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -387,6 +434,22 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
return false;
}
+ /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
+ if (intel_crtc->config->pipe_src_w > 3200 ||
+ intel_crtc->config->pipe_src_h > 2000) {
+ dev_priv->psr.psr2_support = false;
+ return false;
+ }
+
+ /*
+ * FIXME: enable PSR2 only for y-coordinate PSR2 panels.
+ * After the gtc implementation, remove this restriction.
+ */
+ if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
+ DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
+ return false;
+ }
+
dev_priv->psr.source_ok = true;
return true;
}
@@ -397,7 +460,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (dev_priv->psr.psr2_support)
+ WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+ else
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
@@ -426,6 +492,8 @@ void intel_psr_enable(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+ enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ u32 chicken;
if (!HAS_PSR(dev_priv)) {
DRM_DEBUG_KMS("PSR not supported on this platform\n");
@@ -449,26 +517,34 @@ void intel_psr_enable(struct intel_dp *intel_dp)
dev_priv->psr.busy_frontbuffer_bits = 0;
if (HAS_DDI(dev_priv)) {
- hsw_psr_setup_vsc(intel_dp);
-
if (dev_priv->psr.psr2_support) {
- /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
- if (crtc->config->pipe_src_w > 3200 ||
- crtc->config->pipe_src_h > 2000)
- dev_priv->psr.psr2_support = false;
- else
- skl_psr_setup_su_vsc(intel_dp);
+ skl_psr_setup_su_vsc(intel_dp);
+ chicken = PSR2_VSC_ENABLE_PROG_HEADER;
+ if (dev_priv->psr.y_cord_support)
+ chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
+ I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
+ I915_WRITE(EDP_PSR_DEBUG_CTL,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP |
+ EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+ } else {
+ /* Set up the VSC header for PSR1 */
+ hsw_psr_setup_vsc(intel_dp);
+ /*
+ * Per spec: avoid continuous PSR exit by masking MEMUP
+ * and HPD. Also mask LPSP to avoid a dependency on other
+ * drivers that might block runtime_pm, besides preventing
+ * other HW tracking issues, now that we can rely on
+ * frontbuffer tracking.
+ */
+ I915_WRITE(EDP_PSR_DEBUG_CTL,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP);
}
- /*
- * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
- * Also mask LPSP to avoid dependency on other drivers that
- * might block runtime_pm besides preventing other hw tracking
- * issues now we can rely on frontbuffer tracking.
- */
- I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
/* Enable PSR on the panel */
hsw_psr_enable_sink(intel_dp);
@@ -544,20 +620,42 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->psr.active) {
- I915_WRITE(EDP_PSR_CTL,
- I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+ i915_reg_t psr_ctl;
+ u32 psr_status_mask;
+
+ if (dev_priv->psr.aux_frame_sync)
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
+ 0);
+
+ if (dev_priv->psr.psr2_support) {
+ psr_ctl = EDP_PSR2_CTL;
+ psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+
+ I915_WRITE(psr_ctl,
+ I915_READ(psr_ctl) &
+ ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
+
+ } else {
+ psr_ctl = EDP_PSR_STATUS_CTL;
+ psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+
+ I915_WRITE(psr_ctl,
+ I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+ }
/* Wait till PSR is idle */
if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS_CTL,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
+ psr_ctl, psr_status_mask, 0,
2000))
DRM_ERROR("Timed out waiting for PSR Idle State\n");
dev_priv->psr.active = false;
} else {
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ if (dev_priv->psr.psr2_support)
+ WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+ else
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
}
@@ -608,13 +706,24 @@ static void intel_psr_work(struct work_struct *work)
* and be ready for re-enable.
*/
if (HAS_DDI(dev_priv)) {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS_CTL,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
+ if (dev_priv->psr.psr2_support) {
+ if (intel_wait_for_register(dev_priv,
+ EDP_PSR2_STATUS_CTL,
+ EDP_PSR2_STATUS_STATE_MASK,
+ 0,
+ 50)) {
+ DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
+ return;
+ }
+ } else {
+ if (intel_wait_for_register(dev_priv,
+ EDP_PSR_STATUS_CTL,
+ EDP_PSR_STATUS_STATE_MASK,
+ 0,
+ 50)) {
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ return;
+ }
}
} else {
if (intel_wait_for_register(dev_priv,
@@ -656,11 +765,19 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
return;
if (HAS_DDI(dev_priv)) {
- val = I915_READ(EDP_PSR_CTL);
-
- WARN_ON(!(val & EDP_PSR_ENABLE));
-
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+ if (dev_priv->psr.aux_frame_sync)
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
+ 0);
+ if (dev_priv->psr.psr2_support) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
+ } else {
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+ }
} else {
val = I915_READ(VLV_PSRCTL(pipe));
@@ -813,15 +930,13 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
/**
* intel_psr_init - Init basic PSR work and mutex.
- * @dev: DRM device
+ * @dev_priv: i915 device private
*
* This function is called only once at driver load to initialize basic
* PSR stuff.
*/
-void intel_psr_init(struct drm_device *dev)
+void intel_psr_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
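With the new prototype, callers hand in the i915 private directly; the call-site update is not shown in this hunk, but presumably follows the usual pattern:

-	intel_psr_init(dev);
+	intel_psr_init(dev_priv);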
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 91cb4c422ad5..91bc4abf5d3e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -599,10 +599,62 @@ out:
static void reset_ring_common(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- struct intel_ring *ring = request->ring;
+ /* Try to restore the logical GPU state to match the continuation
+ * of the request queue. If we skip the context/PD restore, then
+ * the next request may try to execute assuming that its context
+ * is valid and loaded on the GPU and so may try to access invalid
+ * memory, prompting repeated GPU hangs.
+ *
+ * If the request was guilty, we still restore the logical state
+ * in case the next request requires it (e.g. the aliasing ppgtt),
+ * but skip over the hung batch.
+ *
+ * If the request was innocent, we try to replay the request with
+ * the restored context.
+ */
+ if (request) {
+ struct drm_i915_private *dev_priv = request->i915;
+ struct intel_context *ce = &request->ctx->engine[engine->id];
+ struct i915_hw_ppgtt *ppgtt;
+
+ /* FIXME consider gen8 reset */
+
+ if (ce->state) {
+ I915_WRITE(CCID,
+ i915_ggtt_offset(ce->state) |
+ BIT(8) /* must be set! */ |
+ CCID_EXTENDED_STATE_SAVE |
+ CCID_EXTENDED_STATE_RESTORE |
+ CCID_EN);
+ }
- ring->head = request->postfix;
- ring->last_retired_head = -1;
+ ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
+ if (ppgtt) {
+ u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
+
+ I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
+ I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset);
+
+ /* Wait for the PD reload to complete */
+ if (intel_wait_for_register(dev_priv,
+ RING_PP_DIR_BASE(engine),
+ BIT(0), 0,
+ 10))
+ DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n");
+
+ ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+ }
+
+ /* If the rq hung, jump to its breadcrumb and skip the batch */
+ if (request->fence.error == -EIO) {
+ struct intel_ring *ring = request->ring;
+
+ ring->head = request->postfix;
+ ring->last_retired_head = -1;
+ }
+ } else {
+ engine->legacy_active_context = NULL;
+ }
}
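The guilty/innocent decision above keys off the fence error set during hang recovery: only a request marked guilty (-EIO) has its batch skipped by advancing ring->head past its postfix. As a sketch (helper name hypothetical):

static bool request_is_guilty(const struct drm_i915_gem_request *rq)
{
	/* Hang recovery marks the hung request's fence with -EIO */
	return rq->fence.error == -EIO;
}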
static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1728,7 +1780,7 @@ static int init_status_page(struct intel_engine_cs *engine)
void *vaddr;
int ret;
- obj = i915_gem_object_create_internal(engine->i915, 4096);
+ obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
DRM_ERROR("Failed to allocate status page\n");
return PTR_ERR(obj);
@@ -1738,7 +1790,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;
- vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -1769,7 +1821,7 @@ static int init_status_page(struct intel_engine_cs *engine)
engine->status_page.vma = vma;
engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
- engine->status_page.page_addr = memset(vaddr, 0, 4096);
+ engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
engine->name, i915_ggtt_offset(vma));
@@ -1797,10 +1849,9 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
return 0;
}
-int intel_ring_pin(struct intel_ring *ring)
+int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
{
- /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- unsigned int flags = PIN_GLOBAL | PIN_OFFSET_BIAS | 4096;
+ unsigned int flags;
enum i915_map_type map;
struct i915_vma *vma = ring->vma;
void *addr;
@@ -1810,6 +1861,9 @@ int intel_ring_pin(struct intel_ring *ring)
map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
+ flags = PIN_GLOBAL;
+ if (offset_bias)
+ flags |= PIN_OFFSET_BIAS | offset_bias;
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
@@ -1861,16 +1915,16 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = i915_gem_object_create_stolen(&dev_priv->drm, size);
+ obj = i915_gem_object_create_stolen(dev_priv, size);
if (!obj)
- obj = i915_gem_object_create(&dev_priv->drm, size);
+ obj = i915_gem_object_create(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
/* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1;
- vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err;
@@ -1904,7 +1958,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
* of the buffer.
*/
ring->effective_size = size;
- if (IS_I830(engine->i915) || IS_845G(engine->i915))
+ if (IS_I830(engine->i915) || IS_I845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
ring->last_retired_head = -1;
@@ -1931,8 +1985,26 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
-static int intel_ring_context_pin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static int context_pin(struct i915_gem_context *ctx, unsigned int flags)
+{
+ struct i915_vma *vma = ctx->engine[RCS].state;
+ int ret;
+
+ /* Clear this page out of any CPU caches for coherent swap-in/out.
+ * We only want to do this on the first bind so that we do not stall
+ * on an active context (which by nature is already on the GPU).
+ */
+ if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
+ ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+ if (ret)
+ return ret;
+ }
+
+ return i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+}
+
+static int intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
int ret;
@@ -1943,13 +2015,15 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
return 0;
if (ce->state) {
- struct i915_vma *vma;
+ unsigned int flags;
- vma = i915_gem_context_pin_legacy(ctx, PIN_HIGH);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
+ flags = 0;
+ if (i915_gem_context_is_kernel(ctx))
+ flags = PIN_HIGH;
+
+ ret = context_pin(ctx, flags);
+ if (ret)
goto error;
- }
}
/* The kernel context is only used as a placeholder for flushing the
@@ -1959,7 +2033,7 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
* as during eviction we cannot allocate and pin the renderstate in
* order to initialise the context.
*/
- if (ctx == ctx->i915->kernel_context)
+ if (i915_gem_context_is_kernel(ctx))
ce->initialised = true;
i915_gem_context_get(ctx);
@@ -1970,12 +2044,13 @@ error:
return ret;
}
-static void intel_ring_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+static void intel_ring_context_unpin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+ GEM_BUG_ON(ce->pin_count == 0);
if (--ce->pin_count)
return;
@@ -2000,17 +2075,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (ret)
goto error;
- /* We may need to do things with the shrinker which
- * require us to immediately switch back to the default
- * context. This can cause a problem as pinning the
- * default context also requires GTT space which may not
- * be available. To avoid this we always pin the default
- * context.
- */
- ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
- if (ret)
- goto error;
-
ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
@@ -2028,7 +2092,8 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
goto error;
}
- ret = intel_ring_pin(ring);
+ /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+ ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
if (ret) {
intel_ring_free(ring);
goto error;
@@ -2069,8 +2134,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
intel_engine_cleanup_common(engine);
- intel_ring_context_unpin(dev_priv->kernel_context, engine);
-
engine->i915 = NULL;
dev_priv->engine[engine->id] = NULL;
kfree(engine);
@@ -2087,16 +2150,19 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
}
}
-int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
+static int ring_request_alloc(struct drm_i915_gem_request *request)
{
int ret;
+ GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
+
/* Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
+ GEM_BUG_ON(!request->engine->buffer);
request->ring = request->engine->buffer;
ret = intel_ring_begin(request, 0);
@@ -2444,11 +2510,11 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
struct i915_vma *vma;
- obj = i915_gem_object_create(&dev_priv->drm, 4096);
+ obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
if (IS_ERR(obj))
goto err;
- vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+ vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
if (IS_ERR(vma))
goto err_obj;
@@ -2576,6 +2642,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->init_hw = init_ring_common;
engine->reset_hw = reset_ring_common;
+ engine->context_pin = intel_ring_context_pin;
+ engine->context_unpin = intel_ring_context_unpin;
+
+ engine->request_alloc = ring_request_alloc;
+
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
if (i915.semaphores) {
@@ -2600,7 +2671,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_bb_start = gen6_emit_bb_start;
else if (INTEL_GEN(dev_priv) >= 4)
engine->emit_bb_start = i965_emit_bb_start;
- else if (IS_I830(dev_priv) || IS_845G(dev_priv))
+ else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
engine->emit_bb_start = i830_emit_bb_start;
else
engine->emit_bb_start = i915_emit_bb_start;
@@ -2656,7 +2727,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
return ret;
if (INTEL_GEN(dev_priv) >= 6) {
- ret = intel_engine_create_scratch(engine, 4096);
+ ret = intel_engine_create_scratch(engine, PAGE_SIZE);
if (ret)
return ret;
} else if (HAS_BROKEN_CS_TLB(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3466b4e77e7c..79c2b8d72322 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -65,14 +65,37 @@ struct intel_hw_status_page {
GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
enum intel_engine_hangcheck_action {
- HANGCHECK_IDLE = 0,
- HANGCHECK_WAIT,
- HANGCHECK_ACTIVE,
- HANGCHECK_KICK,
- HANGCHECK_HUNG,
+ ENGINE_IDLE = 0,
+ ENGINE_WAIT,
+ ENGINE_ACTIVE_SEQNO,
+ ENGINE_ACTIVE_HEAD,
+ ENGINE_ACTIVE_SUBUNITS,
+ ENGINE_WAIT_KICK,
+ ENGINE_DEAD,
};
-#define HANGCHECK_SCORE_RING_HUNG 31
+static inline const char *
+hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
+{
+ switch (a) {
+ case ENGINE_IDLE:
+ return "idle";
+ case ENGINE_WAIT:
+ return "wait";
+ case ENGINE_ACTIVE_SEQNO:
+ return "active seqno";
+ case ENGINE_ACTIVE_HEAD:
+ return "active head";
+ case ENGINE_ACTIVE_SUBUNITS:
+ return "active subunits";
+ case ENGINE_WAIT_KICK:
+ return "wait kick";
+ case ENGINE_DEAD:
+ return "dead";
+ }
+
+ return "unknown";
+}
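A hypothetical debug caller for the new helper, to show the intended use (not part of the patch):

static void engine_dump_hangcheck(const struct intel_engine_cs *engine)
{
	/* Translate the per-engine hangcheck action into a readable string */
	DRM_DEBUG_DRIVER("%s hangcheck: %s\n", engine->name,
			 hangcheck_action_to_str(engine->hangcheck.action));
}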
#define I915_MAX_SLICES 3
#define I915_MAX_SUBSLICES 3
@@ -104,10 +127,11 @@ struct intel_instdone {
struct intel_engine_hangcheck {
u64 acthd;
u32 seqno;
- int score;
enum intel_engine_hangcheck_action action;
+ unsigned long action_timestamp;
int deadlock;
struct intel_instdone instdone;
+ bool stalled;
};
struct intel_ring {
@@ -242,6 +266,11 @@ struct intel_engine_cs {
void (*reset_hw)(struct intel_engine_cs *engine,
struct drm_i915_gem_request *req);
+ int (*context_pin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+ void (*context_unpin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
+ int (*request_alloc)(struct drm_i915_gem_request *req);
int (*init_context)(struct drm_i915_gem_request *req);
int (*emit_flush)(struct drm_i915_gem_request *request,
@@ -355,7 +384,24 @@ struct intel_engine_cs {
bool preempt_wa;
u32 ctx_desc_template;
- struct i915_gem_context *last_context;
+ /* Contexts are pinned whilst they are active on the GPU. The last
+ * context executed remains active whilst the GPU is idle - the
+ * switch away and write to the context object only occurs on the
+ * next execution. Contexts are only unpinned on retirement of the
+ * following request ensuring that we can always write to the object
+ * on the context switch even after idling. Across suspend, we switch
+ * to the kernel context and trash it as the save may not happen
+ * before the hardware is powered down.
+ */
+ struct i915_gem_context *last_retired_context;
+
+ /* We track the current MI_SET_CONTEXT in order to eliminate
+ * redundant context switches. This presumes that requests are not
+ * reordered, or that when they are, the tracking is updated along with
+ * the emission of individual requests into the legacy command
+ * stream (ring).
+ */
+ struct i915_gem_context *legacy_active_context;
struct intel_engine_hangcheck hangcheck;
@@ -437,7 +483,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);
-int intel_ring_pin(struct intel_ring *ring);
+int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);
@@ -446,8 +492,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
-
int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 87b4af092d54..c0b7e95b5b8e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -453,6 +453,57 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT(POWER_DOMAIN_PIPE_B) | \
+ BIT(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT(POWER_DOMAIN_PIPE_C) | \
+ BIT(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_AUDIO) | \
+ BIT(POWER_DOMAIN_VGA) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_AUX_B) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
+ BIT(POWER_DOMAIN_AUX_C) | \
+ BIT(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_MODESET) | \
+ BIT(POWER_DOMAIN_AUX_A) | \
+ BIT(POWER_DOMAIN_INIT))
+
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
@@ -530,7 +581,7 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
@@ -694,7 +745,7 @@ gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
}
static void skl_set_power_well(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well, bool enable)
+ struct i915_power_well *power_well, bool enable)
{
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
@@ -720,11 +771,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
return;
}
break;
- case SKL_DISP_PW_DDI_A_E:
+ case SKL_DISP_PW_MISC_IO:
+ case SKL_DISP_PW_DDI_A_E: /* GLK_DISP_PW_DDI_A */
case SKL_DISP_PW_DDI_B:
case SKL_DISP_PW_DDI_C:
case SKL_DISP_PW_DDI_D:
- case SKL_DISP_PW_MISC_IO:
+ case GLK_DISP_PW_AUX_A:
+ case GLK_DISP_PW_AUX_B:
+ case GLK_DISP_PW_AUX_C:
break;
default:
WARN(1, "Unknown power well %lu\n", power_well->id);
@@ -884,6 +938,12 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
if (power_well->count > 0)
bxt_ddi_phy_verify_state(dev_priv, power_well->data);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ power_well = lookup_power_well(dev_priv, GLK_DPIO_CMN_C);
+ if (power_well->count > 0)
+ bxt_ddi_phy_verify_state(dev_priv, power_well->data);
+ }
}
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
@@ -911,7 +971,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
gen9_assert_dbuf_enabled(dev_priv);
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
bxt_verify_ddi_phy_power_wells(dev_priv);
}
@@ -2161,6 +2221,91 @@ static struct i915_power_well bxt_power_wells[] = {
},
};
+static struct i915_power_well glk_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = 1,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .domains = 0,
+ .ops = &skl_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ },
+ {
+ .name = "DC off",
+ .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = SKL_DISP_PW_DC_OFF,
+ },
+ {
+ .name = "power well 2",
+ .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ },
+ {
+ .name = "dpio-common-a",
+ .domains = GLK_DPIO_CMN_A_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = BXT_DPIO_CMN_A,
+ .data = DPIO_PHY1,
+ },
+ {
+ .name = "dpio-common-b",
+ .domains = GLK_DPIO_CMN_B_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = BXT_DPIO_CMN_BC,
+ .data = DPIO_PHY0,
+ },
+ {
+ .name = "dpio-common-c",
+ .domains = GLK_DPIO_CMN_C_POWER_DOMAINS,
+ .ops = &bxt_dpio_cmn_power_well_ops,
+ .id = GLK_DPIO_CMN_C,
+ .data = DPIO_PHY2,
+ },
+ {
+ .name = "AUX A",
+ .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = GLK_DISP_PW_AUX_A,
+ },
+ {
+ .name = "AUX B",
+ .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = GLK_DISP_PW_AUX_B,
+ },
+ {
+ .name = "AUX C",
+ .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = GLK_DISP_PW_AUX_C,
+ },
+ {
+ .name = "DDI A power well",
+ .domains = GLK_DISPLAY_DDI_A_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = GLK_DISP_PW_DDI_A,
+ },
+ {
+ .name = "DDI B power well",
+ .domains = GLK_DISPLAY_DDI_B_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = SKL_DISP_PW_DDI_B,
+ },
+ {
+ .name = "DDI C power well",
+ .domains = GLK_DISPLAY_DDI_C_POWER_DOMAINS,
+ .ops = &skl_power_well_ops,
+ .id = SKL_DISP_PW_DDI_C,
+ },
+};
+
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -2181,7 +2326,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
max_dc = 2;
mask = 0;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
max_dc = 1;
/*
* DC9 has a separate HW flow from the rest of the DC states,
@@ -2257,6 +2402,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, skl_power_wells);
} else if (IS_BROXTON(dev_priv)) {
set_power_wells(power_domains, bxt_power_wells);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ set_power_wells(power_domains, glk_power_wells);
} else if (IS_CHERRYVIEW(dev_priv)) {
set_power_wells(power_domains, chv_power_wells);
} else if (IS_VALLEYVIEW(dev_priv)) {
@@ -2585,7 +2732,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
skl_display_core_init(dev_priv, resume);
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
} else if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&power_domains->lock);
@@ -2624,7 +2771,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_display_core_uninit(dev_priv);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
bxt_display_core_uninit(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 27808e91cb5a..2ad13903a054 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1296,7 +1296,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
- IS_G33(dev_priv)) {
+ IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
/* done in crtc_mode_set as it lives inside the dpll register */
} else {
sdvox |= (crtc_state->pixel_multiplier - 1)
@@ -2342,9 +2342,9 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
}
static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
+intel_sdvo_get_slave_addr(struct drm_i915_private *dev_priv,
+ struct intel_sdvo *sdvo)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (sdvo->port == PORT_B) {
@@ -2934,9 +2934,9 @@ static const struct i2c_algorithm intel_sdvo_ddc_proxy = {
static bool
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
- struct drm_device *dev)
+ struct drm_i915_private *dev_priv)
{
- struct pci_dev *pdev = dev->pdev;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
@@ -2957,10 +2957,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
WARN_ON(port != PORT_B && port != PORT_C);
}
-bool intel_sdvo_init(struct drm_device *dev,
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
i915_reg_t sdvo_reg, enum port port)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
int i;
@@ -2973,16 +2972,18 @@ bool intel_sdvo_init(struct drm_device *dev,
intel_sdvo->sdvo_reg = sdvo_reg;
intel_sdvo->port = port;
- intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
+ intel_sdvo->slave_addr =
+ intel_sdvo_get_slave_addr(dev_priv, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
- if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
+ if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev_priv))
goto err_i2c_bus;
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
intel_encoder->port = port;
- drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
+ drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ &intel_sdvo_enc_funcs, 0,
"SDVO %c", port_name(port));
/* Read the regs to test if we can talk to the device */
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 242a73e66d82..9ef54688872a 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -203,8 +203,8 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(drm_plane);
struct drm_framebuffer *fb = plane_state->base.fb;
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane + 1;
+ enum plane_id plane_id = intel_plane->id;
+ enum pipe pipe = intel_plane->pipe;
u32 plane_ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
@@ -223,15 +223,15 @@ skl_update_plane(struct drm_plane *drm_plane,
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
- plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
plane_ctl |= skl_plane_ctl_rotation(rotation);
if (key->flags) {
- I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
- I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
- I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
+ I915_WRITE(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+ I915_WRITE(PLANE_KEYMAX(pipe, plane_id), key->max_value);
+ I915_WRITE(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
}
if (key->flags & I915_SET_COLORKEY_DESTINATION)
@@ -245,36 +245,36 @@ skl_update_plane(struct drm_plane *drm_plane,
crtc_w--;
crtc_h--;
- I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
- I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
- I915_WRITE(PLANE_SIZE(pipe, plane), (src_h << 16) | src_w);
+ I915_WRITE(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+ I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler;
- DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
- PS_PLANE_SEL(plane));
+ DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
+ plane_id, PS_PLANE_SEL(plane_id));
scaler = &crtc_state->scaler_state.scalers[scaler_id];
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
- PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
+ PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
((crtc_w + 1) << 16)|(crtc_h + 1));
- I915_WRITE(PLANE_POS(pipe, plane), 0);
+ I915_WRITE(PLANE_POS(pipe, plane_id), 0);
} else {
- I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+ I915_WRITE(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
}
- I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
- I915_WRITE(PLANE_SURF(pipe, plane),
+ I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
+ I915_WRITE(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- POSTING_READ(PLANE_SURF(pipe, plane));
+ POSTING_READ(PLANE_SURF(pipe, plane_id));
}
static void
@@ -283,20 +283,20 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
- const int pipe = intel_plane->pipe;
- const int plane = intel_plane->plane + 1;
+ enum plane_id plane_id = intel_plane->id;
+ enum pipe pipe = intel_plane->pipe;
- I915_WRITE(PLANE_CTL(pipe, plane), 0);
+ I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
- I915_WRITE(PLANE_SURF(pipe, plane), 0);
- POSTING_READ(PLANE_SURF(pipe, plane));
+ I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
+ POSTING_READ(PLANE_SURF(pipe, plane_id));
}
static void
chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
{
struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- int plane = intel_plane->plane;
+ enum plane_id plane_id = intel_plane->id;
/* Seems RGB data bypasses the CSC always */
if (!format_is_yuv(format))
@@ -312,23 +312,23 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
* Cb and Cr apparently come in as signed already, so no
* need for any offset. For Y we need to remove the offset.
*/
- I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
- I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-
- I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
- I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
- I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
- I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
- I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
-
- I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
- I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
- I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
-
- I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+ I915_WRITE(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ I915_WRITE(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ I915_WRITE(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
+ I915_WRITE(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
+ I915_WRITE(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
+ I915_WRITE(SPCSCC8(plane_id), SPCSC_C0(8263));
+
+ I915_WRITE(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+ I915_WRITE(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+ I915_WRITE(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+
+ I915_WRITE(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
static void
@@ -340,8 +340,8 @@ vlv_update_plane(struct drm_plane *dplane,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
struct drm_framebuffer *fb = plane_state->base.fb;
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
+ enum pipe pipe = intel_plane->pipe;
+ enum plane_id plane_id = intel_plane->id;
u32 sprctl;
u32 sprsurf_offset, linear_offset;
unsigned int rotation = plane_state->base.rotation;
@@ -357,7 +357,7 @@ vlv_update_plane(struct drm_plane *dplane,
sprctl = SP_ENABLE;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_YUYV:
sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV;
break;
@@ -434,32 +434,32 @@ vlv_update_plane(struct drm_plane *dplane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
if (key->flags) {
- I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
- I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
- I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask);
+ I915_WRITE(SPKEYMINVAL(pipe, plane_id), key->min_value);
+ I915_WRITE(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+ I915_WRITE(SPKEYMSK(pipe, plane_id), key->channel_mask);
}
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
- chv_update_csc(intel_plane, fb->pixel_format);
+ chv_update_csc(intel_plane, fb->format->format);
- I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
- I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
+ I915_WRITE(SPSTRIDE(pipe, plane_id), fb->pitches[0]);
+ I915_WRITE(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE(SPTILEOFF(pipe, plane), (y << 16) | x);
+ I915_WRITE(SPTILEOFF(pipe, plane_id), (y << 16) | x);
else
- I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
+ I915_WRITE(SPLINOFF(pipe, plane_id), linear_offset);
- I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
+ I915_WRITE(SPCONSTALPHA(pipe, plane_id), 0);
- I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
- I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_WRITE(SPSURF(pipe, plane),
+ I915_WRITE(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ I915_WRITE(SPCNTR(pipe, plane_id), sprctl);
+ I915_WRITE(SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ(SPSURF(pipe, plane));
+ POSTING_READ(SPSURF(pipe, plane_id));
}
static void
@@ -468,13 +468,13 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(dplane);
- int pipe = intel_plane->pipe;
- int plane = intel_plane->plane;
+ enum pipe pipe = intel_plane->pipe;
+ enum plane_id plane_id = intel_plane->id;
- I915_WRITE(SPCNTR(pipe, plane), 0);
+ I915_WRITE(SPCNTR(pipe, plane_id), 0);
- I915_WRITE(SPSURF(pipe, plane), 0);
- POSTING_READ(SPSURF(pipe, plane));
+ I915_WRITE(SPSURF(pipe, plane_id), 0);
+ POSTING_READ(SPSURF(pipe, plane_id));
}
static void
@@ -502,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane,
sprctl = SPRITE_ENABLE;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
break;
@@ -640,7 +640,7 @@ ilk_update_plane(struct drm_plane *plane,
dvscntr = DVS_ENABLE;
- switch (fb->pixel_format) {
+ switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
break;
@@ -866,7 +866,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
src_y = src->y1 >> 16;
src_h = drm_rect_height(src) >> 16;
- if (format_is_yuv(fb->pixel_format)) {
+ if (format_is_yuv(fb->format->format)) {
src_x &= ~1;
src_w &= ~1;
@@ -885,7 +885,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
/* Check size restrictions when scaling */
if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) {
unsigned int width_bytes;
- int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+ int cpp = fb->format->cpp[0];
WARN_ON(!can_scale);
@@ -1112,6 +1112,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
intel_plane->pipe = pipe;
intel_plane->plane = plane;
+ intel_plane->id = PLANE_SPRITE0 + plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 78cdfc6833d6..eb692e4ffe01 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1537,9 +1537,9 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
};
void
-intel_tv_init(struct drm_device *dev)
+intel_tv_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_device *dev = &dev_priv->drm;
struct drm_connector *connector;
struct intel_tv *intel_tv;
struct intel_encoder *intel_encoder;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
new file mode 100644
index 000000000000..c46bc8594f22
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_uc.h"
+
+void intel_uc_init_early(struct drm_i915_private *dev_priv)
+{
+ mutex_init(&dev_priv->guc.send_mutex);
+}
+
+/*
+ * Read GuC command/status register (SOFT_SCRATCH_0)
+ * Return true if it contains a response rather than a command
+ */
+static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ u32 val = I915_READ(SOFT_SCRATCH(0));
+ *status = val;
+ return INTEL_GUC_RECV_IS_RESPONSE(val);
+}
+
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 status;
+ int i;
+ int ret;
+
+ if (WARN_ON(len < 1 || len > 15))
+ return -EINVAL;
+
+ mutex_lock(&guc->send_mutex);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ dev_priv->guc.action_count += 1;
+ dev_priv->guc.action_cmd = action[0];
+
+ for (i = 0; i < len; i++)
+ I915_WRITE(SOFT_SCRATCH(i), action[i]);
+
+ POSTING_READ(SOFT_SCRATCH(i - 1));
+
+ I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+
+ /*
+ * Fast commands should complete in less than 10us, so sample quickly
+ * up to that length of time, then switch to a slower sleep-wait loop.
+ * No intel_guc_send() command should ever take longer than 10ms.
+ */
+ ret = wait_for_us(intel_guc_recv(guc, &status), 10);
+ if (ret)
+ ret = wait_for(intel_guc_recv(guc, &status), 10);
+ if (status != INTEL_GUC_STATUS_SUCCESS) {
+ /*
+ * Either the GuC explicitly returned an error (which
+ * we convert to -EIO here) or no response at all was
+ * received within the timeout limit (-ETIMEDOUT)
+ */
+ if (ret != -ETIMEDOUT)
+ ret = -EIO;
+
+ DRM_WARN("INTEL_GUC_SEND: Action 0x%X failed;"
+ " ret=%d status=0x%08X response=0x%08X\n",
+ action[0], ret, status, I915_READ(SOFT_SCRATCH(15)));
+
+ dev_priv->guc.action_fail += 1;
+ dev_priv->guc.action_err = ret;
+ }
+ dev_priv->guc.action_status = status;
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ mutex_unlock(&guc->send_mutex);
+
+ return ret;
+}
+
+int intel_guc_sample_forcewake(struct intel_guc *guc)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 action[2];
+
+ action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
+ /* WaRsDisableCoarsePowerGating:skl,bxt */
+ if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
+ action[1] = 0;
+ else
+ /* bits 0 and 1 select the Render and Media domains respectively */
+ action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
+
+ return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
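intel_guc_sample_forcewake() above is the canonical caller; a minimal sketch of any other single-word action going through the same interface (the opcode is a placeholder, not a real GuC action):

static int intel_guc_send_one(struct intel_guc *guc, u32 opcode)
{
	/* Single-word action, sent under guc->send_mutex by intel_guc_send() */
	u32 action[1] = { opcode };

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}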
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_uc.h
index 0053258e03d3..d74f4d3ad8dc 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -21,13 +21,15 @@
* IN THE SOFTWARE.
*
*/
-#ifndef _INTEL_GUC_H_
-#define _INTEL_GUC_H_
+#ifndef _INTEL_UC_H_
+#define _INTEL_UC_H_
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
#include "intel_ringbuffer.h"
+#include "i915_vma.h"
+
struct drm_i915_gem_request;
/*
@@ -74,7 +76,7 @@ struct i915_guc_client {
uint32_t proc_desc_offset;
uint32_t doorbell_offset;
- uint32_t cookie;
+ uint32_t doorbell_cookie;
uint16_t doorbell_id;
uint16_t padding[3]; /* Maintain alignment */
@@ -91,30 +93,35 @@ struct i915_guc_client {
uint64_t submissions[I915_NUM_ENGINES];
};
-enum intel_guc_fw_status {
- GUC_FIRMWARE_FAIL = -1,
- GUC_FIRMWARE_NONE = 0,
- GUC_FIRMWARE_PENDING,
- GUC_FIRMWARE_SUCCESS
+enum intel_uc_fw_status {
+ INTEL_UC_FIRMWARE_FAIL = -1,
+ INTEL_UC_FIRMWARE_NONE = 0,
+ INTEL_UC_FIRMWARE_PENDING,
+ INTEL_UC_FIRMWARE_SUCCESS
+};
+
+enum intel_uc_fw_type {
+ INTEL_UC_FW_TYPE_GUC,
+ INTEL_UC_FW_TYPE_HUC
};
/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the GuC.
*/
-struct intel_guc_fw {
- struct drm_device * guc_dev;
- const char * guc_fw_path;
- size_t guc_fw_size;
- struct drm_i915_gem_object * guc_fw_obj;
- enum intel_guc_fw_status guc_fw_fetch_status;
- enum intel_guc_fw_status guc_fw_load_status;
-
- uint16_t guc_fw_major_wanted;
- uint16_t guc_fw_minor_wanted;
- uint16_t guc_fw_major_found;
- uint16_t guc_fw_minor_found;
-
+struct intel_uc_fw {
+ const char *path;
+ size_t size;
+ struct drm_i915_gem_object *obj;
+ enum intel_uc_fw_status fetch_status;
+ enum intel_uc_fw_status load_status;
+
+ uint16_t major_ver_wanted;
+ uint16_t minor_ver_wanted;
+ uint16_t major_ver_found;
+ uint16_t minor_ver_found;
+
+ enum intel_uc_fw_type fw;
uint32_t header_size;
uint32_t header_offset;
uint32_t rsa_size;
@@ -140,10 +147,10 @@ struct intel_guc_log {
};
struct intel_guc {
- struct intel_guc_fw guc_fw;
+ struct intel_uc_fw fw;
struct intel_guc_log log;
- /* GuC2Host interrupt related state */
+ /* intel_guc_recv interrupt related state */
bool interrupts_enabled;
struct i915_vma *ads_vma;
@@ -165,17 +172,32 @@ struct intel_guc {
uint64_t submissions[I915_NUM_ENGINES];
uint32_t last_seqno[I915_NUM_ENGINES];
- /* To serialize the Host2GuC actions */
- struct mutex action_lock;
+ /* To serialize the intel_guc_send actions */
+ struct mutex send_mutex;
+};
+
+struct intel_huc {
+ /* Generic uC firmware management */
+ struct intel_uc_fw fw;
+
+ /* HuC-specific additions */
};
+/* intel_uc.c */
+void intel_uc_init_early(struct drm_i915_private *dev_priv);
+int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
+int intel_guc_sample_forcewake(struct intel_guc *guc);
+
/* intel_guc_loader.c */
-extern void intel_guc_init(struct drm_device *dev);
-extern int intel_guc_setup(struct drm_device *dev);
-extern void intel_guc_fini(struct drm_device *dev);
-extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
-extern int intel_guc_suspend(struct drm_device *dev);
-extern int intel_guc_resume(struct drm_device *dev);
+extern void intel_guc_init(struct drm_i915_private *dev_priv);
+extern int intel_guc_setup(struct drm_i915_private *dev_priv);
+extern void intel_guc_fini(struct drm_i915_private *dev_priv);
+extern const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
+extern int intel_guc_suspend(struct drm_i915_private *dev_priv);
+extern int intel_guc_resume(struct drm_i915_private *dev_priv);
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw);
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
/* i915_guc_submission.c */
int i915_guc_submission_init(struct drm_i915_private *dev_priv);
@@ -184,10 +206,26 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
-void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
-void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
-void i915_guc_register(struct drm_i915_private *dev_priv);
-void i915_guc_unregister(struct drm_i915_private *dev_priv);
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+
+/* intel_guc_log.c */
+void intel_guc_log_create(struct intel_guc *guc);
+void i915_guc_log_register(struct drm_i915_private *dev_priv);
+void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
+static inline u32 guc_ggtt_offset(struct i915_vma *vma)
+{
+ u32 offset = i915_ggtt_offset(vma);
+ GEM_BUG_ON(offset < GUC_WOPCM_TOP);
+ GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));
+ return offset;
+}
+
+/* intel_huc.c */
+void intel_huc_init(struct drm_i915_private *dev_priv);
+void intel_huc_fini(struct drm_i915_private *dev_priv);
+int intel_huc_load(struct drm_i915_private *dev_priv);
+void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0bffd3f0c15d..abe08885a5ba 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -421,8 +421,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
GT_FIFO_CTL_RC6_POLICY_STALL);
}
- /* Enable Decoupled MMIO only on BXT C stepping onwards */
- if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
info->has_decoupled_mmio = false;
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
@@ -626,7 +625,14 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
dev_priv->uncore.fw_domains_table_entries,
fw_range_cmp);
- return entry ? entry->domains : 0;
+ if (!entry)
+ return 0;
+
+ WARN(entry->domains & ~dev_priv->uncore.fw_domains,
+ "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
+ entry->domains & ~dev_priv->uncore.fw_domains, offset);
+
+ return entry->domains;
}
static void
@@ -1813,7 +1819,7 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
return ironlake_do_reset;
else if (IS_G4X(dev_priv))
return g4x_do_reset;
- else if (IS_G33(dev_priv))
+ else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
return g33_do_reset;
else if (INTEL_INFO(dev_priv)->gen >= 3)
return i915_do_reset;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 8886cab19f98..a92e7762f596 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -399,10 +399,12 @@ struct lvds_dvo_timing {
u8 vblank_hi:4;
u8 vactive_hi:4;
u8 hsync_off_lo;
- u8 hsync_pulse_width;
- u8 vsync_pulse_width:4;
- u8 vsync_off:4;
- u8 rsvd0:6;
+ u8 hsync_pulse_width_lo;
+ u8 vsync_pulse_width_lo:4;
+ u8 vsync_off_lo:4;
+ u8 vsync_pulse_width_hi:2;
+ u8 vsync_off_hi:2;
+ u8 hsync_pulse_width_hi:2;
u8 hsync_off_hi:2;
u8 himage_lo;
u8 vimage_lo;
@@ -414,7 +416,7 @@ struct lvds_dvo_timing {
u8 digital:2;
u8 vsync_positive:1;
u8 hsync_positive:1;
- u8 rsvd2:1;
+ u8 non_interlaced:1;
} __packed;
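The lo/hi split presumably gets reassembled by the VBT parser when building the panel mode; an illustrative reconstruction (helper is hypothetical):

static inline u16 dvo_hsync_pulse_width(const struct lvds_dvo_timing *t)
{
	/* hsync pulse width: 8 low bits plus 2 high bits from the split fields */
	return (t->hsync_pulse_width_hi << 8) | t->hsync_pulse_width_lo;
}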
struct lvds_pnp_id {