Diffstat (limited to 'drivers/gpu/drm/i915')
91 files changed, 5172 insertions, 3688 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index e9e64e8e9765..dfd95889f4b7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -12,6 +12,7 @@ config DRM_I915
     select DRM_PANEL
     select DRM_MIPI_DSI
     select RELAY
+    select IRQ_WORK
     # i915 depends on ACPI_VIDEO when ACPI is enabled
     # but for select to work, need to select ACPI_VIDEO's dependencies, ick
     select BACKLIGHT_LCD_SUPPORT if ACPI
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 892f52b53060..5182e3d5557d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -139,7 +139,8 @@ i915-y += i915_perf.o \
       i915_oa_bxt.o \
       i915_oa_kblgt2.o \
       i915_oa_kblgt3.o \
-      i915_oa_glk.o
+      i915_oa_glk.o \
+      i915_oa_cflgt2.o

 ifeq ($(CONFIG_DRM_I915_GVT),y)
 i915-y += intel_gvt.o
@@ -150,5 +151,3 @@ endif
 i915-y += intel_lpe_audio.o

 obj-$(CONFIG_DRM_I915) += i915.o
-
-CFLAGS_i915_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index ca3d1925beda..7c9ec4f4f36c 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -173,8 +173,8 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
     _clear_vgpu_fence(vgpu);
     for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
         reg = vgpu->fence.regs[i];
-        list_add_tail(&reg->link,
-                      &dev_priv->mm.fence_list);
+        i915_unreserve_fence(reg);
+        vgpu->fence.regs[i] = NULL;
     }
     mutex_unlock(&dev_priv->drm.struct_mutex);

@@ -187,24 +187,19 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
     struct drm_i915_private *dev_priv = gvt->dev_priv;
     struct drm_i915_fence_reg *reg;
     int i;
-    struct list_head *pos, *q;

     intel_runtime_pm_get(dev_priv);

     /* Request fences from host */
     mutex_lock(&dev_priv->drm.struct_mutex);
-    i = 0;
-    list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
-        reg = list_entry(pos, struct drm_i915_fence_reg, link);
-        if (reg->pin_count || reg->vma)
-            continue;
-        list_del(pos);
+
+    for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+        reg = i915_reserve_fence(dev_priv);
+        if (IS_ERR(reg))
+            goto out_free_fence;
+
         vgpu->fence.regs[i] = reg;
-        if (++i == vgpu_fence_sz(vgpu))
-            break;
     }
-    if (i != vgpu_fence_sz(vgpu))
-        goto out_free_fence;

     _clear_vgpu_fence(vgpu);

@@ -212,13 +207,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
     intel_runtime_pm_put(dev_priv);
     return 0;
 out_free_fence:
+    gvt_vgpu_err("Failed to alloc fences\n");
     /* Return fences to host, if fail */
     for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
         reg = vgpu->fence.regs[i];
         if (!reg)
             continue;
-        list_add_tail(&reg->link,
-                      &dev_priv->mm.fence_list);
+        i915_unreserve_fence(reg);
+        vgpu->fence.regs[i] = NULL;
     }
     mutex_unlock(&dev_priv->drm.struct_mutex);
     intel_runtime_pm_put(dev_priv);
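The aperture_gm.c hunks above replace the open-coded walk of dev_priv->mm.fence_list with the paired i915_reserve_fence()/i915_unreserve_fence() API, so vGPU fence allocation either takes every fence it needs or rolls back the ones it already took. A stand-alone sketch of that all-or-nothing pattern in user-space C; the pool, slot names, and sizes are illustrative, not driver API:

#include <errno.h>
#include <stdio.h>

#define POOL_SZ 32

static int pool_busy[POOL_SZ];

/* Models i915_reserve_fence(): hand out one free slot or fail. */
static int reserve_slot(void)
{
    int i;

    for (i = 0; i < POOL_SZ; i++) {
        if (!pool_busy[i]) {
            pool_busy[i] = 1;
            return i;
        }
    }
    return -ENOSPC;
}

static void unreserve_slot(int slot)
{
    pool_busy[slot] = 0;
}

/* All-or-nothing: on any failure, return every slot taken so far. */
static int reserve_many(int *slots, int want)
{
    int i, s = 0;

    for (i = 0; i < want; i++) {
        s = reserve_slot();
        if (s < 0)
            goto rollback;
        slots[i] = s;
    }
    return 0;

rollback:
    while (i--)
        unreserve_slot(slots[i]);
    return s;
}

int main(void)
{
    int slots[4];

    if (reserve_many(slots, 4) == 0)
        printf("got slots %d..%d\n", slots[0], slots[3]);
    return 0;
}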
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index ff3154fe6588..ab19545d59a1 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -101,7 +101,7 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
     if (WARN_ON(bytes > 4))
         return -EINVAL;

-    if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+    if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
         return -EINVAL;

     memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
@@ -110,13 +110,25 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,

 static int map_aperture(struct intel_vgpu *vgpu, bool map)
 {
-    u64 first_gfn, first_mfn;
+    phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
+    unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
+    u64 first_gfn;
     u64 val;
     int ret;

     if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
         return 0;

+    if (map) {
+        vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
+                                        MEMREMAP_WC);
+        if (!vgpu->gm.aperture_va)
+            return -ENOMEM;
+    } else {
+        memunmap(vgpu->gm.aperture_va);
+        vgpu->gm.aperture_va = NULL;
+    }
+
     val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
     if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
         val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -124,14 +136,16 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
         val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);

     first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
-    first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;

     ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
-                                              first_mfn,
-                                              vgpu_aperture_sz(vgpu) >>
-                                              PAGE_SHIFT, map);
-    if (ret)
+                                              aperture_pa >> PAGE_SHIFT,
+                                              aperture_sz >> PAGE_SHIFT,
+                                              map);
+    if (ret) {
+        memunmap(vgpu->gm.aperture_va);
+        vgpu->gm.aperture_va = NULL;
         return ret;
+    }

     vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
     return 0;
@@ -275,7 +289,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
     if (WARN_ON(bytes > 4))
         return -EINVAL;

-    if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
+    if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
         return -EINVAL;

     /* First check if it's PCI_COMMAND */
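map_aperture() now keeps a host-side mapping of the guest aperture alive for exactly as long as the BAR is tracked: memremap() on map, memunmap() on unmap, plus an unmap on the hypervisor-call failure path so nothing leaks. A minimal user-space model of that mapped/tracked state machine, with malloc standing in for memremap and the struct/function names invented for illustration:

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

struct vgpu_model {
    void *aperture_va;  /* stands in for the memremap()ed range */
    bool tracked;       /* mirrors bar[...].tracked */
};

static int hypervisor_map(bool map) { (void)map; return 0; } /* stub */

static int map_aperture_model(struct vgpu_model *v, bool map, size_t sz)
{
    int ret;

    if (map == v->tracked)  /* already in the requested state */
        return 0;

    if (map) {
        v->aperture_va = malloc(sz);
        if (!v->aperture_va)
            return -ENOMEM;
    } else {
        free(v->aperture_va);
        v->aperture_va = NULL;
    }

    ret = hypervisor_map(map);
    if (ret) {              /* failure: do not leak the new mapping */
        free(v->aperture_va);
        v->aperture_va = NULL;
        return ret;
    }

    v->tracked = map;
    return 0;
}

int main(void)
{
    struct vgpu_model v = { 0 };

    return map_aperture_model(&v, true, 4096) ||
           map_aperture_model(&v, false, 4096);
}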
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21c36e256884..2c0ccbb817dc 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1576,11 +1576,11 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
     return 1;
 }

-static uint32_t find_bb_size(struct parser_exec_state *s)
+static int find_bb_size(struct parser_exec_state *s)
 {
     unsigned long gma = 0;
     struct cmd_info *info;
-    uint32_t bb_size = 0;
+    int bb_size = 0;
     uint32_t cmd_len = 0;
     bool met_bb_end = false;
     struct intel_vgpu *vgpu = s->vgpu;
@@ -1637,6 +1637,8 @@ static int perform_bb_shadow(struct parser_exec_state *s)

     /* get the size of the batch buffer */
     bb_size = find_bb_size(s);
+    if (bb_size < 0)
+        return -EINVAL;

     /* allocate shadow batch buffer */
     entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
@@ -2603,7 +2605,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
     struct intel_vgpu *vgpu = workload->vgpu;
     unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
-    u32 *cs;
+    void *shadow_ring_buffer_va;
+    int ring_id = workload->ring_id;
     int ret;

     guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2616,34 +2619,42 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
     gma_tail = workload->rb_start + workload->rb_tail;
     gma_top = workload->rb_start + guest_rb_size;

-    /* allocate shadow ring buffer */
-    cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
-    if (IS_ERR(cs))
-        return PTR_ERR(cs);
+    if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
+        void *va = vgpu->reserve_ring_buffer_va[ring_id];
+        /* realloc the new ring buffer if needed */
+        vgpu->reserve_ring_buffer_va[ring_id] =
+            krealloc(va, workload->rb_len, GFP_KERNEL);
+        if (!vgpu->reserve_ring_buffer_va[ring_id]) {
+            gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+            return -ENOMEM;
+        }
+        vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
+    }
+
+    shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];

     /* get shadow ring buffer va */
-    workload->shadow_ring_buffer_va = cs;
+    workload->shadow_ring_buffer_va = shadow_ring_buffer_va;

     /* head > tail --> copy head <-> top */
     if (gma_head > gma_tail) {
         ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
-                              gma_head, gma_top, cs);
+                              gma_head, gma_top, shadow_ring_buffer_va);
         if (ret < 0) {
             gvt_vgpu_err("fail to copy guest ring buffer\n");
             return ret;
         }
-        cs += ret / sizeof(u32);
+        shadow_ring_buffer_va += ret;
         gma_head = workload->rb_start;
     }

     /* copy head or start <-> tail */
-    ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+    ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
+                          shadow_ring_buffer_va);
     if (ret < 0) {
         gvt_vgpu_err("fail to copy guest ring buffer\n");
         return ret;
     }
-    cs += ret / sizeof(u32);
-    intel_ring_advance(workload->req, cs);
     return 0;
 }
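shadow_workload_ring_buffer() now copies into a per-ring reserve buffer that is only ever grown with krealloc(), instead of reserving space in the i915 ring up front. The grow-only pattern as stand-alone C, with realloc standing in for krealloc and the struct name invented for illustration:

#include <stdlib.h>
#include <string.h>

struct scratch {
    void *va;
    size_t size;
};

/* Grow-only: keep the largest buffer seen so far, never shrink. */
static void *scratch_get(struct scratch *s, size_t need)
{
    if (need > s->size) {
        void *va = realloc(s->va, need);

        if (!va)        /* old buffer is still valid on failure */
            return NULL;
        s->va = va;
        s->size = need;
    }
    return s->va;
}

int main(void)
{
    struct scratch s = { 0 };
    char *p = scratch_get(&s, 4096);

    if (p)
        memset(p, 0, 4096);
    free(s.va);
    return 0;
}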
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 91b4300f3b39..5ec07ecf33ad 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -368,7 +368,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
     ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))

-static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
     const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
     struct intel_shadow_bb_entry *entry_obj;
@@ -379,7 +379,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
         vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
         if (IS_ERR(vma)) {
-            return;
+            return PTR_ERR(vma);
         }

         /* FIXME: we are not tracking our pinned VMA leaving it
@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
         if (gmadr_bytes == 8)
             entry_obj->bb_start_cmd_va[2] = 0;
     }
+    return 0;
 }

 static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -420,7 +421,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
     return 0;
 }

-static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
     struct i915_vma *vma;
     unsigned char *per_ctx_va =
@@ -428,12 +429,12 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
         wa_ctx->indirect_ctx.size;

     if (wa_ctx->indirect_ctx.size == 0)
-        return;
+        return 0;

     vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
                                    0, CACHELINE_BYTES, 0);
     if (IS_ERR(vma)) {
-        return;
+        return PTR_ERR(vma);
     }

     /* FIXME: we are not tracking our pinned VMA leaving it
@@ -447,26 +448,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
     memset(per_ctx_va, 0, CACHELINE_BYTES);

     update_wa_ctx_2_shadow_ctx(wa_ctx);
-}
-
-static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
-{
-    struct intel_vgpu *vgpu = workload->vgpu;
-    struct execlist_ctx_descriptor_format ctx[2];
-    int ring_id = workload->ring_id;
-
-    intel_vgpu_pin_mm(workload->shadow_mm);
-    intel_vgpu_sync_oos_pages(workload->vgpu);
-    intel_vgpu_flush_post_shadow(workload->vgpu);
-    prepare_shadow_batch_buffer(workload);
-    prepare_shadow_wa_ctx(&workload->wa_ctx);
-    if (!workload->emulate_schedule_in)
-        return 0;
-
-    ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
-    ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
-
-    return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+    return 0;
 }

 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -489,13 +471,62 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
     }
 }

-static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 {
-    if (!wa_ctx->indirect_ctx.obj)
-        return;
+    struct intel_vgpu *vgpu = workload->vgpu;
+    struct execlist_ctx_descriptor_format ctx[2];
+    int ring_id = workload->ring_id;
+    int ret;
+
+    ret = intel_vgpu_pin_mm(workload->shadow_mm);
+    if (ret) {
+        gvt_vgpu_err("fail to vgpu pin mm\n");
+        goto out;
+    }
+
+    ret = intel_vgpu_sync_oos_pages(workload->vgpu);
+    if (ret) {
+        gvt_vgpu_err("fail to vgpu sync oos pages\n");
+        goto err_unpin_mm;
+    }

-    i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
-    i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+    ret = intel_vgpu_flush_post_shadow(workload->vgpu);
+    if (ret) {
+        gvt_vgpu_err("fail to flush post shadow\n");
+        goto err_unpin_mm;
+    }
+
+    ret = prepare_shadow_batch_buffer(workload);
+    if (ret) {
+        gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+        goto err_unpin_mm;
+    }
+
+    ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
+    if (ret) {
+        gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
+        goto err_shadow_batch;
+    }
+
+    if (!workload->emulate_schedule_in)
+        return 0;
+
+    ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
+    ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+
+    ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
+    if (!ret)
+        goto out;
+    else
+        gvt_vgpu_err("fail to emulate execlist schedule in\n");
+
+    release_shadow_wa_ctx(&workload->wa_ctx);
+err_shadow_batch:
+    release_shadow_batch_buffer(workload);
+err_unpin_mm:
+    intel_vgpu_unpin_mm(workload->shadow_mm);
+out:
+    return ret;
 }

 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
@@ -511,8 +542,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
     gvt_dbg_el("complete workload %p status %d\n", workload,
             workload->status);

-    release_shadow_batch_buffer(workload);
-    release_shadow_wa_ctx(&workload->wa_ctx);
+    if (!workload->status) {
+        release_shadow_batch_buffer(workload);
+        release_shadow_wa_ctx(&workload->wa_ctx);
+    }

     if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
         /* if workload->status is not successful means HW GPU
@@ -820,10 +853,21 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)

 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 {
+    enum intel_engine_id i;
+    struct intel_engine_cs *engine;
+
     clean_workloads(vgpu, ALL_ENGINES);
     kmem_cache_destroy(vgpu->workloads);
+
+    for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+        kfree(vgpu->reserve_ring_buffer_va[i]);
+        vgpu->reserve_ring_buffer_va[i] = NULL;
+        vgpu->reserve_ring_buffer_size[i] = 0;
+    }
+
 }

+#define RESERVE_RING_BUFFER_SIZE        ((1 * PAGE_SIZE)/8)
 int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 {
     enum intel_engine_id i;
@@ -843,7 +887,26 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
     if (!vgpu->workloads)
         return -ENOMEM;

+    /* each ring has a shadow ring buffer until vgpu destroyed */
+    for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+        vgpu->reserve_ring_buffer_va[i] =
+            kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
+        if (!vgpu->reserve_ring_buffer_va[i]) {
+            gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+            goto out;
+        }
+        vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+    }
     return 0;
+out:
+    for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+        if (vgpu->reserve_ring_buffer_size[i]) {
+            kfree(vgpu->reserve_ring_buffer_va[i]);
+            vgpu->reserve_ring_buffer_va[i] = NULL;
+            vgpu->reserve_ring_buffer_size[i] = 0;
+        }
+    }
+    return -ENOMEM;
 }

 void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
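The rewritten prepare_execlist_workload() above follows the kernel's usual goto-unwind ladder: each acquire step jumps to a label that releases everything acquired before it, in reverse order. A compact stand-alone model of the ladder; the step names are illustrative and one step deliberately fails to exercise the unwind:

#include <errno.h>

static int pin_mm(void)         { return 0; }
static void unpin_mm(void)      { }
static int prepare_batch(void)  { return 0; }
static void release_batch(void) { }
static int prepare_wa(void)     { return 0; }
static void release_wa(void)    { }
static int schedule_in(void)    { return -EIO; } /* force the unwind */

static int prepare_workload_model(void)
{
    int ret;

    ret = pin_mm();
    if (ret)
        goto out;
    ret = prepare_batch();
    if (ret)
        goto err_unpin_mm;
    ret = prepare_wa();
    if (ret)
        goto err_batch;

    ret = schedule_in();
    if (!ret)
        goto out;           /* success: keep everything acquired */

    release_wa();           /* failure: unwind in reverse order */
err_batch:
    release_batch();
err_unpin_mm:
    unpin_mm();
out:
    return ret;
}

int main(void)
{
    return prepare_workload_model() == -EIO ? 0 : 1;
}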
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index e6dfc3331f4b..2801d70579d8 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1647,14 +1647,13 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
     if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
         return 0;

-    atomic_inc(&mm->pincount);
-
     if (!mm->shadowed) {
         ret = shadow_mm(mm);
         if (ret)
             return ret;
     }

+    atomic_inc(&mm->pincount);
     list_del_init(&mm->lru_list);
     list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
     return 0;
@@ -1972,7 +1971,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
      */
     se.val64 |= _PAGE_PRESENT | _PAGE_RW;
     if (type == GTT_TYPE_PPGTT_PDE_PT)
-        se.val64 |= PPAT_CACHED_INDEX;
+        se.val64 |= PPAT_CACHED;

     for (i = 0; i < page_entry_num; i++)
         ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index c27c6838eaca..aaa347f8620c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -111,7 +111,7 @@ static void init_device_info(struct intel_gvt *gvt)
     if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
         || IS_KABYLAKE(gvt->dev_priv)) {
         info->max_support_vgpus = 8;
-        info->cfg_space_size = 256;
+        info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
         info->mmio_size = 2 * 1024 * 1024;
         info->mmio_bar = 0;
         info->gtt_start_offset = 8 * 1024 * 1024;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 44b719eda8c4..9c2e7c0aa38f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -80,6 +80,7 @@ struct intel_gvt_device_info {
 struct intel_vgpu_gm {
     u64 aperture_sz;
     u64 hidden_sz;
+    void *aperture_va;
     struct drm_mm_node low_gm_node;
     struct drm_mm_node high_gm_node;
 };
@@ -99,7 +100,6 @@ struct intel_vgpu_mmio {
     bool disable_warn_untrack;
 };

-#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
 #define INTEL_GVT_MAX_BAR_NUM 4

 struct intel_vgpu_pci_bar {
@@ -108,7 +108,7 @@ struct intel_vgpu_pci_bar {
 };

 struct intel_vgpu_cfg_space {
-    unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
+    unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
     struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
 };
@@ -165,6 +165,9 @@ struct intel_vgpu {
     struct list_head workload_q_head[I915_NUM_ENGINES];
     struct kmem_cache *workloads;
     atomic_t running_workload_num;
+    /* 1/2K for each reserve ring buffer */
+    void *reserve_ring_buffer_va[I915_NUM_ENGINES];
+    int reserve_ring_buffer_size[I915_NUM_ENGINES];
     DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
     struct i915_gem_context *shadow_ctx;
     DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
@@ -474,6 +477,13 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
 int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
         void *p_data, unsigned int bytes);

+static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
+{
+    /* We are 64bit bar. */
+    return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
+            PCI_BASE_ADDRESS_MEM_MASK;
+}
+
 void intel_gvt_clean_opregion(struct intel_gvt *gvt);
 int intel_gvt_init_opregion(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 83e88c70272a..96060920a6fe 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -609,21 +609,20 @@ static void intel_vgpu_release_work(struct work_struct *work)
     __intel_vgpu_release(vgpu);
 }

-static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
+static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
 {
     u32 start_lo, start_hi;
     u32 mem_type;
-    int pos = PCI_BASE_ADDRESS_0;

-    start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+    start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
             PCI_BASE_ADDRESS_MEM_MASK;
-    mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
+    mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
             PCI_BASE_ADDRESS_MEM_TYPE_MASK;

     switch (mem_type) {
     case PCI_BASE_ADDRESS_MEM_TYPE_64:
         start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
-                        + pos + 4));
+                        + bar + 4));
         break;
     case PCI_BASE_ADDRESS_MEM_TYPE_32:
     case PCI_BASE_ADDRESS_MEM_TYPE_1M:
@@ -637,6 +636,21 @@ static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
     return ((u64)start_hi << 32) | start_lo;
 }

+static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
+                             void *buf, unsigned int count, bool is_write)
+{
+    uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
+    int ret;
+
+    if (is_write)
+        ret = intel_gvt_ops->emulate_mmio_write(vgpu,
+                    bar_start + off, buf, count);
+    else
+        ret = intel_gvt_ops->emulate_mmio_read(vgpu,
+                    bar_start + off, buf, count);
+    return ret;
+}
+
 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
             size_t count, loff_t *ppos, bool is_write)
 {
@@ -661,20 +675,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
                 buf, count);
         break;
     case VFIO_PCI_BAR0_REGION_INDEX:
-    case VFIO_PCI_BAR1_REGION_INDEX:
-        if (is_write) {
-            uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
-
-            ret = intel_gvt_ops->emulate_mmio_write(vgpu,
-                        bar0_start + pos, buf, count);
-        } else {
-            uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
-
-            ret = intel_gvt_ops->emulate_mmio_read(vgpu,
-                        bar0_start + pos, buf, count);
-        }
+        ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
+                                buf, count, is_write);
         break;
     case VFIO_PCI_BAR2_REGION_INDEX:
+        ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
+                                buf, count, is_write);
+        break;
+    case VFIO_PCI_BAR1_REGION_INDEX:
     case VFIO_PCI_BAR3_REGION_INDEX:
     case VFIO_PCI_BAR4_REGION_INDEX:
     case VFIO_PCI_BAR5_REGION_INDEX:
@@ -970,7 +978,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
         switch (info.index) {
         case VFIO_PCI_CONFIG_REGION_INDEX:
             info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
-            info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
+            info.size = vgpu->gvt->device_info.cfg_space_size;
             info.flags = VFIO_REGION_INFO_FLAG_READ |
                          VFIO_REGION_INFO_FLAG_WRITE;
             break;
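intel_vgpu_get_bar_addr() above decodes a BAR from the virtual config space: mask off the low flag bits, then pull in the high dword only when the memory-type field says the BAR is 64-bit. The same decode as stand-alone C over a byte array; the constants follow the PCI spec, the sample config space is made up, and the memcpy decode assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PCI_BASE_ADDRESS_MEM_MASK       (~0x0fUL)
#define PCI_BASE_ADDRESS_MEM_TYPE_MASK  0x06
#define PCI_BASE_ADDRESS_MEM_TYPE_64    0x04

static uint64_t get_bar_addr(const uint8_t *cfg, int bar)
{
    uint32_t lo, hi = 0;

    memcpy(&lo, cfg + bar, 4);
    if ((lo & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64)
        memcpy(&hi, cfg + bar + 4, 4);  /* 64-bit BAR: next dword is the high half */

    return ((uint64_t)hi << 32) | (lo & PCI_BASE_ADDRESS_MEM_MASK);
}

int main(void)
{
    uint8_t cfg[256] = { 0 };
    uint32_t lo = 0xd0000004;   /* 64-bit memory BAR */
    uint32_t hi = 0x00000001;

    memcpy(cfg + 0x10, &lo, 4); /* BAR0 lives at config offset 0x10 */
    memcpy(cfg + 0x14, &hi, 4);

    printf("BAR0 = %#llx\n", (unsigned long long)get_bar_addr(cfg, 0x10));
    return 0;
}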
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 980ec8906b1e..1e1310f50289 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -45,8 +45,7 @@
  */
 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 {
-    u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
-              ~GENMASK(3, 0);
+    u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
     return gpa - gttmmio_gpa;
 }

@@ -57,6 +56,38 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
     (reg >= gvt->device_info.gtt_start_offset \
      && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

+static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
+{
+    u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
+    u64 aperture_sz = vgpu_aperture_sz(vgpu);
+
+    return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
+}
+
+static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
+                            void *pdata, unsigned int size, bool is_read)
+{
+    u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
+    u64 offset = gpa - aperture_gpa;
+
+    if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
+        gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
+                     offset, size);
+        return -EINVAL;
+    }
+
+    if (!vgpu->gm.aperture_va) {
+        gvt_vgpu_err("BAR is not enabled\n");
+        return -ENXIO;
+    }
+
+    if (is_read)
+        memcpy(pdata, vgpu->gm.aperture_va + offset, size);
+    else
+        memcpy(vgpu->gm.aperture_va + offset, pdata, size);
+    return 0;
+}
+
 static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
         void *p_data, unsigned int bytes, bool read)
 {
@@ -133,6 +164,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
     }
     mutex_lock(&gvt->lock);

+    if (vgpu_gpa_is_aperture(vgpu, pa)) {
+        ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
+        mutex_unlock(&gvt->lock);
+        return ret;
+    }
+
     if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
         struct intel_vgpu_guest_page *gp;
@@ -224,6 +261,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,

     mutex_lock(&gvt->lock);

+    if (vgpu_gpa_is_aperture(vgpu, pa)) {
+        ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
+        mutex_unlock(&gvt->lock);
+        return ret;
+    }
+
     if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
         struct intel_vgpu_guest_page *gp;
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 2ea542257f03..6d066cf35478 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -293,7 +293,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
      */
     if (mmio->in_context &&
         ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
-        i915.enable_execlists)
+        i915_modparams.enable_execlists)
         continue;

     if (mmio->mask)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 391800d2067b..d5892d24f0b6 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -87,7 +87,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
         return -EINVAL;
     }

-    page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+    page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
     dst = kmap(page);
     intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
             GTT_PAGE_SIZE);
@@ -201,6 +201,43 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
     ce->lrc_desc = desc;
 }

+static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
+{
+    struct intel_vgpu *vgpu = workload->vgpu;
+    void *shadow_ring_buffer_va;
+    u32 *cs;
+
+    /* allocate shadow ring buffer */
+    cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+    if (IS_ERR(cs)) {
+        gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
+            workload->rb_len);
+        return PTR_ERR(cs);
+    }
+
+    shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
+
+    /* get shadow ring buffer va */
+    workload->shadow_ring_buffer_va = cs;
+
+    memcpy(cs, shadow_ring_buffer_va,
+            workload->rb_len);
+
+    cs += workload->rb_len / sizeof(u32);
+    intel_ring_advance(workload->req, cs);
+
+    return 0;
+}
+
+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+{
+    if (!wa_ctx->indirect_ctx.obj)
+        return;
+
+    i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+    i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -214,8 +251,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
     int ring_id = workload->ring_id;
     struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
     struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+    struct intel_engine_cs *engine = dev_priv->engine[ring_id];
     struct drm_i915_gem_request *rq;
     struct intel_vgpu *vgpu = workload->vgpu;
+    struct intel_ring *ring;
     int ret;

     lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -231,35 +270,56 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
     shadow_context_descriptor_update(shadow_ctx,
                     dev_priv->engine[ring_id]);

-    rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
-    if (IS_ERR(rq)) {
-        gvt_vgpu_err("fail to allocate gem request\n");
-        ret = PTR_ERR(rq);
-        goto out;
-    }
-
-    gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
-
-    workload->req = i915_gem_request_get(rq);
-
     ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
     if (ret)
-        goto out;
+        goto err_scan;

     if ((workload->ring_id == RCS) &&
         (workload->wa_ctx.indirect_ctx.size != 0)) {
         ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
         if (ret)
-            goto out;
+            goto err_scan;
+    }
+
+    /* pin shadow context by gvt even the shadow context will be pinned
+     * when i915 alloc request. That is because gvt will update the guest
+     * context from shadow context when workload is completed, and at that
+     * moment, i915 may already unpined the shadow context to make the
+     * shadow_ctx pages invalid. So gvt need to pin itself. After update
+     * the guest context, gvt can unpin the shadow_ctx safely.
+     */
+    ring = engine->context_pin(engine, shadow_ctx);
+    if (IS_ERR(ring)) {
+        ret = PTR_ERR(ring);
+        gvt_vgpu_err("fail to pin shadow context\n");
+        goto err_shadow;
     }

     ret = populate_shadow_context(workload);
     if (ret)
-        goto out;
+        goto err_unpin;

+    rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+    if (IS_ERR(rq)) {
+        gvt_vgpu_err("fail to allocate gem request\n");
+        ret = PTR_ERR(rq);
+        goto err_unpin;
+    }
+
+    gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
+
+    workload->req = i915_gem_request_get(rq);
+    ret = copy_workload_to_ring_buffer(workload);
+    if (ret)
+        goto err_unpin;
     workload->shadowed = true;
+    return 0;

-out:
+err_unpin:
+    engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+    release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
     return ret;
 }

@@ -269,8 +329,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
     struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
     struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
     struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-    struct intel_vgpu *vgpu = workload->vgpu;
-    struct intel_ring *ring;
     int ret = 0;

     gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -284,22 +342,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)

     if (workload->prepare) {
         ret = workload->prepare(workload);
-        if (ret)
+        if (ret) {
+            engine->context_unpin(engine, shadow_ctx);
             goto out;
-    }
-
-    /* pin shadow context by gvt even the shadow context will be pinned
-     * when i915 alloc request. That is because gvt will update the guest
-     * context from shadow context when workload is completed, and at that
-     * moment, i915 may already unpined the shadow context to make the
-     * shadow_ctx pages invalid. So gvt need to pin itself. After update
-     * the guest context, gvt can unpin the shadow_ctx safely.
-     */
-    ring = engine->context_pin(engine, shadow_ctx);
-    if (IS_ERR(ring)) {
-        ret = PTR_ERR(ring);
-        gvt_vgpu_err("fail to pin shadow context\n");
-        goto out;
+        }
     }

 out:
@@ -408,7 +454,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
         return;
     }

-    page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
+    page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
     src = kmap(page);
     intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
             GTT_PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0d431a968a32..f36b85fd6d01 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -140,4 +140,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);

 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);

+void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
 #endif
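Ring shadowing is now a two-stage copy: cmd_parser.c first snapshots the guest ring (handling the head-past-tail wrap) into the linear reserve buffer, and copy_workload_to_ring_buffer() above then memcpy()s that snapshot into space returned by intel_ring_begin(). The wrap-around stage as stand-alone C; the function and variable names are illustrative:

#include <stdio.h>
#include <string.h>

/* Copy [head, tail) out of a circular buffer into a linear snapshot. */
static size_t ring_snapshot(const char *ring, size_t ring_sz,
                            size_t head, size_t tail, char *out)
{
    size_t copied = 0;

    if (head > tail) {          /* wraps: copy head..end first */
        memcpy(out, ring + head, ring_sz - head);
        copied = ring_sz - head;
        head = 0;
    }
    memcpy(out + copied, ring + head, tail - head);
    return copied + tail - head;
}

int main(void)
{
    char ring[8] = "EFGHABCD";  /* stored circularly, head at index 4 */
    char out[8];
    size_t n = ring_snapshot(ring, sizeof(ring), 4, 2, out);

    printf("%.*s\n", (int)n, out);  /* prints ABCDEF */
    return 0;
}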
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e4d4b6b41e26..b4a6ac60e7c6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -67,7 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 #undef PRINT_FLAG

     kernel_param_lock(THIS_MODULE);
-#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
+#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
     I915_PARAMS_FOR_EACH(PRINT_PARAM);
 #undef PRINT_PARAM
     kernel_param_unlock(THIS_MODULE);
@@ -1267,7 +1267,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
     if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
         seq_puts(m, "struct_mutex blocked for reset\n");

-    if (!i915.enable_hangcheck) {
+    if (!i915_modparams.enable_hangcheck) {
         seq_puts(m, "Hangcheck disabled\n");
         return 0;
     }
@@ -1422,6 +1422,9 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
     struct intel_uncore_forcewake_domain *fw_domain;
     unsigned int tmp;

+    seq_printf(m, "user.bypass_count = %u\n",
+               i915->uncore.user_forcewake.count);
+
     for_each_fw_domain(fw_domain, i915, tmp)
         seq_printf(m, "%s.wake_count = %u\n",
                    intel_uncore_forcewake_domain_to_str(fw_domain->id),
@@ -1699,7 +1702,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
     intel_runtime_pm_get(dev_priv);

     seq_printf(m, "Enabled by kernel parameter: %s\n",
-               yesno(i915.enable_ips));
+               yesno(i915_modparams.enable_ips));

     if (INTEL_GEN(dev_priv) >= 8) {
         seq_puts(m, "Currently: unknown\n");
@@ -2014,7 +2017,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
     enum intel_engine_id id;
     int ret;

-    if (!i915.enable_execlists) {
+    if (!i915_modparams.enable_execlists) {
         seq_printf(m, "Logical Ring Contexts are disabled\n");
         return 0;
     }
@@ -2443,12 +2446,8 @@ static void i915_guc_client_info(struct seq_file *m,
     seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
         client->priority, client->stage_id, client->proc_desc_offset);
-    seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
-        client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
-    seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
-        client->wq_size, client->wq_offset, client->wq_tail);
-
-    seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
+    seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
+        client->doorbell_id, client->doorbell_offset);

     for_each_engine(engine, dev_priv, id) {
         u64 submissions = client->submissions[id];
@@ -2594,7 +2593,7 @@ static int i915_guc_log_control_get(void *data, u64 *val)
     if (!dev_priv->guc.log.vma)
         return -EINVAL;

-    *val = i915.guc_log_level;
+    *val = i915_modparams.guc_log_level;

     return 0;
 }
@@ -3312,7 +3311,9 @@ static int i915_engine_info(struct seq_file *m, void *unused)
             seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
                        upper_32_bits(addr), lower_32_bits(addr));

-        if (i915.enable_execlists) {
+        if (i915_modparams.enable_execlists) {
+            const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+            struct intel_engine_execlists * const execlists = &engine->execlists;
             u32 ptr, read, write;
             unsigned int idx;

@@ -3323,8 +3324,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
             ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
             read = GEN8_CSB_READ_PTR(ptr);
             write = GEN8_CSB_WRITE_PTR(ptr);
-            seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
-                       read, write,
+            seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
+                       read, execlists->csb_head,
+                       write,
+                       intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
                        yesno(test_bit(ENGINE_IRQ_EXECLIST,
                                       &engine->irq_posted)));
             if (read >= GEN8_CSB_ENTRIES)
@@ -3335,18 +3338,19 @@ static int i915_engine_info(struct seq_file *m, void *unused)
                 write += GEN8_CSB_ENTRIES;
             while (read < write) {
                 idx = ++read % GEN8_CSB_ENTRIES;
-                seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+                seq_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
                            idx,
                            I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
-                           I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+                           hws[idx * 2],
+                           I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
+                           hws[idx * 2 + 1]);
             }

             rcu_read_lock();
-            for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
+            for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
                 unsigned int count;

-                rq = port_unpack(&engine->execlist_port[idx],
-                                 &count);
+                rq = port_unpack(&execlists->port[idx], &count);
                 if (rq) {
                     seq_printf(m, "\t\tELSP[%d] count=%d, ",
                                idx, count);
@@ -3359,7 +3363,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
             rcu_read_unlock();

             spin_lock_irq(&engine->timeline->lock);
-            for (rb = engine->execlist_first; rb; rb = rb_next(rb)){
+            for (rb = execlists->first; rb; rb = rb_next(rb)) {
                 struct i915_priolist *p =
                     rb_entry(rb, typeof(*p), node);
@@ -3403,7 +3407,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
     enum intel_engine_id id;
     int j, ret;

-    if (!i915.semaphores) {
+    if (!i915_modparams.semaphores) {
         seq_puts(m, "Semaphores are disabled\n");
         return 0;
     }
@@ -3523,6 +3527,57 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
     return 0;
 }

+static int i915_ipc_status_show(struct seq_file *m, void *data)
+{
+    struct drm_i915_private *dev_priv = m->private;
+
+    seq_printf(m, "Isochronous Priority Control: %s\n",
+            yesno(dev_priv->ipc_enabled));
+    return 0;
+}
+
+static int i915_ipc_status_open(struct inode *inode, struct file *file)
+{
+    struct drm_i915_private *dev_priv = inode->i_private;
+
+    if (!HAS_IPC(dev_priv))
+        return -ENODEV;
+
+    return single_open(file, i915_ipc_status_show, dev_priv);
+}
+
+static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
+                                     size_t len, loff_t *offp)
+{
+    struct seq_file *m = file->private_data;
+    struct drm_i915_private *dev_priv = m->private;
+    int ret;
+    bool enable;
+
+    ret = kstrtobool_from_user(ubuf, len, &enable);
+    if (ret < 0)
+        return ret;
+
+    intel_runtime_pm_get(dev_priv);
+    if (!dev_priv->ipc_enabled && enable)
+        DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
+    dev_priv->wm.distrust_bios_wm = true;
+    dev_priv->ipc_enabled = enable;
+    intel_enable_ipc(dev_priv);
+    intel_runtime_pm_put(dev_priv);
+
+    return len;
+}
+
+static const struct file_operations i915_ipc_status_fops = {
+    .owner = THIS_MODULE,
+    .open = i915_ipc_status_open,
+    .read = seq_read,
+    .llseek = seq_lseek,
+    .release = single_release,
+    .write = i915_ipc_status_write
+};
+
 static int i915_ddb_info(struct seq_file *m, void *unused)
 {
     struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4674,26 +4729,26 @@ static int i915_sseu_status(struct seq_file *m, void *unused)

 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
-    struct drm_i915_private *dev_priv = inode->i_private;
+    struct drm_i915_private *i915 = inode->i_private;

-    if (INTEL_GEN(dev_priv) < 6)
+    if (INTEL_GEN(i915) < 6)
         return 0;

-    intel_runtime_pm_get(dev_priv);
-    intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+    intel_runtime_pm_get(i915);
+    intel_uncore_forcewake_user_get(i915);

     return 0;
 }

 static int i915_forcewake_release(struct inode *inode, struct file *file)
 {
-    struct drm_i915_private *dev_priv = inode->i_private;
+    struct drm_i915_private *i915 = inode->i_private;

-    if (INTEL_GEN(dev_priv) < 6)
+    if (INTEL_GEN(i915) < 6)
         return 0;

-    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-    intel_runtime_pm_put(dev_priv);
+    intel_uncore_forcewake_user_put(i915);
+    intel_runtime_pm_put(i915);

     return 0;
 }
@@ -4859,7 +4914,8 @@ static const struct i915_debugfs_files {
     {"i915_dp_test_type", &i915_displayport_test_type_fops},
     {"i915_dp_test_active", &i915_displayport_test_active_fops},
     {"i915_guc_log_control", &i915_guc_log_control_fops},
-    {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
+    {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+    {"i915_ipc_status", &i915_ipc_status_fops}
 };

 int i915_debugfs_register(struct drm_i915_private *dev_priv)
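The new i915_ipc_status debugfs file above parses its input with kstrtobool_from_user() and applies the result under a runtime-PM reference. A stand-alone model of the parsing step only; this implements a subset of kstrtobool's accepted spellings (leading 1/0/y/Y/n/N) and the names are illustrative:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Subset of kstrtobool(): decide from the first character. */
static int parse_bool(const char *s, bool *res)
{
    switch (s[0]) {
    case '1': case 'y': case 'Y':
        *res = true;
        return 0;
    case '0': case 'n': case 'N':
        *res = false;
        return 0;
    default:
        return -EINVAL;
    }
}

int main(void)
{
    bool enable;

    if (parse_bool("y\n", &enable) == 0)
        printf("ipc_enabled <- %d\n", enable);
    return 0;
}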
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9f45cfeae775..59ac9199b35d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -58,12 +58,12 @@ static unsigned int i915_load_fail_count;

 bool __i915_inject_load_failure(const char *func, int line)
 {
-    if (i915_load_fail_count >= i915.inject_load_failure)
+    if (i915_load_fail_count >= i915_modparams.inject_load_failure)
         return false;

-    if (++i915_load_fail_count == i915.inject_load_failure) {
+    if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
         DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
-             i915.inject_load_failure, func, line);
+             i915_modparams.inject_load_failure, func, line);
         return true;
     }

@@ -106,8 +106,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,

 static bool i915_error_injected(struct drm_i915_private *dev_priv)
 {
-    return i915.inject_load_failure &&
-           i915_load_fail_count == i915.inject_load_failure;
+    return i915_modparams.inject_load_failure &&
+           i915_load_fail_count == i915_modparams.inject_load_failure;
 }

 #define i915_load_error(dev_priv, fmt, ...) \
@@ -239,7 +239,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
             dev_priv->pch_type = PCH_KBP;
             DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
             WARN_ON(!IS_SKYLAKE(dev_priv) &&
-                !IS_KABYLAKE(dev_priv));
+                !IS_KABYLAKE(dev_priv) &&
+                !IS_COFFEELAKE(dev_priv));
         } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
             dev_priv->pch_type = PCH_CNP;
             DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");
@@ -320,7 +321,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
         value = USES_PPGTT(dev_priv);
         break;
     case I915_PARAM_HAS_SEMAPHORES:
-        value = i915.semaphores;
+        value = i915_modparams.semaphores;
         break;
     case I915_PARAM_HAS_SECURE_BATCHES:
         value = capable(CAP_SYS_ADMIN);
@@ -339,7 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
             return -ENODEV;
         break;
     case I915_PARAM_HAS_GPU_RESET:
-        value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+        value = i915_modparams.enable_hangcheck &&
+            intel_has_gpu_reset(dev_priv);
         if (value && intel_has_reset_engine(dev_priv))
             value = 2;
         break;
@@ -868,6 +870,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
     memcpy(device_info, match_info, sizeof(*device_info));
     device_info->device_id = dev_priv->drm.pdev->device;

+    BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+                 sizeof(device_info->platform_mask) * BITS_PER_BYTE);
+    device_info->platform_mask = BIT(device_info->platform);
+
     BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
     device_info->gen_mask = BIT(device_info->gen - 1);
@@ -1030,9 +1036,9 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)

 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 {
-    i915.enable_execlists =
+    i915_modparams.enable_execlists =
         intel_sanitize_enable_execlists(dev_priv,
-                        i915.enable_execlists);
+                        i915_modparams.enable_execlists);

     /*
      * i915.enable_ppgtt is read-only, so do an early pass to validate the
@@ -1040,12 +1046,15 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
      * do this now so that we can print out any log messages once rather
      * than every time we check intel_enable_ppgtt().
      */
-    i915.enable_ppgtt =
-        intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
-    DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+    i915_modparams.enable_ppgtt =
+        intel_sanitize_enable_ppgtt(dev_priv,
+                        i915_modparams.enable_ppgtt);
+    DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);

-    i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
-    DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
+    i915_modparams.semaphores =
+        intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
+    DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
+             yesno(i915_modparams.semaphores));

     intel_uc_sanitize_options(dev_priv);
@@ -1276,7 +1285,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
     int ret;

     /* Enable nuclear pageflip on ILK+ */
-    if (!i915.nuclear_pageflip && match_info->gen < 5)
+    if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
         driver.driver_features &= ~DRIVER_ATOMIC;

     ret = -ENOMEM;
@@ -1340,7 +1349,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)

     intel_runtime_pm_enable(dev_priv);

-    dev_priv->ipc_enabled = false;
+    intel_init_ipc(dev_priv);

     if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
         DRM_INFO("DRM_I915_DEBUG enabled\n");
@@ -2608,6 +2617,8 @@ static int intel_runtime_resume(struct device *kdev)
     if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
         intel_hpd_init(dev_priv);

+    intel_enable_ipc(dev_priv);
+
     enable_rpm_wakeref_asserts(dev_priv);

     if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 18d9da53282b..7ca11318ac69 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -80,8 +80,8 @@

 #define DRIVER_NAME     "i915"
 #define DRIVER_DESC     "Intel Graphics"
-#define DRIVER_DATE     "20170818"
-#define DRIVER_TIMESTAMP    1503088845
+#define DRIVER_DATE     "20170929"
+#define DRIVER_TIMESTAMP    1506682238

 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -93,7 +93,7 @@
 #define I915_STATE_WARN(condition, format...) ({ \
     int __ret_warn_on = !!(condition); \
     if (unlikely(__ret_warn_on)) \
-        if (!WARN(i915.verbose_state_checks, format)) \
+        if (!WARN(i915_modparams.verbose_state_checks, format)) \
             DRM_ERROR(format); \
     unlikely(__ret_warn_on); \
 })
@@ -126,7 +126,7 @@ static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
 {
     uint_fixed_16_16_t fp;

-    WARN_ON(val >> 16);
+    WARN_ON(val > U16_MAX);

     fp.val = val << 16;
     return fp;
@@ -163,8 +163,8 @@ static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
 static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
 {
     uint_fixed_16_16_t fp;
-    WARN_ON(val >> 32);
-    fp.val = clamp_t(uint32_t, val, 0, ~0);
+    WARN_ON(val > U32_MAX);
+    fp.val = (uint32_t) val;
     return fp;
 }
@@ -181,8 +181,8 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,

     intermediate_val = (uint64_t) val * mul.val;
     intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
-    WARN_ON(intermediate_val >> 32);
-    return clamp_t(uint32_t, intermediate_val, 0, ~0);
+    WARN_ON(intermediate_val > U32_MAX);
+    return (uint32_t) intermediate_val;
 }

 static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
@@ -211,8 +211,8 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,

     interm_val = (uint64_t)val << 16;
     interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
-    WARN_ON(interm_val >> 32);
-    return clamp_t(uint32_t, interm_val, 0, ~0);
+    WARN_ON(interm_val > U32_MAX);
+    return (uint32_t) interm_val;
 }

 static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
@@ -569,6 +569,24 @@ struct i915_hotplug {
          (__i)++) \
         for_each_if (plane_state)

+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+    for ((__i) = 0; \
+         (__i) < (__state)->base.dev->mode_config.num_crtc && \
+         ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+         (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+         (__i)++) \
+        for_each_if (crtc)
+
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+    for ((__i) = 0; \
+         (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+         ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+         (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+         (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+         (__i)++) \
+        for_each_if (plane)
+
 struct drm_i915_private;
 struct i915_mm_struct;
 struct i915_mmu_object;
@@ -707,8 +725,7 @@ struct drm_i915_display_funcs {
                  struct drm_atomic_state *old_state);
     void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
                  struct drm_atomic_state *old_state);
-    void (*update_crtcs)(struct drm_atomic_state *state,
-                 unsigned int *crtc_vblank_mask);
+    void (*update_crtcs)(struct drm_atomic_state *state);
     void (*audio_codec_enable)(struct drm_connector *connector,
                    struct intel_encoder *encoder,
                    const struct drm_display_mode *adjusted_mode);
@@ -759,7 +776,6 @@ struct intel_csr {
     func(has_fpga_dbg); \
     func(has_full_ppgtt); \
     func(has_full_48bit_ppgtt); \
-    func(has_gmbus_irq); \
     func(has_gmch_display); \
     func(has_guc); \
     func(has_guc_ct); \
@@ -780,7 +796,8 @@ struct intel_csr {
     func(cursor_needs_physical); \
     func(hws_needs_physical); \
    func(overlay_needs_physical); \
-    func(supports_tv);
+    func(supports_tv); \
+    func(has_ipc);

 struct sseu_dev_info {
     u8 slice_mask;
@@ -834,20 +851,28 @@ enum intel_platform {
 };

 struct intel_device_info {
-    u32 display_mmio_offset;
     u16 device_id;
+    u16 gen_mask;
+
+    u8 gen;
+    u8 gt; /* GT number, 0 if undefined */
+    u8 num_rings;
+    u8 ring_mask; /* Rings supported by the HW */
+
+    enum intel_platform platform;
+    u32 platform_mask;
+
+    u32 display_mmio_offset;
+
     u8 num_pipes;
     u8 num_sprites[I915_MAX_PIPES];
     u8 num_scalers[I915_MAX_PIPES];
-    u8 gen;
-    u16 gen_mask;
-    enum intel_platform platform;
-    u8 ring_mask; /* Rings supported by the HW */
-    u8 num_rings;
+
 #define DEFINE_FLAG(name) u8 name:1
     DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 #undef DEFINE_FLAG
     u16 ddb_size; /* in blocks */
+
     /* Register offsets for the various display pipes and transcoders */
     int pipe_offsets[I915_MAX_TRANSCODERS];
     int trans_offsets[I915_MAX_TRANSCODERS];
@@ -982,7 +1007,8 @@ struct i915_gpu_state {
             u32 seqno;
             u32 head;
             u32 tail;
-        } *requests, execlist[2];
+        } *requests, execlist[EXECLIST_MAX_PORTS];
+        unsigned int num_ports;

         struct drm_i915_error_waiter {
             char comm[TASK_COMM_LEN];
@@ -1107,6 +1133,7 @@ struct intel_fbc {
         } fb;

         int cfb_size;
+        unsigned int gen9_wa_cfb_stride;
     } params;

     struct intel_fbc_work {
@@ -1159,6 +1186,14 @@ struct i915_psr {
     bool y_cord_support;
     bool colorimetry_support;
     bool alpm;
+
+    void (*enable_source)(struct intel_dp *,
+                  const struct intel_crtc_state *);
+    void (*disable_source)(struct intel_dp *,
+                   const struct intel_crtc_state *);
+    void (*enable_sink)(struct intel_dp *);
+    void (*activate)(struct intel_dp *);
+    void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
 };

 enum intel_pch {
@@ -1465,6 +1500,11 @@ struct i915_gem_mm {
     struct llist_head free_list;
     struct work_struct free_work;

+    /**
+     * Small stash of WC pages
+     */
+    struct pagevec wc_stash;
+
     /** Usable portion of the GTT for GEM */
     dma_addr_t stolen_base; /* limited to low memory (32-bit) */
@@ -1718,7 +1758,7 @@ struct intel_vbt_data {
     int crt_ddc_pin;

     int child_dev_num;
-    union child_device_config *child_dev;
+    struct child_device_config *child_dev;

     struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
     struct sdvo_device_mapping sdvo_mappings[2];
@@ -1812,6 +1852,20 @@ struct skl_wm_level {
     uint8_t plane_res_l;
 };

+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+    bool x_tiled, y_tiled;
+    bool rc_surface;
+    uint32_t width;
+    uint8_t cpp;
+    uint32_t plane_pixel_rate;
+    uint32_t y_min_scanlines;
+    uint32_t plane_bytes_per_line;
+    uint_fixed_16_16_t plane_blocks_per_line;
+    uint_fixed_16_16_t y_tile_minimum;
+    uint32_t linetime_us;
+};
+
 /*
  * This struct helps tracking the state needed for runtime PM, which puts the
  * device in PCI D3 state. Notice that when this happens, nothing on the
@@ -2307,6 +2361,8 @@ struct drm_i915_private {
     DECLARE_HASHTABLE(mm_structs, 7);
     struct mutex mm_lock;

+    struct intel_ppat ppat;
+
     /* Kernel Modesetting */

     struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -2329,7 +2385,8 @@ struct drm_i915_private {
     struct mutex dpll_lock;

     unsigned int active_crtcs;
-    unsigned int min_pixclk[I915_MAX_PIPES];
+    /* minimum acceptable cdclk for each pipe */
+    int min_cdclk[I915_MAX_PIPES];

     int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@@ -2786,8 +2843,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 #define for_each_sgt_dma(__dmap, __iter, __sgt) \
     for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
          ((__dmap) = (__iter).dma + (__iter).curr); \
-         (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
-         ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
+         (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+         (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)

 /**
  * for_each_sgt_page - iterate over the pages of the given sg_table
@@ -2799,8 +2856,23 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
     for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
          ((__pp) = (__iter).pfn == 0 ? NULL : \
         pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
-         (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
-         ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
+         (((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
+         (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
+
+static inline unsigned int i915_sg_segment_size(void)
+{
+    unsigned int size = swiotlb_max_segment();
+
+    if (size == 0)
+        return SCATTERLIST_MAX_SEGMENT;
+
+    size = rounddown(size, PAGE_SIZE);
+    /* swiotlb_max_segment_size can return 1 byte when it means one page. */
+    if (size < PAGE_SIZE)
+        size = PAGE_SIZE;
+
+    return size;
+}

 static inline const struct intel_device_info *
 intel_info(const struct drm_i915_private *dev_priv)
@@ -2817,23 +2889,21 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define INTEL_REVID(dev_priv)   ((dev_priv)->drm.pdev->revision)

 #define GEN_FOREVER (0)
+
+#define INTEL_GEN_MASK(s, e) ( \
+    BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
+    BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
+    GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
+        (s) != GEN_FOREVER ? (s) - 1 : 0) \
+)
+
 /*
  * Returns true if Gen is in inclusive range [Start, End].
  *
  * Use GEN_FOREVER for unbound start and or end.
  */
-#define IS_GEN(dev_priv, s, e) ({ \
-    unsigned int __s = (s), __e = (e); \
-    BUILD_BUG_ON(!__builtin_constant_p(s)); \
-    BUILD_BUG_ON(!__builtin_constant_p(e)); \
-    if ((__s) != GEN_FOREVER) \
-        __s = (s) - 1; \
-    if ((__e) == GEN_FOREVER) \
-        __e = BITS_PER_LONG - 1; \
-    else \
-        __e = (e) - 1; \
-    !!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
-})
+#define IS_GEN(dev_priv, s, e) \
+    (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))

 /*
  * Return true if revision is in range [since,until] inclusive.
  */
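The fixed-point hunks above replace silent clamping with WARN_ON checks against U16_MAX/U32_MAX followed by a plain truncating cast. A stand-alone 16.16 fixed-point sketch of the same two helpers, with assert standing in for WARN_ON and the type name invented for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } fixed16;   /* 16.16 fixed point */

static fixed16 u32_to_fixed16(uint32_t v)
{
    fixed16 fp;

    assert(v <= UINT16_MAX);    /* models WARN_ON(val > U16_MAX) */
    fp.val = v << 16;
    return fp;
}

static uint32_t mul_round_up_u32_fixed16(uint32_t v, fixed16 mul)
{
    uint64_t tmp = (uint64_t)v * mul.val;

    tmp = (tmp + ((1 << 16) - 1)) >> 16;    /* round up */
    assert(tmp <= UINT32_MAX);              /* models WARN_ON(... > U32_MAX) */
    return (uint32_t)tmp;
}

int main(void)
{
    fixed16 half = { 0x8000 };  /* 0.5 in 16.16 */

    printf("%u\n", mul_round_up_u32_fixed16(5, half)); /* ceil(2.5) = 3 */
    (void)u32_to_fixed16(100);
    return 0;
}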
@@ -2843,38 +2913,39 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define IS_REVID(p, since, until) \
     (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

-#define IS_I830(dev_priv)   ((dev_priv)->info.platform == INTEL_I830)
-#define IS_I845G(dev_priv)  ((dev_priv)->info.platform == INTEL_I845G)
-#define IS_I85X(dev_priv)   ((dev_priv)->info.platform == INTEL_I85X)
-#define IS_I865G(dev_priv)  ((dev_priv)->info.platform == INTEL_I865G)
-#define IS_I915G(dev_priv)  ((dev_priv)->info.platform == INTEL_I915G)
-#define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM)
-#define IS_I945G(dev_priv)  ((dev_priv)->info.platform == INTEL_I945G)
-#define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM)
-#define IS_I965G(dev_priv)  ((dev_priv)->info.platform == INTEL_I965G)
-#define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM)
-#define IS_G45(dev_priv)    ((dev_priv)->info.platform == INTEL_G45)
-#define IS_GM45(dev_priv)   ((dev_priv)->info.platform == INTEL_GM45)
+#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+
+#define IS_I830(dev_priv)   IS_PLATFORM(dev_priv, INTEL_I830)
+#define IS_I845G(dev_priv)  IS_PLATFORM(dev_priv, INTEL_I845G)
+#define IS_I85X(dev_priv)   IS_PLATFORM(dev_priv, INTEL_I85X)
+#define IS_I865G(dev_priv)  IS_PLATFORM(dev_priv, INTEL_I865G)
+#define IS_I915G(dev_priv)  IS_PLATFORM(dev_priv, INTEL_I915G)
+#define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
+#define IS_I945G(dev_priv)  IS_PLATFORM(dev_priv, INTEL_I945G)
+#define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
+#define IS_I965G(dev_priv)  IS_PLATFORM(dev_priv, INTEL_I965G)
+#define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
+#define IS_G45(dev_priv)    IS_PLATFORM(dev_priv, INTEL_G45)
+#define IS_GM45(dev_priv)   IS_PLATFORM(dev_priv, INTEL_GM45)
 #define IS_G4X(dev_priv)    (IS_G45(dev_priv) || IS_GM45(dev_priv))
 #define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
 #define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
-#define IS_PINEVIEW(dev_priv)   ((dev_priv)->info.platform == INTEL_PINEVIEW)
-#define IS_G33(dev_priv)    ((dev_priv)->info.platform == INTEL_G33)
+#define IS_PINEVIEW(dev_priv)   IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
+#define IS_G33(dev_priv)    IS_PLATFORM(dev_priv, INTEL_G33)
 #define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
-#define IS_IVYBRIDGE(dev_priv)  ((dev_priv)->info.platform == INTEL_IVYBRIDGE)
-#define IS_IVB_GT1(dev_priv)    (INTEL_DEVID(dev_priv) == 0x0156 || \
-                 INTEL_DEVID(dev_priv) == 0x0152 || \
-                 INTEL_DEVID(dev_priv) == 0x015a)
-#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW)
-#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW)
-#define IS_HASWELL(dev_priv)    ((dev_priv)->info.platform == INTEL_HASWELL)
-#define IS_BROADWELL(dev_priv)  ((dev_priv)->info.platform == INTEL_BROADWELL)
-#define IS_SKYLAKE(dev_priv)    ((dev_priv)->info.platform == INTEL_SKYLAKE)
-#define IS_BROXTON(dev_priv)    ((dev_priv)->info.platform == INTEL_BROXTON)
-#define IS_KABYLAKE(dev_priv)   ((dev_priv)->info.platform == INTEL_KABYLAKE)
-#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE)
-#define IS_COFFEELAKE(dev_priv) ((dev_priv)->info.platform == INTEL_COFFEELAKE)
-#define IS_CANNONLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_CANNONLAKE)
+#define IS_IVYBRIDGE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
+#define IS_IVB_GT1(dev_priv)    (IS_IVYBRIDGE(dev_priv) && \
+                 (dev_priv)->info.gt == 1)
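With the hunk above, IS_GEN() collapses into a single AND against a mask built by INTEL_GEN_MASK(), and each device carries gen_mask = BIT(gen - 1). The mask arithmetic as stand-alone C; GENMASK is re-derived locally and the generation values are illustrative:

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define GENMASK(h, l) \
    (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

#define GEN_MASK(s, e)          GENMASK((e) - 1, (s) - 1)
#define IS_GEN(gen_mask, s, e)  (!!((gen_mask) & GEN_MASK((s), (e))))

int main(void)
{
    unsigned long gen9 = 1UL << (9 - 1);    /* gen_mask = BIT(gen - 1) */

    printf("gen9 in [8,10]? %d\n", IS_GEN(gen9, 8, 10)); /* 1 */
    printf("gen9 in [4,6]?  %d\n", IS_GEN(gen9, 4, 6));  /* 0 */
    return 0;
}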
+#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW) +#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW) +#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL) +#define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL) +#define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE) +#define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON) +#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE) +#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) +#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) +#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE) #define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) @@ -2886,11 +2957,11 @@ intel_info(const struct drm_i915_private *dev_priv) #define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xf) == 0xe) #define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) + (dev_priv)->info.gt == 3) #define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00) #define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) + (dev_priv)->info.gt == 3) /* ULX machines are also considered ULT. */ #define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \ INTEL_DEVID(dev_priv) == 0x0A1E) @@ -2911,17 +2982,19 @@ intel_info(const struct drm_i915_private *dev_priv) INTEL_DEVID(dev_priv) == 0x5915 || \ INTEL_DEVID(dev_priv) == 0x591E) #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010) + (dev_priv)->info.gt == 2) #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) + (dev_priv)->info.gt == 3) #define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030) + (dev_priv)->info.gt == 4) #define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010) + (dev_priv)->info.gt == 2) #define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \ - (INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020) + (dev_priv)->info.gt == 3) #define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0) +#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \ + (dev_priv)->info.gt == 2) #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support) @@ -3012,9 +3085,9 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \ ((dev_priv)->info.has_logical_ring_contexts) -#define USES_PPGTT(dev_priv) (i915.enable_ppgtt) -#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2) -#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3) +#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt) +#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2) +#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3) #define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay) #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \ @@ -3032,9 +3105,12 @@ intel_info(const struct drm_i915_private *dev_priv) * even when in MSI mode. This results in spurious interrupt warnings if the * legacy irq no. is shared with another device. 
The kernel then disables that * interrupt source and so prevents the other device from working properly. + * + * Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX + * interrupts. */ -#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5) -#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq) +#define HAS_AUX_IRQ(dev_priv) true +#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4) /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. @@ -3065,6 +3141,8 @@ intel_info(const struct drm_i915_private *dev_priv) #define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm) #define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc) +#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc) + /* * For now, anything with a GuC requires uCode loading, and then supports * command submission once loaded. But these are logically independent @@ -3210,7 +3288,7 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) { unsigned long delay; - if (unlikely(!i915.enable_hangcheck)) + if (unlikely(!i915_modparams.enable_hangcheck)) return; /* Don't continually defer the hangcheck so that it is always run at @@ -3243,6 +3321,8 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) return dev_priv->vgpu.active; } +u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, + enum pipe pipe); void i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, u32 status_mask); @@ -3648,6 +3728,9 @@ i915_vm_to_ppgtt(struct i915_address_space *vm) /* i915_gem_fence_reg.c */ int __must_check i915_vma_get_fence(struct i915_vma *vma); int __must_check i915_vma_put_fence(struct i915_vma *vma); +struct drm_i915_fence_reg * +i915_reserve_fence(struct drm_i915_private *dev_priv); +void i915_unreserve_fence(struct drm_i915_fence_reg *fence); void i915_gem_revoke_fences(struct drm_i915_private *dev_priv); void i915_gem_restore_fences(struct drm_i915_private *dev_priv); @@ -4333,11 +4416,12 @@ int remap_io_mapping(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, struct io_mapping *iomap); -static inline bool -intel_engine_can_store_dword(struct intel_engine_cs *engine) +static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) { - return __intel_engine_can_store_dword(INTEL_GEN(engine->i915), - engine->class); + if (INTEL_GEN(i915) >= 10) + return CNL_HWS_CSB_WRITE_INDEX; + else + return I915_HWS_CSB_WRITE_INDEX; } #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19404c96eeb1..73eeb6b1f1cd 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) * the alignment of the buddy allocation will naturally match. 
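The drm_pci_alloc() change just below passes the same rounded-up value as both size and alignment, keeping the two consistent so the natural alignment of a buddy allocation matches the request. A small sketch of that invariant, with roundup_pow_of_two() reimplemented as a loop purely for illustration (the kernel computes it from fls_long() instead):

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long x)
{
    unsigned long r = 1;

    while (r < x)
        r <<= 1;
    return r;
}

int main(void)
{
    unsigned long obj_size = 3 * 4096; /* e.g. a 3-page object */
    unsigned long sz = roundup_pow_of_two(obj_size);

    /* size == alignment == 16384: a buddy allocator hands back a
     * power-of-two block naturally aligned to its own size */
    printf("alloc %lu bytes, %lu-byte aligned\n", sz, sz);
    return 0;
}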
*/ phys = drm_pci_alloc(obj->base.dev, - obj->base.size, + roundup_pow_of_two(obj->base.size), roundup_pow_of_two(obj->base.size)); if (!phys) return ERR_PTR(-ENOMEM); @@ -694,10 +694,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) switch (obj->base.write_domain) { case I915_GEM_DOMAIN_GTT: - if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) { + if (!HAS_LLC(dev_priv)) { intel_runtime_pm_get(dev_priv); spin_lock_irq(&dev_priv->uncore.lock); - POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base)); + POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base)); spin_unlock_irq(&dev_priv->uncore.lock); intel_runtime_pm_put(dev_priv); } @@ -1013,17 +1013,20 @@ gtt_user_read(struct io_mapping *mapping, loff_t base, int offset, char __user *user_data, int length) { - void *vaddr; + void __iomem *vaddr; unsigned long unwritten; /* We can use the cpu mem copy function because this is X86. */ - vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); - unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length); + vaddr = io_mapping_map_atomic_wc(mapping, base); + unwritten = __copy_to_user_inatomic(user_data, + (void __force *)vaddr + offset, + length); io_mapping_unmap_atomic(vaddr); if (unwritten) { - vaddr = (void __force *) - io_mapping_map_wc(mapping, base, PAGE_SIZE); - unwritten = copy_to_user(user_data, vaddr + offset, length); + vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE); + unwritten = copy_to_user(user_data, + (void __force *)vaddr + offset, + length); io_mapping_unmap(vaddr); } return unwritten; @@ -1189,18 +1192,18 @@ ggtt_write(struct io_mapping *mapping, loff_t base, int offset, char __user *user_data, int length) { - void *vaddr; + void __iomem *vaddr; unsigned long unwritten; /* We can use the cpu mem copy function because this is X86. 
*/ - vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base); - unwritten = __copy_from_user_inatomic_nocache(vaddr + offset, + vaddr = io_mapping_map_atomic_wc(mapping, base); + unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset, user_data, length); io_mapping_unmap_atomic(vaddr); if (unwritten) { - vaddr = (void __force *) - io_mapping_map_wc(mapping, base, PAGE_SIZE); - unwritten = copy_from_user(vaddr + offset, user_data, length); + vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE); + unwritten = copy_from_user((void __force *)vaddr + offset, + user_data, length); io_mapping_unmap(vaddr); } @@ -2300,7 +2303,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) struct sgt_iter sgt_iter; struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ - unsigned int max_segment; + unsigned int max_segment = i915_sg_segment_size(); gfp_t noreclaim; int ret; @@ -2311,10 +2314,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); - max_segment = swiotlb_max_segment(); - if (!max_segment) - max_segment = rounddown(UINT_MAX, PAGE_SIZE); - st = kmalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) return ERR_PTR(-ENOMEM); @@ -2476,8 +2475,6 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj) { struct sg_table *pages; - GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); - if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) { DRM_DEBUG("Attempting to obtain a purgeable object\n"); return -EFAULT; @@ -2507,6 +2504,8 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) return err; if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { + GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); + err = ____i915_gem_object_get_pages(obj); if (err) goto unlock; @@ -2590,6 +2589,8 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { + GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); + ret = ____i915_gem_object_get_pages(obj); if (ret) goto err_unlock; @@ -2814,8 +2815,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine) * Turning off the engine->irq_tasklet until the reset is over * prevents the race. */ - tasklet_kill(&engine->irq_tasklet); - tasklet_disable(&engine->irq_tasklet); + tasklet_kill(&engine->execlists.irq_tasklet); + tasklet_disable(&engine->execlists.irq_tasklet); if (engine->irq_seqno_barrier) engine->irq_seqno_barrier(engine); @@ -2994,7 +2995,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv) void i915_gem_reset_finish_engine(struct intel_engine_cs *engine) { - tasklet_enable(&engine->irq_tasklet); + tasklet_enable(&engine->execlists.irq_tasklet); kthread_unpark(engine->breadcrumbs.signaler); } @@ -3021,9 +3022,6 @@ static void nop_submit_request(struct drm_i915_gem_request *request) static void engine_set_wedged(struct intel_engine_cs *engine) { - struct drm_i915_gem_request *request; - unsigned long flags; - /* We need to be sure that no thread is running the old callback as * we install the nop handler (otherwise we would submit a request * to hardware that will never complete). 
In order to prevent this @@ -3033,40 +3031,7 @@ static void engine_set_wedged(struct intel_engine_cs *engine) engine->submit_request = nop_submit_request; /* Mark all executing requests as skipped */ - spin_lock_irqsave(&engine->timeline->lock, flags); - list_for_each_entry(request, &engine->timeline->requests, link) - if (!i915_gem_request_completed(request)) - dma_fence_set_error(&request->fence, -EIO); - spin_unlock_irqrestore(&engine->timeline->lock, flags); - - /* - * Clear the execlists queue up before freeing the requests, as those - * are the ones that keep the context and ringbuffer backing objects - * pinned in place. - */ - - if (i915.enable_execlists) { - struct execlist_port *port = engine->execlist_port; - unsigned long flags; - unsigned int n; - - spin_lock_irqsave(&engine->timeline->lock, flags); - - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) - i915_gem_request_put(port_request(&port[n])); - memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); - engine->execlist_queue = RB_ROOT; - engine->execlist_first = NULL; - - spin_unlock_irqrestore(&engine->timeline->lock, flags); - - /* The port is checked prior to scheduling a tasklet, but - * just in case we have suspended the tasklet to do the - * wedging make sure that when it wakes, it decides there - * is no work to do by clearing the irq_posted bit. - */ - clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - } + engine->cancel_requests(engine); /* Mark all pending requests as complete so that any concurrent * (lockless) lookup doesn't try and wait upon the request as we @@ -3257,11 +3222,11 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file) struct i915_gem_context *ctx = lut->ctx; struct i915_vma *vma; + GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF)); if (ctx->file_priv != fpriv) continue; vma = radix_tree_delete(&ctx->handles_vma, lut->handle); - GEM_BUG_ON(vma->obj != obj); /* We allow the process to have multiple handles to the same @@ -3375,24 +3340,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags) return 0; } -static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms) -{ - return wait_for(intel_engine_is_idle(engine), timeout_ms); -} - static int wait_for_engines(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - - for_each_engine(engine, i915, id) { - if (GEM_WARN_ON(wait_for_engine(engine, 50))) { - i915_gem_set_wedged(i915); - return -EIO; - } - - GEM_BUG_ON(intel_engine_get_seqno(engine) != - intel_engine_last_submit(engine)); + if (wait_for(intel_engines_are_idle(i915), 50)) { + DRM_ERROR("Failed to idle engines, declaring wedged!\n"); + i915_gem_set_wedged(i915); + return -EIO; } return 0; @@ -4426,6 +4379,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, llist_for_each_entry_safe(obj, on, freed, freed) { GEM_BUG_ON(obj->bind_count); GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits)); + GEM_BUG_ON(!list_empty(&obj->lut_list)); if (obj->ops->release) obj->ops->release(obj); @@ -4533,6 +4487,12 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv) void i915_gem_sanitize(struct drm_i915_private *i915) { + if (i915_terminally_wedged(&i915->gpu_error)) { + mutex_lock(&i915->drm.struct_mutex); + i915_gem_unset_wedged(i915); + mutex_unlock(&i915->drm.struct_mutex); + } + /* * If we inherit context state from the BIOS or earlier occupants * of the GPU, the GPU may be in an inconsistent state when we @@ -4572,7 +4532,7 @@ int 
i915_gem_suspend(struct drm_i915_private *dev_priv) ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED); - if (ret) + if (ret && ret != -EIO) goto err_unlock; assert_kernel_context_is_current(dev_priv); @@ -4594,7 +4554,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) * reset the GPU back to its idle, low power state. */ WARN_ON(dev_priv->gt.awake); - WARN_ON(!intel_engines_are_idle(dev_priv)); + if (WARN_ON(!intel_engines_are_idle(dev_priv))) + i915_gem_set_wedged(dev_priv); /* no hope, discard everything */ /* * Neither the BIOS, ourselves or any other kernel @@ -4616,11 +4577,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv) * machine in an unusable condition. */ i915_gem_sanitize(dev_priv); - goto out_rpm_put; + + intel_runtime_pm_put(dev_priv); + return 0; err_unlock: mutex_unlock(&dev->struct_mutex); -out_rpm_put: intel_runtime_pm_put(dev_priv); return ret; } @@ -4776,7 +4738,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value) return false; /* TODO: make semaphores and Execlists play nicely together */ - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) return false; if (value >= 0) @@ -4797,7 +4759,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv) dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1); - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { dev_priv->gt.resume = intel_legacy_submission_resume; dev_priv->gt.cleanup_engine = intel_engine_cleanup; } else { diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 58a2a44f88bd..921ee369c74d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -314,7 +314,7 @@ __create_hw_context(struct drm_i915_private *dev_priv, * present or not in use we still need a small bias as ring wraparound * at offset 0 sometimes hangs. No idea why. */ - if (HAS_GUC(dev_priv) && i915.enable_guc_loading) + if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) ctx->ggtt_offset_bias = GUC_WOPCM_TOP; else ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE; @@ -407,7 +407,7 @@ i915_gem_context_create_gvt(struct drm_device *dev) i915_gem_context_set_closed(ctx); /* not user accessible */ i915_gem_context_clear_bannable(ctx); i915_gem_context_set_force_single_submission(ctx); - if (!i915.enable_guc_submission) + if (!i915_modparams.enable_guc_submission) ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */ GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); @@ -431,7 +431,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv) if (intel_vgpu_active(dev_priv) && HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); return -EINVAL; } @@ -483,7 +483,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) } /* Force the GPU state to be restored on enabling */ - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { struct i915_gem_context *ctx; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { @@ -568,7 +568,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 flags) enum intel_engine_id id; const int num_rings = /* Use an extended w/a on gen7 if signalling from other rings */ - (i915.semaphores && INTEL_GEN(dev_priv) == 7) ? + (i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ? 
INTEL_INFO(dev_priv)->num_rings - 1 : 0; int len; @@ -837,7 +837,7 @@ int i915_switch_context(struct drm_i915_gem_request *req) struct intel_engine_cs *engine = req->engine; lockdep_assert_held(&req->i915->drm.struct_mutex); - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) return 0; if (!req->ctx->engine[engine->id].state) { diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 92437f455b43..d733c4d5a500 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -58,6 +58,7 @@ enum { #define __EXEC_HAS_RELOC BIT(31) #define __EXEC_VALIDATED BIT(30) +#define __EXEC_INTERNAL_FLAGS (~0u << 30) #define UPDATE PIN_OFFSET_FIXED #define BATCH_OFFSET_BIAS (256*1024) @@ -268,6 +269,11 @@ static inline u64 gen8_noncanonical_addr(u64 address) return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0); } +static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) +{ + return eb->engine->needs_cmd_parser && eb->batch_len; +} + static int eb_create(struct i915_execbuffer *eb) { if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) { @@ -674,7 +680,7 @@ static int eb_select_context(struct i915_execbuffer *eb) static int eb_lookup_vmas(struct i915_execbuffer *eb) { struct radix_tree_root *handles_vma = &eb->ctx->handles_vma; - struct drm_i915_gem_object *uninitialized_var(obj); + struct drm_i915_gem_object *obj; unsigned int i; int err; @@ -720,19 +726,17 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) goto err_obj; } + /* transfer ref to ctx */ vma->open_count++; list_add(&lut->obj_link, &obj->lut_list); list_add(&lut->ctx_link, &eb->ctx->handles_list); lut->ctx = eb->ctx; lut->handle = handle; - /* transfer ref to ctx */ - obj = NULL; - add_vma: err = eb_add_vma(eb, i, vma); if (unlikely(err)) - goto err_obj; + goto err_vma; GEM_BUG_ON(vma != eb->vma[i]); GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); @@ -761,8 +765,7 @@ add_vma: return eb_reserve(eb); err_obj: - if (obj) - i915_gem_object_put(obj); + i915_gem_object_put(obj); err_vma: eb->vma[i] = NULL; return err; @@ -1159,6 +1162,13 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, if (unlikely(!cache->rq)) { int err; + /* If we need to copy for the cmdparser, we will stall anyway */ + if (eb_use_cmdparser(eb)) + return ERR_PTR(-EWOULDBLOCK); + + if (!intel_engine_can_store_dword(eb->engine)) + return ERR_PTR(-ENODEV); + err = __reloc_gpu_alloc(eb, vma, len); if (unlikely(err)) return ERR_PTR(err); @@ -1183,9 +1193,7 @@ relocate_entry(struct i915_vma *vma, if (!eb->reloc_cache.vaddr && (DBG_FORCE_RELOC == FORCE_GPU_RELOC || - !reservation_object_test_signaled_rcu(vma->resv, true)) && - __intel_engine_can_store_dword(eb->reloc_cache.gen, - eb->engine->class)) { + !reservation_object_test_signaled_rcu(vma->resv, true))) { const unsigned int gen = eb->reloc_cache.gen; unsigned int len; u32 *batch; @@ -1577,7 +1585,7 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb) const unsigned int count = eb->buffer_count; unsigned int i; - if (unlikely(i915.prefault_disable)) + if (unlikely(i915_modparams.prefault_disable)) return 0; for (i = 0; i < count; i++) { @@ -2178,6 +2186,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, int out_fence_fd = -1; int err; + BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS); BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS); @@ -2291,7 +2300,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, goto err_vma; } - if 
(eb.engine->needs_cmd_parser && eb.batch_len) { + if (eb_use_cmdparser(&eb)) { struct i915_vma *vma; vma = eb_parse(&eb, drm_is_current_master(file)); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 5fe2cd8c8f28..2783d63bd1ad 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -360,6 +360,57 @@ i915_vma_get_fence(struct i915_vma *vma) } /** + * i915_reserve_fence - Reserve a fence for vGPU + * @dev_priv: i915 device private + * + * This function walks the fence regs looking for a free one and removes + * it from the fence_list. It is used to reserve a fence for vGPU to use. + */ +struct drm_i915_fence_reg * +i915_reserve_fence(struct drm_i915_private *dev_priv) +{ + struct drm_i915_fence_reg *fence; + int count; + int ret; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + /* Keep at least one fence available for the display engine. */ + count = 0; + list_for_each_entry(fence, &dev_priv->mm.fence_list, link) + count += !fence->pin_count; + if (count <= 1) + return ERR_PTR(-ENOSPC); + + fence = fence_find(dev_priv); + if (IS_ERR(fence)) + return fence; + + if (fence->vma) { + /* Force-remove fence from VMA */ + ret = fence_update(fence, NULL); + if (ret) + return ERR_PTR(ret); + } + + list_del(&fence->link); + return fence; +} + +/** + * i915_unreserve_fence - Reclaim a reserved fence + * @fence: the fence reg + * + * This function adds a reserved fence register from vGPU back to the fence_list. + */ +void i915_unreserve_fence(struct drm_i915_fence_reg *fence) +{ + lockdep_assert_held(&fence->i915->drm.struct_mutex); + + list_add(&fence->link, &fence->i915->mm.fence_list); +} + +/** * i915_gem_revoke_fences - revoke fence state * @dev_priv: i915 device private * diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index e2410eb5d96e..4c82ceb8d318 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -180,7 +180,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv, return 0; } - if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) { + if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) { if (has_full_48bit_ppgtt) return 3; @@ -230,13 +230,13 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr, switch (level) { case I915_CACHE_NONE: - pte |= PPAT_UNCACHED_INDEX; + pte |= PPAT_UNCACHED; break; case I915_CACHE_WT: - pte |= PPAT_DISPLAY_ELLC_INDEX; + pte |= PPAT_DISPLAY_ELLC; break; default: - pte |= PPAT_CACHED_INDEX; + pte |= PPAT_CACHED; break; } @@ -249,9 +249,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr, gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; pde |= addr; if (level != I915_CACHE_NONE) - pde |= PPAT_CACHED_PDE_INDEX; + pde |= PPAT_CACHED_PDE; else - pde |= PPAT_UNCACHED_INDEX; + pde |= PPAT_UNCACHED; return pde; } @@ -356,39 +356,86 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr, static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) { - struct page *page; + struct pagevec *pvec = &vm->free_pages; if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) i915_gem_shrink_all(vm->i915); - if (vm->free_pages.nr) - return vm->free_pages.pages[--vm->free_pages.nr]; + if (likely(pvec->nr)) + return pvec->pages[--pvec->nr]; + + if (!vm->pt_kmap_wc) + return alloc_page(gfp); + + /* A placeholder for a specific mutex to guard the WC stash */ + lockdep_assert_held(&vm->i915->drm.struct_mutex); + + /* Look in our global stash of WC pages... 
*/ + pvec = &vm->i915->mm.wc_stash; + if (likely(pvec->nr)) + return pvec->pages[--pvec->nr]; + + /* Otherwise batch allocate pages to amortize cost of set_pages_wc. */ + do { + struct page *page; + + page = alloc_page(gfp); + if (unlikely(!page)) + break; + + pvec->pages[pvec->nr++] = page; + } while (pagevec_space(pvec)); - page = alloc_page(gfp); - if (!page) + if (unlikely(!pvec->nr)) return NULL; - if (vm->pt_kmap_wc) - set_pages_array_wc(&page, 1); + set_pages_array_wc(pvec->pages, pvec->nr); - return page; + return pvec->pages[--pvec->nr]; } -static void vm_free_pages_release(struct i915_address_space *vm) +static void vm_free_pages_release(struct i915_address_space *vm, + bool immediate) { - GEM_BUG_ON(!pagevec_count(&vm->free_pages)); + struct pagevec *pvec = &vm->free_pages; - if (vm->pt_kmap_wc) - set_pages_array_wb(vm->free_pages.pages, - pagevec_count(&vm->free_pages)); + GEM_BUG_ON(!pagevec_count(pvec)); + + if (vm->pt_kmap_wc) { + struct pagevec *stash = &vm->i915->mm.wc_stash; + + /* When we use WC, first fill up the global stash and then, + * only if full, immediately free the overflow. + */ + + lockdep_assert_held(&vm->i915->drm.struct_mutex); + if (pagevec_space(stash)) { + do { + stash->pages[stash->nr++] = + pvec->pages[--pvec->nr]; + if (!pvec->nr) + return; + } while (pagevec_space(stash)); + + /* As we have made some room in the VM's free_pages, + * we can wait for it to fill again. Unless we are + * inside i915_address_space_fini() and must + * immediately release the pages! + */ + if (!immediate) + return; + } + + set_pages_array_wb(pvec->pages, pvec->nr); + } - __pagevec_release(&vm->free_pages); + __pagevec_release(pvec); } static void vm_free_page(struct i915_address_space *vm, struct page *page) { if (!pagevec_add(&vm->free_pages, page)) - vm_free_pages_release(vm); + vm_free_pages_release(vm, false); } static int __setup_page_dma(struct i915_address_space *vm, @@ -434,10 +481,8 @@ static void fill_page_dma(struct i915_address_space *vm, const u64 val) { u64 * const vaddr = kmap_atomic(p->page); - int i; - for (i = 0; i < 512; i++) - vaddr[i] = val; + memset64(vaddr, val, PAGE_SIZE / sizeof(val)); kunmap_atomic(vaddr); } @@ -452,12 +497,31 @@ static void fill_page_dma_32(struct i915_address_space *vm, static int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp) { - return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO); + struct page *page; + dma_addr_t addr; + + page = alloc_page(gfp | __GFP_ZERO); + if (unlikely(!page)) + return -ENOMEM; + + addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(vm->dma, addr))) { + __free_page(page); + return -ENOMEM; + } + + vm->scratch_page.page = page; + vm->scratch_page.daddr = addr; + return 0; } static void cleanup_scratch_page(struct i915_address_space *vm) { - cleanup_page_dma(vm, &vm->scratch_page); + struct i915_page_dma *p = &vm->scratch_page; + + dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + __free_page(p->page); } static struct i915_page_table *alloc_pt(struct i915_address_space *vm) @@ -1102,19 +1166,22 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm, unsigned int pde; gen8_for_each_pde(pt, pd, start, length, pde) { + int count = gen8_pte_count(start, length); + if (pt == vm->scratch_pt) { pt = alloc_pt(vm); if (IS_ERR(pt)) goto unwind; - gen8_initialize_pt(vm, pt); + if (count < GEN8_PTES) + gen8_initialize_pt(vm, pt); gen8_ppgtt_set_pde(vm, pd, pt, pde); pd->used_pdes++; GEM_BUG_ON(pd->used_pdes
> I915_PDES); } - pt->used_ptes += gen8_pte_count(start, length); + pt->used_ptes += count; } return 0; @@ -1337,18 +1404,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt) 1ULL << 48 : 1ULL << 32; - ret = gen8_init_scratch(&ppgtt->base); - if (ret) { - ppgtt->base.total = 0; - return ret; - } - /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. */ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv)) ppgtt->base.pt_kmap_wc = true; + ret = gen8_init_scratch(&ppgtt->base); + if (ret) { + ppgtt->base.total = 0; + return ret; + } + if (use_4lvl(vm)) { ret = setup_px(&ppgtt->base, &ppgtt->pml4); if (ret) @@ -1872,7 +1939,7 @@ static void i915_address_space_init(struct i915_address_space *vm, static void i915_address_space_fini(struct i915_address_space *vm) { if (pagevec_count(&vm->free_pages)) - vm_free_pages_release(vm); + vm_free_pages_release(vm, true); i915_gem_timeline_fini(&vm->timeline); drm_mm_takedown(&vm->mm); @@ -1885,12 +1952,12 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv) * called on driver load and after a GPU reset, so you can place * workarounds here even if they get overwritten by GPU reset. */ - /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */ + /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */ if (IS_BROADWELL(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); else if (IS_CHERRYVIEW(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); - else if (IS_GEN9_BC(dev_priv)) + else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); else if (IS_GEN9_LP(dev_priv)) I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); @@ -1903,7 +1970,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) /* In the case of execlists, PPGTT is enabled by the context descriptor * and the PDPs are contained within the context itself. We don't * need to do anything here. */ - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) return 0; if (!USES_PPGTT(dev_priv)) @@ -2598,6 +2665,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) { struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma, *vn; + struct pagevec *pvec; ggtt->base.closed = true; @@ -2621,6 +2689,13 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) } ggtt->base.cleanup(&ggtt->base); + + pvec = &dev_priv->mm.wc_stash; + if (pvec->nr) { + set_pages_array_wb(pvec->pages, pvec->nr); + __pagevec_release(pvec); + } + mutex_unlock(&dev_priv->drm.struct_mutex); arch_phys_wc_del(ggtt->mtrr); @@ -2716,13 +2791,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; /* - * On BXT writes larger than 64 bit to the GTT pagetable range will be - * dropped. For WC mappings in general we have 64 byte burst writes - * when the WC buffer is flushed, so we can't use it, but have to + * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range + * will be dropped. For WC mappings in general we have 64 byte burst + * writes when the WC buffer is flushed, so we can't use it, but have to * resort to an uncached mapping. The WC issue is easily caught by the * readback check when writing GTT PTE entries. 
*/ - if (IS_GEN9_LP(dev_priv)) + if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) ggtt->gsm = ioremap_nocache(phys_addr, size); else ggtt->gsm = ioremap_wc(phys_addr, size); @@ -2742,41 +2817,209 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return 0; } -static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv) +static struct intel_ppat_entry * +__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value) +{ + struct intel_ppat_entry *entry = &ppat->entries[index]; + + GEM_BUG_ON(index >= ppat->max_entries); + GEM_BUG_ON(test_bit(index, ppat->used)); + + entry->ppat = ppat; + entry->value = value; + kref_init(&entry->ref); + set_bit(index, ppat->used); + set_bit(index, ppat->dirty); + + return entry; +} + +static void __free_ppat_entry(struct intel_ppat_entry *entry) +{ + struct intel_ppat *ppat = entry->ppat; + unsigned int index = entry - ppat->entries; + + GEM_BUG_ON(index >= ppat->max_entries); + GEM_BUG_ON(!test_bit(index, ppat->used)); + + entry->value = ppat->clear_value; + clear_bit(index, ppat->used); + set_bit(index, ppat->dirty); +} + +/** + * intel_ppat_get - get a usable PPAT entry + * @i915: i915 device instance + * @value: the PPAT value required by the caller + * + * The function searches for an existing PPAT entry which + * matches the required value. If a perfect match is found, the existing PPAT + * entry is used. If only a partial match is found, it checks whether + * any PPAT index is still available. If yes, it allocates a new PPAT + * index for the required entry and updates the HW. If not, the partially + * matched entry is used. + */ +const struct intel_ppat_entry * +intel_ppat_get(struct drm_i915_private *i915, u8 value) +{ + struct intel_ppat *ppat = &i915->ppat; + struct intel_ppat_entry *entry; + unsigned int scanned, best_score; + int i; + + GEM_BUG_ON(!ppat->max_entries); + + scanned = best_score = 0; + for_each_set_bit(i, ppat->used, ppat->max_entries) { + unsigned int score; + + score = ppat->match(ppat->entries[i].value, value); + if (score > best_score) { + entry = &ppat->entries[i]; + if (score == INTEL_PPAT_PERFECT_MATCH) { + kref_get(&entry->ref); + return entry; + } + best_score = score; + } + scanned++; + } + + if (scanned == ppat->max_entries) { + if (!best_score) + return ERR_PTR(-ENOSPC); + + kref_get(&entry->ref); + return entry; + } + + i = find_first_zero_bit(ppat->used, ppat->max_entries); + entry = __alloc_ppat_entry(ppat, i, value); + ppat->update_hw(i915); + return entry; +} + +static void release_ppat(struct kref *kref) +{ + struct intel_ppat_entry *entry = + container_of(kref, struct intel_ppat_entry, ref); + struct drm_i915_private *i915 = entry->ppat->i915; + + __free_ppat_entry(entry); + entry->ppat->update_hw(i915); +} + +/** + * intel_ppat_put - put back the PPAT entry obtained from intel_ppat_get() + * @entry: an intel PPAT entry + * + * Put back the PPAT entry obtained from intel_ppat_get(). If the PPAT index of the + * entry is dynamically allocated, its reference count will be decreased. Once + * the reference count drops to zero, the PPAT index becomes free again.
+ */ +void intel_ppat_put(const struct intel_ppat_entry *entry) +{ + struct intel_ppat *ppat = entry->ppat; + unsigned int index = entry - ppat->entries; + + GEM_BUG_ON(!ppat->max_entries); + + kref_put(&ppat->entries[index].ref, release_ppat); +} + +static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv) +{ + struct intel_ppat *ppat = &dev_priv->ppat; + int i; + + for_each_set_bit(i, ppat->dirty, ppat->max_entries) { + I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value); + clear_bit(i, ppat->dirty); + } +} + +static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv) +{ + struct intel_ppat *ppat = &dev_priv->ppat; + u64 pat = 0; + int i; + + for (i = 0; i < ppat->max_entries; i++) + pat |= GEN8_PPAT(i, ppat->entries[i].value); + + bitmap_clear(ppat->dirty, 0, ppat->max_entries); + + I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); +} + +static unsigned int bdw_private_pat_match(u8 src, u8 dst) +{ + unsigned int score = 0; + enum { + AGE_MATCH = BIT(0), + TC_MATCH = BIT(1), + CA_MATCH = BIT(2), + }; + + /* Cache attribute has to be matched. */ + if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst)) + return 0; + + score |= CA_MATCH; + + if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst)) + score |= TC_MATCH; + + if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst)) + score |= AGE_MATCH; + + if (score == (AGE_MATCH | TC_MATCH | CA_MATCH)) + return INTEL_PPAT_PERFECT_MATCH; + + return score; +} + +static unsigned int chv_private_pat_match(u8 src, u8 dst) { + return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ? + INTEL_PPAT_PERFECT_MATCH : 0; +} + +static void cnl_setup_private_ppat(struct intel_ppat *ppat) +{ + ppat->max_entries = 8; + ppat->update_hw = cnl_private_pat_update_hw; + ppat->match = bdw_private_pat_match; + ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); + /* XXX: spec is unclear if this is still needed for CNL+ */ - if (!USES_PPGTT(dev_priv)) { - I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC); + if (!USES_PPGTT(ppat->i915)) { + __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC); return; } - I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC); - I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); - I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); - I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC); - I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); + __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); + __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); + __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); + __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); + __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); + __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); + __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); } /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability * bits. When using advanced contexts each context stores its own PAT, but * writing this data shouldn't be harmful even in those cases. 
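bdw_private_pat_match() above scores how close an existing PPAT value is to a requested one: the cache attribute must match exactly or the score is zero, while target-cache and age matches only add bits, and all three together map to INTEL_PPAT_PERFECT_MATCH. A standalone sketch of the same scoring, assuming the 2-bit field layout of the GEN8_PPAT_GET_* masks:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)          (1u << (n))
#define PPAT_GET_CA(x)  ((x) & 3)        /* cache attribute, bits 1:0 */
#define PPAT_GET_TC(x)  ((x) & (3 << 2)) /* target cache, bits 3:2 */
#define PPAT_GET_AGE(x) ((x) & (3 << 4)) /* LRU age, bits 5:4 */
#define PERFECT_MATCH   (~0u)

static unsigned int ppat_match(uint8_t src, uint8_t dst)
{
    enum { AGE_MATCH = BIT(0), TC_MATCH = BIT(1), CA_MATCH = BIT(2) };
    unsigned int score = 0;

    /* cacheability is mandatory: no partial credit if it differs */
    if (PPAT_GET_CA(src) != PPAT_GET_CA(dst))
        return 0;
    score |= CA_MATCH;

    if (PPAT_GET_TC(src) == PPAT_GET_TC(dst))
        score |= TC_MATCH;
    if (PPAT_GET_AGE(src) == PPAT_GET_AGE(dst))
        score |= AGE_MATCH;

    return score == (AGE_MATCH | TC_MATCH | CA_MATCH) ? PERFECT_MATCH : score;
}

int main(void)
{
    printf("partial: %u\n", ppat_match(0x01, 0x31)); /* same CA/TC, other age */
    printf("perfect? %d\n", ppat_match(0x31, 0x31) == PERFECT_MATCH);
    return 0;
}

intel_ppat_get() then prefers a perfect match, allocates a free index when one exists, and only settles for the best partial match once the table is full.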
*/ -static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) +static void bdw_setup_private_ppat(struct intel_ppat *ppat) { - u64 pat; + ppat->max_entries = 8; + ppat->update_hw = bdw_private_pat_update_hw; + ppat->match = bdw_private_pat_match; + ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3); - pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ - GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ - GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ - GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ - GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | - GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | - GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | - GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); - - if (!USES_PPGTT(dev_priv)) + if (!USES_PPGTT(ppat->i915)) { /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, * so RTL will always use the value corresponding to * pat_sel = 000". @@ -2790,17 +3033,26 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) * So we can still hold onto all our assumptions wrt cpu * clflushing on LLC machines. */ - pat = GEN8_PPAT(0, GEN8_PPAT_UC); + __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC); + return; + } - /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b - * write would work. */ - I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); - I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); + __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */ + __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */ + __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */ + __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */ + __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); + __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); + __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); + __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); } -static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) +static void chv_setup_private_ppat(struct intel_ppat *ppat) { - u64 pat; + ppat->max_entries = 8; + ppat->update_hw = bdw_private_pat_update_hw; + ppat->match = chv_private_pat_match; + ppat->clear_value = CHV_PPAT_SNOOP; /* * Map WB on BDW to snooped on CHV. @@ -2820,17 +3072,15 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) * Which means we must set the snoop bit in PAT entry 0 * in order to keep the global status page working. 
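On BDW the eight PPAT entries live in a single 64-bit register, which is why bdw_private_pat_update_hw() above rebuilds the whole value from the per-entry bytes and writes it as two 32-bit halves. The packing is a plain shift-by-8*i OR; a sketch with placeholder entry values (not the driver's table):

#include <stdint.h>
#include <stdio.h>

#define GEN8_PPAT(i, x) ((uint64_t)(x) << ((i) * 8)) /* entry i occupies byte i */

int main(void)
{
    uint8_t entries[8] = { 0x07, 0x0a, 0x0e, 0x00, 0x17, 0x27, 0x37, 0x37 };
    uint64_t pat = 0;
    int i;

    for (i = 0; i < 8; i++)
        pat |= GEN8_PPAT(i, entries[i]);

    /* the hardware exposes the register as two 32-bit writes (LO/HI) */
    printf("PAT_LO=0x%08x PAT_HI=0x%08x\n",
           (unsigned int)(pat & 0xffffffffu), (unsigned int)(pat >> 32));
    return 0;
}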
*/ - pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | - GEN8_PPAT(1, 0) | - GEN8_PPAT(2, 0) | - GEN8_PPAT(3, 0) | - GEN8_PPAT(4, CHV_PPAT_SNOOP) | - GEN8_PPAT(5, CHV_PPAT_SNOOP) | - GEN8_PPAT(6, CHV_PPAT_SNOOP) | - GEN8_PPAT(7, CHV_PPAT_SNOOP); - I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); - I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); + __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP); + __alloc_ppat_entry(ppat, 1, 0); + __alloc_ppat_entry(ppat, 2, 0); + __alloc_ppat_entry(ppat, 3, 0); + __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP); + __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP); + __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP); + __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP); } static void gen6_gmch_remove(struct i915_address_space *vm) @@ -2841,6 +3091,31 @@ static void gen6_gmch_remove(struct i915_address_space *vm) cleanup_scratch_page(vm); } +static void setup_private_pat(struct drm_i915_private *dev_priv) +{ + struct intel_ppat *ppat = &dev_priv->ppat; + int i; + + ppat->i915 = dev_priv; + + if (INTEL_GEN(dev_priv) >= 10) + cnl_setup_private_ppat(ppat); + else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) + chv_setup_private_ppat(ppat); + else + bdw_setup_private_ppat(ppat); + + GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES); + + for_each_clear_bit(i, ppat->used, ppat->max_entries) { + ppat->entries[i].value = ppat->clear_value; + ppat->entries[i].ppat = ppat; + set_bit(i, ppat->dirty); + } + + ppat->update_hw(dev_priv); +} + static int gen8_gmch_probe(struct i915_ggtt *ggtt) { struct drm_i915_private *dev_priv = ggtt->base.i915; @@ -2873,14 +3148,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) } ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT; - - if (INTEL_GEN(dev_priv) >= 10) - cnl_setup_private_ppat(dev_priv); - else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) - chv_setup_private_ppat(dev_priv); - else - bdw_setup_private_ppat(dev_priv); - ggtt->base.cleanup = gen6_gmch_remove; ggtt->base.bind_vma = ggtt_bind_vma; ggtt->base.unbind_vma = ggtt_unbind_vma; @@ -2901,6 +3168,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->invalidate = gen6_ggtt_invalidate; + setup_private_pat(dev_priv); + return ggtt_probe_common(ggtt, size); } @@ -3021,7 +3290,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv) * currently don't have any bits spare to pass in this upper * restriction! 
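setup_private_pat() above seeds every still-unused entry with clear_value via for_each_clear_bit(), marks each one dirty, and flushes once through update_hw(); the resume path later in this patch does the same by setting the whole dirty bitmap. The dirty-bitmap flush in isolation, sketched with a plain byte-wide bitmap instead of the kernel's DECLARE_BITMAP():

#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES 8

struct pat_cache {
    uint8_t entries[MAX_ENTRIES];
    uint8_t dirty; /* one bit per entry */
};

static void write_hw(int idx, uint8_t val) /* stand-in for a register write */
{
    printf("write PAT[%d] = 0x%02x\n", idx, val);
}

static void update_hw(struct pat_cache *p)
{
    int i;

    for (i = 0; i < MAX_ENTRIES; i++) {
        if (!(p->dirty & (1u << i)))
            continue; /* untouched entries are skipped */
        write_hw(i, p->entries[i]);
        p->dirty &= (uint8_t)~(1u << i);
    }
}

int main(void)
{
    struct pat_cache pat = { .dirty = 0 };

    pat.entries[2] = 0x13;
    pat.dirty |= 1u << 2; /* only index 2 changed */
    update_hw(&pat);      /* emits exactly one register write */
    return 0;
}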
- if (HAS_GUC(dev_priv) && i915.enable_guc_loading) { + if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) { ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP); ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); } @@ -3158,13 +3427,10 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv) ggtt->base.closed = false; if (INTEL_GEN(dev_priv) >= 8) { - if (INTEL_GEN(dev_priv) >= 10) - cnl_setup_private_ppat(dev_priv); - else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) - chv_setup_private_ppat(dev_priv); - else - bdw_setup_private_ppat(dev_priv); + struct intel_ppat *ppat = &dev_priv->ppat; + bitmap_set(ppat->dirty, 0, ppat->max_entries); + dev_priv->ppat.update_hw(dev_priv); return; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index b4e3aa7c0ce1..f62fb903dc24 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -126,13 +126,13 @@ typedef u64 gen8_ppgtt_pml4e_t; * tables */ #define GEN8_PDPE_MASK 0x1ff -#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) -#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ -#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */ -#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */ +#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD) +#define PPAT_CACHED_PDE 0 /* WB LLC */ +#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */ +#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */ #define CHV_PPAT_SNOOP (1<<6) -#define GEN8_PPAT_AGE(x) (x<<4) +#define GEN8_PPAT_AGE(x) ((x)<<4) #define GEN8_PPAT_LLCeLLC (3<<2) #define GEN8_PPAT_LLCELLC (2<<2) #define GEN8_PPAT_LLC (1<<2) @@ -143,6 +143,11 @@ typedef u64 gen8_ppgtt_pml4e_t; #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) #define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) +#define GEN8_PPAT_GET_CA(x) ((x) & 3) +#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2)) +#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4)) +#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6)) + struct sg_table; struct intel_rotation_info { @@ -536,6 +541,37 @@ i915_vm_to_ggtt(struct i915_address_space *vm) return container_of(vm, struct i915_ggtt, base); } +#define INTEL_MAX_PPAT_ENTRIES 8 +#define INTEL_PPAT_PERFECT_MATCH (~0U) +
+struct intel_ppat; + +struct intel_ppat_entry { + struct intel_ppat *ppat; + struct kref ref; + u8 value; +}; + +struct intel_ppat { + struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES]; + DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES); + DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES); + unsigned int max_entries; + u8 clear_value; + /* + * Return a score to show how two PPAT values match; + * an INTEL_PPAT_PERFECT_MATCH indicates a perfect match + */ + unsigned int (*match)(u8 src, u8 dst); + void (*update_hw)(struct drm_i915_private *i915); + + struct drm_i915_private *i915; +}; + +const struct intel_ppat_entry * +intel_ppat_get(struct drm_i915_private *i915, u8 value); +void intel_ppat_put(const struct intel_ppat_entry *entry); + int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915); void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 813a3b546d6e..4eb1a76731b2 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -1021,12 +1021,28 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu) return this_cpu != cpu; } -bool __i915_spin_request(const struct drm_i915_gem_request *req, - u32 seqno, int state, unsigned long timeout_us) +static
bool __i915_spin_request(const struct drm_i915_gem_request *req, + u32 seqno, int state, unsigned long timeout_us) { struct intel_engine_cs *engine = req->engine; unsigned int irq, cpu; + GEM_BUG_ON(!seqno); + + /* + * Only wait for the request if we know it is likely to complete. + * + * We don't track the timestamps around requests, nor the average + * request length, so we do not have a good indicator that this + * request will complete within the timeout. What we do know is the + * order in which requests are executed by the engine and so we can + * tell if the request has started. If the request hasn't started yet, + * it is a fair assumption that it will not complete within our + * relatively short timeout. + */ + if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1)) + return false; + /* When waiting for high frequency requests, e.g. during synchronous * rendering split between the CPU and GPU, the finite amount of time * required to set up the irq and wait upon it limits the response @@ -1040,12 +1056,8 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req, irq = atomic_read(&engine->irq_count); timeout_us += local_clock_us(&cpu); do { - if (seqno != i915_gem_request_global_seqno(req)) - break; - - if (i915_seqno_passed(intel_engine_get_seqno(req->engine), - seqno)) - return true; + if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) + return seqno == i915_gem_request_global_seqno(req); /* Seqno are meant to be ordered *before* the interrupt. If * we see an interrupt without a corresponding seqno advance, @@ -1156,7 +1168,7 @@ restart: GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit)); /* Optimistic short spin before touching IRQs */ - if (i915_spin_request(req, state, 5)) + if (__i915_spin_request(req, wait.seqno, state, 5)) goto complete; set_current_state(state); @@ -1213,7 +1225,7 @@ wakeup: continue; /* Only spin if we know the GPU is processing this request */ - if (i915_spin_request(req, state, 2)) + if (__i915_spin_request(req, wait.seqno, state, 2)) break; if (!intel_wait_check_request(&wait, req)) { diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h index 49a4c8994ff0..96eb52471dad 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.h +++ b/drivers/gpu/drm/i915/i915_gem_request.h @@ -313,26 +313,6 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2) } static inline bool -__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno) -{ - GEM_BUG_ON(!seqno); - return i915_seqno_passed(intel_engine_get_seqno(req->engine), - seqno - 1); -} - -static inline bool -i915_gem_request_started(const struct drm_i915_gem_request *req) -{ - u32 seqno; - - seqno = i915_gem_request_global_seqno(req); - if (!seqno) - return false; - - return __i915_gem_request_started(req, seqno); -} - -static inline bool __i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno) { GEM_BUG_ON(!seqno); @@ -352,21 +332,6 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req) return __i915_gem_request_completed(req, seqno); } -bool __i915_spin_request(const struct drm_i915_gem_request *request, - u32 seqno, int state, unsigned long timeout_us); -static inline bool i915_spin_request(const struct drm_i915_gem_request *request, - int state, unsigned long timeout_us) -{ - u32 seqno; - - seqno = i915_gem_request_global_seqno(request); - if (!seqno) - return 0; - - return (__i915_gem_request_started(request, seqno) && - __i915_spin_request(request, seqno, state, timeout_us)); -} 
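The rewritten spinner above refuses to busy-wait unless the request has at least started executing, which it infers from i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1). That helper is the classic wraparound-safe sequence comparison; a standalone sketch of the idea:

#include <stdint.h>
#include <stdio.h>

/* true if seq1 is at or after seq2, tolerant of u32 wraparound */
static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
    uint32_t hw = 0xfffffffe; /* engine seqno just before the wrap */

    printf("%d\n", seqno_passed(hw, 0xfffffffd)); /* 1: already passed */
    printf("%d\n", seqno_passed(hw, 0x00000002)); /* 0: 2 lies ahead, post-wrap */

    /* "has request S started?" == passed(hw, S - 1), here with S = 0xffffffff */
    printf("started? %d\n", seqno_passed(hw, 0xffffffffu - 1));
    return 0;
}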
- /* We treat requests as fences. This is not be to confused with our * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync. * We use the fences to synchronize access from the CPU with activity on the diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 709efe2357ea..2d4996de7331 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -399,64 +399,42 @@ struct get_pages_work { struct task_struct *task; }; -#if IS_ENABLED(CONFIG_SWIOTLB) -#define swiotlb_active() swiotlb_nr_tbl() -#else -#define swiotlb_active() 0 -#endif - -static int -st_set_pages(struct sg_table **st, struct page **pvec, int num_pages) -{ - struct scatterlist *sg; - int ret, n; - - *st = kmalloc(sizeof(**st), GFP_KERNEL); - if (*st == NULL) - return -ENOMEM; - - if (swiotlb_active()) { - ret = sg_alloc_table(*st, num_pages, GFP_KERNEL); - if (ret) - goto err; - - for_each_sg((*st)->sgl, sg, num_pages, n) - sg_set_page(sg, pvec[n], PAGE_SIZE, 0); - } else { - ret = sg_alloc_table_from_pages(*st, pvec, num_pages, - 0, num_pages << PAGE_SHIFT, - GFP_KERNEL); - if (ret) - goto err; - } - - return 0; - -err: - kfree(*st); - *st = NULL; - return ret; -} - static struct sg_table * -__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj, - struct page **pvec, int num_pages) +__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, + struct page **pvec, int num_pages) { - struct sg_table *pages; + unsigned int max_segment = i915_sg_segment_size(); + struct sg_table *st; int ret; - ret = st_set_pages(&pages, pvec, num_pages); - if (ret) + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + +alloc_table: + ret = __sg_alloc_table_from_pages(st, pvec, num_pages, + 0, num_pages << PAGE_SHIFT, + max_segment, + GFP_KERNEL); + if (ret) { + kfree(st); return ERR_PTR(ret); + } - ret = i915_gem_gtt_prepare_pages(obj, pages); + ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { - sg_free_table(pages); - kfree(pages); + sg_free_table(st); + + if (max_segment > PAGE_SIZE) { + max_segment = PAGE_SIZE; + goto alloc_table; + } + + kfree(st); return ERR_PTR(ret); } - return pages; + return st; } static int @@ -540,7 +518,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) struct sg_table *pages = ERR_PTR(ret); if (pinned == npages) { - pages = __i915_gem_userptr_set_pages(obj, pvec, npages); + pages = __i915_gem_userptr_alloc_pages(obj, pvec, + npages); if (!IS_ERR(pages)) { __i915_gem_object_set_pages(obj, pages); pinned = 0; @@ -661,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) pages = __i915_gem_userptr_get_pages_schedule(obj); active = pages == ERR_PTR(-EAGAIN); } else { - pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages); + pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages); active = !IS_ERR(pages); } if (active) @@ -834,7 +813,9 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv) hash_init(dev_priv->mm_structs); dev_priv->mm.userptr_wq = - alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0); + alloc_workqueue("i915-userptr-acquire", + WQ_HIGHPRI | WQ_MEM_RECLAIM, + 0); if (!dev_priv->mm.userptr_wq) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 0c779671fe2d..c14552ab270b 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -396,6 +396,8 @@ static void error_print_context(struct drm_i915_error_state_buf 
*m, static void error_print_engine(struct drm_i915_error_state_buf *m, const struct drm_i915_error_engine *ee) { + int n; + err_printf(m, "%s command stream:\n", engine_str(ee->engine_id)); err_printf(m, " START: 0x%08x\n", ee->start); err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head); @@ -465,8 +467,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, jiffies_to_msecs(jiffies - ee->hangcheck_timestamp)); err_printf(m, " engine reset count: %u\n", ee->reset_count); - error_print_request(m, " ELSP[0]: ", &ee->execlist[0]); - error_print_request(m, " ELSP[1]: ", &ee->execlist[1]); + for (n = 0; n < ee->num_ports; n++) { + err_printf(m, " ELSP[%d]:", n); + error_print_request(m, " ", &ee->execlist[n]); + } + error_print_context(m, " Active context: ", &ee->context); } @@ -567,7 +572,7 @@ static __always_inline void err_print_param(struct drm_i915_error_state_buf *m, static void err_print_params(struct drm_i915_error_state_buf *m, const struct i915_params *p) { -#define PRINT(T, x) err_print_param(m, #x, #T, &p->x); +#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x); I915_PARAMS_FOR_EACH(PRINT); #undef PRINT } @@ -861,7 +866,7 @@ void __i915_gpu_state_free(struct kref *error_ref) kfree(error->overlay); kfree(error->display); -#define FREE(T, x) free_param(#T, &error->params.x); +#define FREE(T, x, ...) free_param(#T, &error->params.x); I915_PARAMS_FOR_EACH(FREE); #undef FREE @@ -1327,17 +1332,19 @@ static void engine_record_requests(struct intel_engine_cs *engine, static void error_record_engine_execlists(struct intel_engine_cs *engine, struct drm_i915_error_engine *ee) { - const struct execlist_port *port = engine->execlist_port; + const struct intel_engine_execlists * const execlists = &engine->execlists; unsigned int n; - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { - struct drm_i915_gem_request *rq = port_request(&port[n]); + for (n = 0; n < execlists_num_ports(execlists); n++) { + struct drm_i915_gem_request *rq = port_request(&execlists->port[n]); if (!rq) break; record_request(rq, &ee->execlist[n]); } + + ee->num_ports = n; } static void record_context(struct drm_i915_error_context *e, @@ -1554,7 +1561,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv, struct i915_gpu_state *error) { /* Capturing log buf contents won't be useful if logging was disabled */ - if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0)) + if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0)) return; error->guc_log = i915_error_object_create(dev_priv, @@ -1696,8 +1703,8 @@ static int capture(void *data) ktime_to_timeval(ktime_sub(ktime_get(), error->i915->gt.last_init_time)); - error->params = i915; -#define DUP(T, x) dup_param(#T, &error->params.x); + error->params = i915_modparams; +#define DUP(T, x, ...) 
dup_param(#T, &error->params.x); I915_PARAMS_FOR_EACH(DUP); #undef DUP @@ -1751,7 +1758,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv, struct i915_gpu_state *error; unsigned long flags; - if (!i915.error_capture) + if (!i915_modparams.error_capture) return; if (READ_ONCE(dev_priv->gpu_error.first_error)) diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 48a1e9349a2c..04f1281d81a5 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -192,13 +192,12 @@ static int __create_doorbell(struct i915_guc_client *client) doorbell = __get_doorbell(client); doorbell->db_status = GUC_DOORBELL_ENABLED; - doorbell->cookie = client->doorbell_cookie; + doorbell->cookie = 0; err = __guc_allocate_doorbell(client->guc, client->stage_id); - if (err) { + if (err) doorbell->db_status = GUC_DOORBELL_DISABLED; - doorbell->cookie = 0; - } + return err; } @@ -306,7 +305,7 @@ static void guc_proc_desc_init(struct intel_guc *guc, desc->db_base_addr = 0; desc->stage_id = client->stage_id; - desc->wq_size_bytes = client->wq_size; + desc->wq_size_bytes = GUC_WQ_SIZE; desc->wq_status = WQ_STATUS_ACTIVE; desc->priority = client->priority; } @@ -391,8 +390,8 @@ static void guc_stage_desc_init(struct intel_guc *guc, desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client); desc->db_trigger_uk = gfx_addr + client->doorbell_offset; desc->process_desc = gfx_addr + client->proc_desc_offset; - desc->wq_addr = gfx_addr + client->wq_offset; - desc->wq_size = client->wq_size; + desc->wq_addr = gfx_addr + GUC_DB_SIZE; + desc->wq_size = GUC_WQ_SIZE; desc->desc_private = (uintptr_t)client; } @@ -406,82 +405,23 @@ static void guc_stage_desc_fini(struct intel_guc *guc, memset(desc, 0, sizeof(*desc)); } -/** - * i915_guc_wq_reserve() - reserve space in the GuC's workqueue - * @request: request associated with the commands - * - * Return: 0 if space is available - * -EAGAIN if space is not currently available - * - * This function must be called (and must return 0) before a request - * is submitted to the GuC via i915_guc_submit() below. Once a result - * of 0 has been returned, it must be balanced by a corresponding - * call to submit(). - * - * Reservation allows the caller to determine in advance that space - * will be available for the next submission before committing resources - * to it, and helps avoid late failures with complicated recovery paths. 
- */ -int i915_guc_wq_reserve(struct drm_i915_gem_request *request) -{ - const size_t wqi_size = sizeof(struct guc_wq_item); - struct i915_guc_client *client = request->i915->guc.execbuf_client; - struct guc_process_desc *desc = __get_process_desc(client); - u32 freespace; - int ret; - - spin_lock_irq(&client->wq_lock); - freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); - freespace -= client->wq_rsvd; - if (likely(freespace >= wqi_size)) { - client->wq_rsvd += wqi_size; - ret = 0; - } else { - client->no_wq_space++; - ret = -EAGAIN; - } - spin_unlock_irq(&client->wq_lock); - - return ret; -} - -static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size) -{ - unsigned long flags; - - spin_lock_irqsave(&client->wq_lock, flags); - client->wq_rsvd += size; - spin_unlock_irqrestore(&client->wq_lock, flags); -} - -void i915_guc_wq_unreserve(struct drm_i915_gem_request *request) -{ - const int wqi_size = sizeof(struct guc_wq_item); - struct i915_guc_client *client = request->i915->guc.execbuf_client; - - GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size); - guc_client_update_wq_rsvd(client, -wqi_size); -} - /* Construct a Work Item and append it to the GuC's Work Queue */ static void guc_wq_item_append(struct i915_guc_client *client, struct drm_i915_gem_request *rq) { /* wqi_len is in DWords, and does not include the one-word header */ const size_t wqi_size = sizeof(struct guc_wq_item); - const u32 wqi_len = wqi_size/sizeof(u32) - 1; + const u32 wqi_len = wqi_size / sizeof(u32) - 1; struct intel_engine_cs *engine = rq->engine; + struct i915_gem_context *ctx = rq->ctx; struct guc_process_desc *desc = __get_process_desc(client); struct guc_wq_item *wqi; - u32 freespace, tail, wq_off; + u32 ring_tail, wq_off; - /* Free space is guaranteed, see i915_guc_wq_reserve() above */ - freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size); - GEM_BUG_ON(freespace < wqi_size); + lockdep_assert_held(&client->wq_lock); - /* The GuC firmware wants the tail index in QWords, not bytes */ - tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3; - GEM_BUG_ON(tail > WQ_RING_TAIL_MAX); + ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); + GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we * should not have the case where structure wqi is across page, neither @@ -491,29 +431,29 @@ static void guc_wq_item_append(struct i915_guc_client *client, * workqueue buffer dw by dw. */ BUILD_BUG_ON(wqi_size != 16); - GEM_BUG_ON(client->wq_rsvd < wqi_size); - /* postincrement WQ tail for next time */ - wq_off = client->wq_tail; + /* Free space is guaranteed. 
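guc_wq_item_append() now spells the tail conversion as '/ sizeof(u64)' rather than '>> 3'; the two are identical for byte offsets. A freestanding sketch of the four-dword work item it assembles, with the field shifts and flag values treated as assumptions modeled on intel_guc_fwif.h rather than quoted from it:

#include <stdint.h>

/* Illustrative stand-ins; the real values live in intel_guc_fwif.h. */
#define WQ_TYPE_INORDER     (0x2u << 0)
#define WQ_TARGET_SHIFT     10
#define WQ_LEN_SHIFT        16
#define WQ_NO_WCFLUSH_WAIT  (1u << 27)
#define WQ_RING_TAIL_SHIFT  0

struct guc_wq_item {
    uint32_t header;
    uint32_t context_desc;
    uint32_t submit_element_info;
    uint32_t fence_id;
};

static void wq_item_pack(struct guc_wq_item *wqi, uint32_t guc_engine_id,
                         uint32_t lrc_desc_lo, uint32_t ring_tail_qw,
                         uint32_t seqno)
{
    /* Length is in dwords and excludes the one-word header: 4 - 1 = 3. */
    const uint32_t wqi_len = sizeof(*wqi) / sizeof(uint32_t) - 1;

    wqi->header = WQ_TYPE_INORDER |
                  (wqi_len << WQ_LEN_SHIFT) |
                  (guc_engine_id << WQ_TARGET_SHIFT) |
                  WQ_NO_WCFLUSH_WAIT;
    wqi->context_desc = lrc_desc_lo; /* low 32 bits of the lrc descriptor */
    /* The firmware wants the ring tail in qwords: bytes / 8 == bytes >> 3. */
    wqi->submit_element_info = ring_tail_qw << WQ_RING_TAIL_SHIFT;
    wqi->fence_id = seqno;
}

int main(void)
{
    struct guc_wq_item wqi;

    wq_item_pack(&wqi, 0, 0x1234, 4096 / 8, 1);
    return wqi.fence_id == 1 ? 0 : 1;
}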
*/ + wq_off = READ_ONCE(desc->tail); + GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head), + GUC_WQ_SIZE) < wqi_size); GEM_BUG_ON(wq_off & (wqi_size - 1)); - client->wq_tail += wqi_size; - client->wq_tail &= client->wq_size - 1; - client->wq_rsvd -= wqi_size; /* WQ starts from the page after doorbell / process_desc */ wqi = client->vaddr + wq_off + GUC_DB_SIZE; /* Now fill in the 4-word work queue item */ wqi->header = WQ_TYPE_INORDER | - (wqi_len << WQ_LEN_SHIFT) | - (engine->guc_id << WQ_TARGET_SHIFT) | - WQ_NO_WCFLUSH_WAIT; + (wqi_len << WQ_LEN_SHIFT) | + (engine->guc_id << WQ_TARGET_SHIFT) | + WQ_NO_WCFLUSH_WAIT; - /* The GuC wants only the low-order word of the context descriptor */ - wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine); + wqi->context_desc = lower_32_bits(intel_lr_context_descriptor(ctx, engine)); - wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT; + wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; wqi->fence_id = rq->global_seqno; + + /* Postincrement WQ tail for next time. */ + WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); } static void guc_reset_wq(struct i915_guc_client *client) @@ -522,106 +462,64 @@ static void guc_reset_wq(struct i915_guc_client *client) desc->head = 0; desc->tail = 0; - - client->wq_tail = 0; } -static int guc_ring_doorbell(struct i915_guc_client *client) +static void guc_ring_doorbell(struct i915_guc_client *client) { - struct guc_process_desc *desc = __get_process_desc(client); - union guc_doorbell_qw db_cmp, db_exc, db_ret; - union guc_doorbell_qw *db; - int attempt = 2, ret = -EAGAIN; - - /* Update the tail so it is visible to GuC */ - desc->tail = client->wq_tail; - - /* current cookie */ - db_cmp.db_status = GUC_DOORBELL_ENABLED; - db_cmp.cookie = client->doorbell_cookie; + struct guc_doorbell_info *db; + u32 cookie; - /* cookie to be updated */ - db_exc.db_status = GUC_DOORBELL_ENABLED; - db_exc.cookie = client->doorbell_cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; + lockdep_assert_held(&client->wq_lock); /* pointer of current doorbell cacheline */ - db = (union guc_doorbell_qw *)__get_doorbell(client); - - while (attempt--) { - /* lets ring the doorbell */ - db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db, - db_cmp.value_qw, db_exc.value_qw); - - /* if the exchange was successfully executed */ - if (db_ret.value_qw == db_cmp.value_qw) { - /* db was successfully rung */ - client->doorbell_cookie = db_exc.cookie; - ret = 0; - break; - } - - /* XXX: doorbell was lost and need to acquire it again */ - if (db_ret.db_status == GUC_DOORBELL_DISABLED) - break; + db = __get_doorbell(client); - DRM_WARN("Cookie mismatch. Expected %d, found %d\n", - db_cmp.cookie, db_ret.cookie); - - /* update the cookie to newly read cookie from GuC */ - db_cmp.cookie = db_ret.cookie; - db_exc.cookie = db_ret.cookie + 1; - if (db_exc.cookie == 0) - db_exc.cookie = 1; - } + /* we're not expecting the doorbell cookie to change behind our back */ + cookie = READ_ONCE(db->cookie); + WARN_ON_ONCE(xchg(&db->cookie, cookie + 1) != cookie); - return ret; + /* XXX: doorbell was lost and need to acquire it again */ + GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED); } /** - * __i915_guc_submit() - Submit commands through GuC - * @rq: request associated with the commands - * - * The caller must have already called i915_guc_wq_reserve() above with - * a result of 0 (success), guaranteeing that there is space in the work - * queue for the new request, so enqueuing the item cannot fail. 
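The rewritten guc_ring_doorbell() above drops the cmpxchg retry loop: with client->wq_lock held there is exactly one ringer, so a plain exchange must return the cookie that was just read, and anything else is a bug to report rather than a race to retry. The same idea in isolation, with hypothetical types and GCC/Clang atomic builtins standing in for xchg():

#include <assert.h>
#include <stdint.h>

struct doorbell_info {
    uint32_t db_status; /* GUC_DOORBELL_ENABLED / _DISABLED in the driver */
    uint32_t cookie;    /* bumped once per ring; the firmware snoops it */
};

/* Caller must serialize ringers, as the driver does with client->wq_lock. */
static void ring_doorbell(struct doorbell_info *db)
{
    uint32_t cookie = __atomic_load_n(&db->cookie, __ATOMIC_RELAXED);
    uint32_t prev = __atomic_exchange_n(&db->cookie, cookie + 1,
                                        __ATOMIC_SEQ_CST);

    /* One writer: the exchange cannot observe anyone else's update. */
    assert(prev == cookie); /* the driver WARNs instead of asserting */
}

int main(void)
{
    struct doorbell_info db = { 1, 0 };

    ring_doorbell(&db);
    ring_doorbell(&db);
    assert(db.cookie == 2);
    return 0;
}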
- * - * Bad Things Will Happen if the caller violates this protocol e.g. calls - * submit() when _reserve() says there's no space, or calls _submit() - * a different number of times from (successful) calls to _reserve(). + * i915_guc_submit() - Submit commands through GuC + * @engine: engine associated with the commands * * The only error here arises if the doorbell hardware isn't functioning * as expected, which really shouldn't happen. */ -static void __i915_guc_submit(struct drm_i915_gem_request *rq) +static void i915_guc_submit(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = rq->i915; - struct intel_engine_cs *engine = rq->engine; - unsigned int engine_id = engine->id; - struct intel_guc *guc = &rq->i915->guc; + struct drm_i915_private *dev_priv = engine->i915; + struct intel_guc *guc = &dev_priv->guc; struct i915_guc_client *client = guc->execbuf_client; - unsigned long flags; - int b_ret; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + const unsigned int engine_id = engine->id; + unsigned int n; - /* WA to flush out the pending GMADR writes to ring buffer. */ - if (i915_vma_is_map_and_fenceable(rq->ring->vma)) - POSTING_READ_FW(GUC_STATUS); + for (n = 0; n < ARRAY_SIZE(execlists->port); n++) { + struct drm_i915_gem_request *rq; + unsigned int count; - spin_lock_irqsave(&client->wq_lock, flags); + rq = port_unpack(&port[n], &count); + if (rq && count == 0) { + port_set(&port[n], port_pack(rq, ++count)); - guc_wq_item_append(client, rq); - b_ret = guc_ring_doorbell(client); + if (i915_vma_is_map_and_fenceable(rq->ring->vma)) + POSTING_READ_FW(GUC_STATUS); - client->submissions[engine_id] += 1; + spin_lock(&client->wq_lock); - spin_unlock_irqrestore(&client->wq_lock, flags); -} + guc_wq_item_append(client, rq); + guc_ring_doorbell(client); -static void i915_guc_submit(struct drm_i915_gem_request *rq) -{ - __i915_gem_request_submit(rq); - __i915_guc_submit(rq); + client->submissions[engine_id] += 1; + + spin_unlock(&client->wq_lock); + } + } } static void nested_enable_signaling(struct drm_i915_gem_request *rq) @@ -655,27 +553,33 @@ static void port_assign(struct execlist_port *port, if (port_isset(port)) i915_gem_request_put(port_request(port)); - port_set(port, i915_gem_request_get(rq)); + port_set(port, port_pack(i915_gem_request_get(rq), port_count(port))); nested_enable_signaling(rq); } -static bool i915_guc_dequeue(struct intel_engine_cs *engine) +static void i915_guc_dequeue(struct intel_engine_cs *engine) { - struct execlist_port *port = engine->execlist_port; - struct drm_i915_gem_request *last = port_request(port); - struct rb_node *rb; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + struct drm_i915_gem_request *last = NULL; + const struct execlist_port * const last_port = + &execlists->port[execlists->port_mask]; bool submit = false; + struct rb_node *rb; + + if (port_isset(port)) + port++; spin_lock_irq(&engine->timeline->lock); - rb = engine->execlist_first; - GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb); + rb = execlists->first; + GEM_BUG_ON(rb_first(&execlists->queue) != rb); while (rb) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); struct drm_i915_gem_request *rq, *rn; list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { if (last && rq->ctx != last->ctx) { - if (port != engine->execlist_port) { + if (port == last_port) { __list_del_many(&p->requests, &rq->priotree.link);
goto done; @@ -689,50 +593,48 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine) INIT_LIST_HEAD(&rq->priotree.link); rq->priotree.priority = INT_MAX; - i915_guc_submit(rq); - trace_i915_gem_request_in(rq, port_index(port, engine)); + __i915_gem_request_submit(rq); + trace_i915_gem_request_in(rq, port_index(port, execlists)); last = rq; submit = true; } rb = rb_next(rb); - rb_erase(&p->node, &engine->execlist_queue); + rb_erase(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } done: - engine->execlist_first = rb; - if (submit) + execlists->first = rb; + if (submit) { port_assign(port, last); + i915_guc_submit(engine); + } spin_unlock_irq(&engine->timeline->lock); - - return submit; } static void i915_guc_irq_handler(unsigned long data) { - struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + const struct execlist_port * const last_port = + &execlists->port[execlists->port_mask]; struct drm_i915_gem_request *rq; - bool submit; - do { - rq = port_request(&port[0]); - while (rq && i915_gem_request_completed(rq)) { - trace_i915_gem_request_out(rq); - i915_gem_request_put(rq); + rq = port_request(&port[0]); + while (rq && i915_gem_request_completed(rq)) { + trace_i915_gem_request_out(rq); + i915_gem_request_put(rq); - port[0] = port[1]; - memset(&port[1], 0, sizeof(port[1])); + execlists_port_complete(execlists, port); - rq = port_request(&port[0]); - } + rq = port_request(&port[0]); + } - submit = false; - if (!port_count(&port[1])) - submit = i915_guc_dequeue(engine); - } while (submit); + if (!port_isset(last_port)) + i915_guc_dequeue(engine); } /* @@ -913,8 +815,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv, client->engines = engines; client->priority = priority; client->doorbell_id = GUC_DOORBELL_INVALID; - client->wq_offset = GUC_DB_SIZE; - client->wq_size = GUC_WQ_SIZE; spin_lock_init(&client->wq_lock); ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS, @@ -996,28 +896,39 @@ static void guc_client_free(struct i915_guc_client *client) kfree(client); } +static void guc_policy_init(struct guc_policy *policy) +{ + policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US; + policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US; + policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US; + policy->policy_flags = 0; +} + static void guc_policies_init(struct guc_policies *policies) { struct guc_policy *policy; u32 p, i; - policies->dpc_promote_time = 500000; + policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US; policies->max_num_work_items = POLICY_MAX_NUM_WI; for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) { for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { policy = &policies->policy[p][i]; - policy->execution_quantum = 1000000; - policy->preemption_time = 500000; - policy->fault_time = 250000; - policy->policy_flags = 0; + guc_policy_init(policy); } } policies->is_valid = 1; } +/* + * The first 80 dwords of the register state context, containing the + * execlists and ppgtt registers. 
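The ADS entry for each engine therefore becomes 'context image size minus everything the GuC skips'. A worked example under assumed values (4 KiB pages, a one-page PPHWSP, a hypothetical 20-page context image; the real constants live in intel_lrc.h and engine->context_size):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE          4096u  /* assumption for the example */
#define LRC_PPHWSP_SZ      1u     /* pages of per-process HWSP, assumed */
#define LR_HW_CONTEXT_SIZE (80u * sizeof(uint32_t))

int main(void)
{
    uint32_t context_size = 20 * PAGE_SIZE; /* hypothetical engine->context_size */
    uint32_t skipped = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;

    /* 20*4096 - (4096 + 320) = 77504 bytes reported to the firmware */
    printf("eng_state_size = %u bytes\n", (unsigned)(context_size - skipped));
    return 0;
}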
+ */ +#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) + static int guc_ads_create(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); @@ -1032,6 +943,8 @@ static int guc_ads_create(struct intel_guc *guc) } __packed *blob; struct intel_engine_cs *engine; enum intel_engine_id id; + const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE; + const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE; u32 base; GEM_BUG_ON(guc->ads_vma); @@ -1062,13 +975,20 @@ static int guc_ads_create(struct intel_guc *guc) * engines after a reset. Here we use the Render ring default * context, which must already exist and be pinned in the GGTT, * so its address won't change after we've told the GuC where - * to find it. + * to find it. Note that we have to skip our header (1 page), + * because our GuC shared data is there. */ blob->ads.golden_context_lrca = - dev_priv->engine[RCS]->status_page.ggtt_offset; + guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset; + /* + * The GuC expects us to exclude the portion of the context image that + * it skips from the size it is to read. It starts reading from after + * the execlist context (so skipping the first page [PPHWSP] and 80 + * dwords). Weird guc is weird. + */ for_each_engine(engine, dev_priv, id) - blob->ads.eng_state_size[engine->guc_id] = engine->context_size; + blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size; base = guc_ggtt_offset(vma); blob->ads.scheduler_policies = base + ptr_offset(blob, policies); @@ -1221,6 +1141,19 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) enum intel_engine_id id; int err; + /* + * We're using GuC work items for submitting work through GuC. Since + * we're coalescing multiple requests from a single context into a + * single work item prior to assigning it to execlist_port, we can + * never have more work items than the total number of ports (for all + * engines). The GuC firmware is controlling the HEAD of work queue, + * and it is guaranteed that it will remove the work item from the + * queue before our request is completed. + */ + BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) * + sizeof(struct guc_wq_item) * + I915_NUM_ENGINES > GUC_WQ_SIZE); + if (!client) { client = guc_client_alloc(dev_priv, INTEL_INFO(dev_priv)->ring_mask, @@ -1248,24 +1181,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv) guc_interrupts_capture(dev_priv); for_each_engine(engine, dev_priv, id) { - const int wqi_size = sizeof(struct guc_wq_item); - struct drm_i915_gem_request *rq; - + struct intel_engine_execlists * const execlists = &engine->execlists; /* The tasklet was initialised by execlists, and may be in * a state of flux (across a reset) and so we just want to * take over the callback without changing any other state * in the tasklet. 
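Because the execlists tasklet may still be queued across a reset, the takeover below swaps only the callback and then schedules once, so work queued under the old handler drains through the new one. A toy model of that handover (deliberately not the kernel tasklet API):

#include <stdbool.h>
#include <stdio.h>

/* A toy stand-in for struct tasklet_struct: a callback slot plus state
 * we deliberately leave untouched during the handover. */
struct toy_tasklet {
    void (*func)(unsigned long data);
    unsigned long data;
    bool pending;
};

static void execlists_handler(unsigned long data) { (void)data; puts("execlists"); }
static void guc_handler(unsigned long data) { (void)data; puts("guc"); }

static void toy_run(struct toy_tasklet *t)
{
    if (t->pending) {
        t->pending = false;
        t->func(t->data); /* a real tasklet runs this from softirq */
    }
}

int main(void)
{
    struct toy_tasklet t = { execlists_handler, 0, true };

    t.func = guc_handler; /* take over the callback, nothing else */
    toy_run(&t);          /* kick once: work queued before the swap
                           * is drained by the new handler */
    return 0;
}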
*/ - engine->irq_tasklet.func = i915_guc_irq_handler; + execlists->irq_tasklet.func = i915_guc_irq_handler; clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - - /* Replay the current set of previously submitted requests */ - spin_lock_irq(&engine->timeline->lock); - list_for_each_entry(rq, &engine->timeline->requests, link) { - guc_client_update_wq_rsvd(client, wqi_size); - __i915_guc_submit(rq); - } - spin_unlock_irq(&engine->timeline->lock); + tasklet_schedule(&execlists->irq_tasklet); } return 0; @@ -1310,7 +1234,7 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv) /* any value greater than GUC_POWER_D0 */ data[1] = GUC_POWER_D1; /* first page is shared data with GuC */ - data[2] = guc_ggtt_offset(ctx->engine[RCS].state); + data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE; return intel_guc_send(guc, data, ARRAY_SIZE(data)); } @@ -1328,7 +1252,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv) if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) return 0; - if (i915.guc_log_level >= 0) + if (i915_modparams.guc_log_level >= 0) gen9_enable_guc_interrupts(dev_priv); ctx = dev_priv->kernel_context; @@ -1336,7 +1260,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv) data[0] = INTEL_GUC_ACTION_EXIT_S_STATE; data[1] = GUC_POWER_D0; /* first page is shared data with GuC */ - data[2] = guc_ggtt_offset(ctx->engine[RCS].state); + data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE; return intel_guc_send(guc, data, ARRAY_SIZE(data)); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b63893eeca73..efd7827ff181 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -126,7 +126,7 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = { POSTING_READ(GEN8_##type##_IIR(which)); \ } while (0) -#define GEN5_IRQ_RESET(type) do { \ +#define GEN3_IRQ_RESET(type) do { \ I915_WRITE(type##IMR, 0xffffffff); \ POSTING_READ(type##IMR); \ I915_WRITE(type##IER, 0); \ @@ -136,10 +136,20 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = { POSTING_READ(type##IIR); \ } while (0) +#define GEN2_IRQ_RESET(type) do { \ + I915_WRITE16(type##IMR, 0xffff); \ + POSTING_READ16(type##IMR); \ + I915_WRITE16(type##IER, 0); \ + I915_WRITE16(type##IIR, 0xffff); \ + POSTING_READ16(type##IIR); \ + I915_WRITE16(type##IIR, 0xffff); \ + POSTING_READ16(type##IIR); \ +} while (0) + /* * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
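The new GEN2_IRQ_RESET and GEN3_IRQ_RESET macros differ only in access width (I915_WRITE16 vs I915_WRITE); the recipe is shared: mask everything, disable everything, then clear IIR twice, since IIR on these parts is double-buffered and a latched second event pops out after the first clear. The shape of it with MMIO faked out:

#include <stdint.h>

/* Fake MMIO for illustration; the driver uses I915_WRITE/POSTING_READ. */
static uint32_t fake_mmio[3];
static void mmio_write(unsigned int reg, uint32_t val) { fake_mmio[reg] = val; }
static uint32_t mmio_read(unsigned int reg) { return fake_mmio[reg]; }

enum { IMR, IER, IIR };

static void irq_bank_reset(void)
{
    mmio_write(IMR, 0xffffffff); /* mask every source */
    (void)mmio_read(IMR);        /* posting read: flush the write */
    mmio_write(IER, 0);          /* disable every source */
    mmio_write(IIR, 0xffffffff); /* clear the visible status bits... */
    (void)mmio_read(IIR);
    mmio_write(IIR, 0xffffffff); /* ...and again, for the latched copy
                                  * that reappears after the first clear */
    (void)mmio_read(IIR);
}

int main(void) { irq_bank_reset(); return 0; }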
*/ -static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, +static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv, i915_reg_t reg) { u32 val = I915_READ(reg); @@ -155,20 +165,43 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, POSTING_READ(reg); } +static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv, + i915_reg_t reg) +{ + u16 val = I915_READ16(reg); + + if (val == 0) + return; + + WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", + i915_mmio_reg_offset(reg), val); + I915_WRITE16(reg, 0xffff); + POSTING_READ16(reg); + I915_WRITE16(reg, 0xffff); + POSTING_READ16(reg); +} + #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ - gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ + gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ POSTING_READ(GEN8_##type##_IMR(which)); \ } while (0) -#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ - gen5_assert_iir_is_zero(dev_priv, type##IIR); \ +#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \ + gen3_assert_iir_is_zero(dev_priv, type##IIR); \ I915_WRITE(type##IER, (ier_val)); \ I915_WRITE(type##IMR, (imr_val)); \ POSTING_READ(type##IMR); \ } while (0) +#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \ + gen2_assert_iir_is_zero(dev_priv, type##IIR); \ + I915_WRITE16(type##IER, (ier_val)); \ + I915_WRITE16(type##IMR, (imr_val)); \ + POSTING_READ16(type##IMR); \ +} while (0) + static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); @@ -336,7 +369,7 @@ void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask) __gen6_mask_pm_irq(dev_priv, mask); } -void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) +static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) { i915_reg_t reg = gen6_pm_iir(dev_priv); @@ -347,7 +380,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask) POSTING_READ(reg); } -void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) +static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) { lockdep_assert_held(&dev_priv->irq_lock); @@ -357,7 +390,7 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask) /* unmask_pm_irq provides an implicit barrier (POSTING_READ) */ } -void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) +static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask) { lockdep_assert_held(&dev_priv->irq_lock); @@ -405,7 +438,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) synchronize_irq(dev_priv->drm.irq); /* Now that we will not be generating any more work, flush any - * outsanding tasks. As we are called on the RPS idle path, + * outstanding tasks. As we are called on the RPS idle path, * we will reset the GPU to minimum frequencies, so the current * state of the worker can be discarded. 
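The pipestat refactor that follows works because PIPESTAT keeps each interrupt's enable bit exactly 16 above its status bit, so the common case of i915_pipestat_enable_mask() is one shift, and only a few VLV/CHV sprite and PSR bits need patching up afterwards. In miniature (bit positions illustrative):

#include <assert.h>
#include <stdint.h>

#define PIPE_VBLANK_INTERRUPT_STATUS (1u << 1)  /* illustrative bit */
#define PIPE_VBLANK_INTERRUPT_ENABLE (1u << 17) /* same bit + 16 */

static uint32_t pipestat_enable_mask(uint32_t status_mask)
{
    /* The common case: every enable bit sits 16 above its status bit. */
    return status_mask << 16;
}

int main(void)
{
    assert(pipestat_enable_mask(PIPE_VBLANK_INTERRUPT_STATUS) ==
           PIPE_VBLANK_INTERRUPT_ENABLE);
    return 0;
}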
*/ @@ -534,62 +567,16 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, POSTING_READ(SDEIMR); } -static void -__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, - u32 enable_mask, u32 status_mask) +u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, + enum pipe pipe) { - i915_reg_t reg = PIPESTAT(pipe); - u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; - - lockdep_assert_held(&dev_priv->irq_lock); - WARN_ON(!intel_irqs_enabled(dev_priv)); - - if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || - status_mask & ~PIPESTAT_INT_STATUS_MASK, - "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", - pipe_name(pipe), enable_mask, status_mask)) - return; - - if ((pipestat & enable_mask) == enable_mask) - return; - - dev_priv->pipestat_irq_mask[pipe] |= status_mask; - - /* Enable the interrupt, clear any pending status */ - pipestat |= enable_mask | status_mask; - I915_WRITE(reg, pipestat); - POSTING_READ(reg); -} - -static void -__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, - u32 enable_mask, u32 status_mask) -{ - i915_reg_t reg = PIPESTAT(pipe); - u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; + u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; + u32 enable_mask = status_mask << 16; lockdep_assert_held(&dev_priv->irq_lock); - WARN_ON(!intel_irqs_enabled(dev_priv)); - - if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || - status_mask & ~PIPESTAT_INT_STATUS_MASK, - "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", - pipe_name(pipe), enable_mask, status_mask)) - return; - - if ((pipestat & enable_mask) == 0) - return; - - dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; - pipestat &= ~enable_mask; - I915_WRITE(reg, pipestat); - POSTING_READ(reg); -} - -static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) -{ - u32 enable_mask = status_mask << 16; + if (INTEL_GEN(dev_priv) < 5) + goto out; /* * On pipe A we don't support the PSR interrupt yet, @@ -612,35 +599,59 @@ static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; +out: + WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || + status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", + pipe_name(pipe), enable_mask, status_mask); + return enable_mask; } -void -i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, - u32 status_mask) +void i915_enable_pipestat(struct drm_i915_private *dev_priv, + enum pipe pipe, u32 status_mask) { + i915_reg_t reg = PIPESTAT(pipe); u32 enable_mask; - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, - status_mask); - else - enable_mask = status_mask << 16; - __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); + WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: status_mask=0x%x\n", + pipe_name(pipe), status_mask); + + lockdep_assert_held(&dev_priv->irq_lock); + WARN_ON(!intel_irqs_enabled(dev_priv)); + + if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) + return; + + dev_priv->pipestat_irq_mask[pipe] |= status_mask; + enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); + + I915_WRITE(reg, enable_mask | status_mask); + POSTING_READ(reg); } -void -i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, - u32 status_mask) +void i915_disable_pipestat(struct drm_i915_private 
*dev_priv, + enum pipe pipe, u32 status_mask) { + i915_reg_t reg = PIPESTAT(pipe); u32 enable_mask; - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, - status_mask); - else - enable_mask = status_mask << 16; - __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); + WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, + "pipe %c: status_mask=0x%x\n", + pipe_name(pipe), status_mask); + + lockdep_assert_held(&dev_priv->irq_lock); + WARN_ON(!intel_irqs_enabled(dev_priv)); + + if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) + return; + + dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; + enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); + + I915_WRITE(reg, enable_mask | status_mask); + POSTING_READ(reg); } /** @@ -772,6 +783,57 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); } +/* + * On certain encoders on certain platforms, pipe + * scanline register will not work to get the scanline, + * since the timings are driven from the PORT or issues + * with scanline register updates. + * This function will use Framestamp and current + * timestamp registers to calculate the scanline. + */ +static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_vblank_crtc *vblank = + &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; + const struct drm_display_mode *mode = &vblank->hwmode; + u32 vblank_start = mode->crtc_vblank_start; + u32 vtotal = mode->crtc_vtotal; + u32 htotal = mode->crtc_htotal; + u32 clock = mode->crtc_clock; + u32 scanline, scan_prev_time, scan_curr_time, scan_post_time; + + /* + * To avoid the race condition where we might cross into the + * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR + * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR + * during the same frame. + */ + do { + /* + * This field provides read back of the display + * pipe frame time stamp. The time stamp value + * is sampled at every start of vertical blank. + */ + scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); + + /* + * The TIMESTAMP_CTR register has the current + * time stamp value. + */ + scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR); + + scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe)); + } while (scan_post_time != scan_prev_time); + + scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time, + clock), 1000 * htotal); + scanline = min(scanline, vtotal - 1); + scanline = (scanline + vblank_start) % vtotal; + + return scanline; +} + /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) { @@ -788,6 +850,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)]; mode = &vblank->hwmode; + if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP) + return __intel_get_crtc_scanline_from_timestamp(crtc); + vtotal = mode->crtc_vtotal; if (mode->flags & DRM_MODE_FLAG_INTERLACE) vtotal /= 2; @@ -1005,6 +1070,8 @@ static void notify_ring(struct intel_engine_cs *engine) spin_lock(&engine->breadcrumbs.irq_lock); wait = engine->breadcrumbs.irq_wait; if (wait) { + bool wakeup = engine->irq_seqno_barrier; + /* We use a callback from the dma-fence to submit * requests after waiting on our own requests. 
To * ensure minimum delay in queuing the next request to @@ -1017,12 +1084,18 @@ static void notify_ring(struct intel_engine_cs *engine) * and many waiters. */ if (i915_seqno_passed(intel_engine_get_seqno(engine), - wait->seqno) && - !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &wait->request->fence.flags)) - rq = i915_gem_request_get(wait->request); + wait->seqno)) { + struct drm_i915_gem_request *waiter = wait->request; + + wakeup = true; + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &waiter->fence.flags) && + intel_wait_check_request(wait, waiter)) + rq = i915_gem_request_get(waiter); + } - wake_up_process(wait->tsk); + if (wakeup) + wake_up_process(wait->tsk); } else { __intel_engine_disarm_breadcrumbs(engine); } @@ -1305,10 +1378,11 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, static void gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) { + struct intel_engine_execlists * const execlists = &engine->execlists; bool tasklet = false; if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { - if (port_count(&engine->execlist_port[0])) { + if (port_count(&execlists->port[0])) { __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); tasklet = true; } @@ -1316,11 +1390,11 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { notify_ring(engine); - tasklet |= i915.enable_guc_submission; + tasklet |= i915_modparams.enable_guc_submission; } if (tasklet) - tasklet_hi_schedule(&engine->irq_tasklet); + tasklet_hi_schedule(&execlists->irq_tasklet); } static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, @@ -1573,11 +1647,11 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, * bonkers. So let's just wait for the next vblank and read * out the buggy result. * - * On CHV sometimes the second CRC is bonkers as well, so + * On GEN8+ sometimes the second CRC is bonkers as well, so * don't trust that one either. */ if (pipe_crc->skipped == 0 || - (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) { + (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { pipe_crc->skipped++; spin_unlock(&pipe_crc->lock); return; @@ -1706,8 +1780,21 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) } } -static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, - u32 iir, u32 pipe_stats[I915_MAX_PIPES]) +static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + I915_WRITE(PIPESTAT(pipe), + PIPESTAT_INT_STATUS_MASK | + PIPE_FIFO_UNDERRUN_STATUS); + + dev_priv->pipestat_irq_mask[pipe] = 0; + } +} + +static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { int pipe; @@ -1720,7 +1807,7 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) { i915_reg_t reg; - u32 mask, iir_bit = 0; + u32 status_mask, enable_mask, iir_bit = 0; /* * PIPESTAT bits get signalled even when the interrupt is @@ -1731,7 +1818,7 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, */ /* fifo underruns are filtered in the underrun handler.
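The ack path above relies on PIPESTAT's split personality: the low status half is write-one-to-clear while the high enable half is plain read/write, so the single write of 'enable_mask | pipe_stats[pipe]' clears exactly what fired without losing the enables. A self-checking miniature of that behaviour:

#include <assert.h>
#include <stdint.h>

/* Fake PIPESTAT: high half is plain read/write enables, low half is
 * write-one-to-clear status, mimicking what the hardware implements. */
static uint32_t pipestat;
static void pipestat_write(uint32_t v)
{
    pipestat = (v & 0xffff0000u) | ((pipestat & 0xffffu) & ~v);
}

static uint32_t pipestat_ack(uint32_t enable_mask, uint32_t status_mask)
{
    uint32_t stats = pipestat & status_mask;

    /* One write both clears the status bits we saw (by setting them)
     * and keeps the enable half asserted: nothing dropped, nothing lost. */
    if (stats)
        pipestat_write(enable_mask | stats);

    return stats;
}

int main(void)
{
    pipestat = (1u << 17) | (1u << 1); /* vblank enabled and pending */
    assert(pipestat_ack(1u << 17, 1u << 1) == (1u << 1));
    assert(pipestat == (1u << 17));    /* status cleared, enable kept */
    return 0;
}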
*/ - mask = PIPE_FIFO_UNDERRUN_STATUS; + status_mask = PIPE_FIFO_UNDERRUN_STATUS; switch (pipe) { case PIPE_A: @@ -1745,25 +1832,92 @@ static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, break; } if (iir & iir_bit) - mask |= dev_priv->pipestat_irq_mask[pipe]; + status_mask |= dev_priv->pipestat_irq_mask[pipe]; - if (!mask) + if (!status_mask) continue; reg = PIPESTAT(pipe); - mask |= PIPESTAT_INT_ENABLE_MASK; - pipe_stats[pipe] = I915_READ(reg) & mask; + pipe_stats[pipe] = I915_READ(reg) & status_mask; + enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); /* * Clear the PIPE*STAT regs before the IIR */ - if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | - PIPESTAT_INT_STATUS_MASK)) - I915_WRITE(reg, pipe_stats[pipe]); + if (pipe_stats[pipe]) + I915_WRITE(reg, enable_mask | pipe_stats[pipe]); } spin_unlock(&dev_priv->irq_lock); } +static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, + u16 iir, u32 pipe_stats[I915_MAX_PIPES]) +{ + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) + drm_handle_vblank(&dev_priv->drm, pipe); + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + } +} + +static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) +{ + bool blc_event = false; + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) + drm_handle_vblank(&dev_priv->drm, pipe); + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + } + + if (blc_event || (iir & I915_ASLE_INTERRUPT)) + intel_opregion_asle_intr(dev_priv); +} + +static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, + u32 iir, u32 pipe_stats[I915_MAX_PIPES]) +{ + bool blc_event = false; + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) + drm_handle_vblank(&dev_priv->drm, pipe); + + if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) + blc_event = true; + + if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) + i9xx_pipe_crc_irq_handler(dev_priv, pipe); + + if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) + intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); + } + + if (blc_event || (iir & I915_ASLE_INTERRUPT)) + intel_opregion_asle_intr(dev_priv); + + if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) + gmbus_irq_handler(dev_priv); +} + static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, u32 pipe_stats[I915_MAX_PIPES]) { @@ -1879,7 +2033,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in iir */ - valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & (I915_LPE_PIPE_A_INTERRUPT | I915_LPE_PIPE_B_INTERRUPT)) @@ -1963,7 +2117,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in iir */ - valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); if (iir & 
(I915_LPE_PIPE_A_INTERRUPT | I915_LPE_PIPE_B_INTERRUPT | @@ -2860,7 +3014,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv) if (HAS_PCH_NOP(dev_priv)) return; - GEN5_IRQ_RESET(SDE); + GEN3_IRQ_RESET(SDE); if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) I915_WRITE(SERR_INT, 0xffffffff); @@ -2888,15 +3042,13 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev) static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) { - GEN5_IRQ_RESET(GT); + GEN3_IRQ_RESET(GT); if (INTEL_GEN(dev_priv) >= 6) - GEN5_IRQ_RESET(GEN6_PM); + GEN3_IRQ_RESET(GEN6_PM); } static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) { - enum pipe pipe; - if (IS_CHERRYVIEW(dev_priv)) I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else @@ -2905,14 +3057,9 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - for_each_pipe(dev_priv, pipe) { - I915_WRITE(PIPESTAT(pipe), - PIPE_FIFO_UNDERRUN_STATUS | - PIPESTAT_INT_STATUS_MASK); - dev_priv->pipestat_irq_mask[pipe] = 0; - } + i9xx_pipestat_irq_reset(dev_priv); - GEN5_IRQ_RESET(VLV_); + GEN3_IRQ_RESET(VLV_); dev_priv->irq_mask = ~0; } @@ -2922,8 +3069,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) u32 enable_mask; enum pipe pipe; - pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | - PIPE_CRC_DONE_INTERRUPT_STATUS; + pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); for_each_pipe(dev_priv, pipe) @@ -2943,7 +3089,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) dev_priv->irq_mask = ~enable_mask; - GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); + GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); } /* drm_dma.h hooks @@ -2952,9 +3098,10 @@ static void ironlake_irq_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - I915_WRITE(HWSTAM, 0xffffffff); + if (IS_GEN5(dev_priv)) + I915_WRITE(HWSTAM, 0xffffffff); - GEN5_IRQ_RESET(DE); + GEN3_IRQ_RESET(DE); if (IS_GEN7(dev_priv)) I915_WRITE(GEN7_ERR_INT, 0xffffffff); @@ -2963,7 +3110,7 @@ static void ironlake_irq_reset(struct drm_device *dev) ibx_irq_reset(dev_priv); } -static void valleyview_irq_preinstall(struct drm_device *dev) +static void valleyview_irq_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -3001,9 +3148,9 @@ static void gen8_irq_reset(struct drm_device *dev) POWER_DOMAIN_PIPE(pipe))) GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); - GEN5_IRQ_RESET(GEN8_DE_PORT_); - GEN5_IRQ_RESET(GEN8_DE_MISC_); - GEN5_IRQ_RESET(GEN8_PCU_); + GEN3_IRQ_RESET(GEN8_DE_PORT_); + GEN3_IRQ_RESET(GEN8_DE_MISC_); + GEN3_IRQ_RESET(GEN8_PCU_); if (HAS_PCH_SPLIT(dev_priv)) ibx_irq_reset(dev_priv); @@ -3037,7 +3184,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, synchronize_irq(dev_priv->drm.irq); } -static void cherryview_irq_preinstall(struct drm_device *dev) +static void cherryview_irq_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -3046,7 +3193,7 @@ static void cherryview_irq_preinstall(struct drm_device *dev) gen8_gt_irq_reset(dev_priv); - GEN5_IRQ_RESET(GEN8_PCU_); + GEN3_IRQ_RESET(GEN8_PCU_); spin_lock_irq(&dev_priv->irq_lock); if (dev_priv->display_irqs_enabled) @@ -3111,7 +3258,15 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) static void spt_hpd_detection_setup(struct 
drm_i915_private *dev_priv) { - u32 hotplug; + u32 val, hotplug; + + /* Display WA #1179 WaHardHangonHotPlug: cnp */ + if (HAS_PCH_CNP(dev_priv)) { + val = I915_READ(SOUTH_CHICKEN1); + val &= ~CHASSIS_CLK_REQ_DURATION_MASK; + val |= CHASSIS_CLK_REQ_DURATION(0xf); + I915_WRITE(SOUTH_CHICKEN1, val); + } /* Enable digital hotplug on the PCH */ hotplug = I915_READ(PCH_PORT_HOTPLUG); @@ -3238,10 +3393,12 @@ static void ibx_irq_postinstall(struct drm_device *dev) if (HAS_PCH_IBX(dev_priv)) mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; - else + else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; + else + mask = SDE_GMBUS_CPT; - gen5_assert_iir_is_zero(dev_priv, SDEIIR); + gen3_assert_iir_is_zero(dev_priv, SDEIIR); I915_WRITE(SDEIMR, ~mask); if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || @@ -3272,7 +3429,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; } - GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); + GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); if (INTEL_GEN(dev_priv) >= 6) { /* @@ -3285,7 +3442,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) } dev_priv->pm_imr = 0xffffffff; - GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); + GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); } } @@ -3296,18 +3453,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev) if (INTEL_GEN(dev_priv) >= 7) { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | - DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | - DE_PLANEB_FLIP_DONE_IVB | - DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); + DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | DE_DP_A_HOTPLUG_IVB); } else { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | - DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | - DE_AUX_CHANNEL_A | - DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | - DE_POISON); + DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | + DE_PIPEA_CRC_DONE | DE_POISON); extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | DE_DP_A_HOTPLUG); @@ -3315,11 +3468,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev) dev_priv->irq_mask = ~display_mask; - I915_WRITE(HWSTAM, 0xeffe); - ibx_irq_pre_postinstall(dev); - GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); + GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); gen5_gt_irq_postinstall(dev); @@ -3429,15 +3580,13 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) enum pipe pipe; if (INTEL_GEN(dev_priv) >= 9) { - de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | - GEN9_DE_PIPE_IRQ_FAULT_ERRORS; + de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; if (IS_GEN9_LP(dev_priv)) de_port_masked |= BXT_DE_PORT_GMBUS; } else { - de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | - GEN8_DE_PIPE_IRQ_FAULT_ERRORS; + de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; } de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | @@ -3460,8 +3609,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) dev_priv->de_irq_mask[pipe], de_pipe_enables); - GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); - GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); + GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); + GEN3_IRQ_INIT(GEN8_DE_MISC_, 
~de_misc_masked, de_misc_masked); if (IS_GEN9_LP(dev_priv)) bxt_hpd_detection_setup(dev_priv); @@ -3505,98 +3654,36 @@ static int cherryview_irq_postinstall(struct drm_device *dev) return 0; } -static void gen8_irq_uninstall(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - - if (!dev_priv) - return; - - gen8_irq_reset(dev); -} - -static void valleyview_irq_uninstall(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - - if (!dev_priv) - return; - - I915_WRITE(VLV_MASTER_IER, 0); - POSTING_READ(VLV_MASTER_IER); - - gen5_gt_irq_reset(dev_priv); - - I915_WRITE(HWSTAM, 0xffffffff); - - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) - vlv_display_irq_reset(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); -} - -static void cherryview_irq_uninstall(struct drm_device *dev) +static void i8xx_irq_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - if (!dev_priv) - return; - - I915_WRITE(GEN8_MASTER_IRQ, 0); - POSTING_READ(GEN8_MASTER_IRQ); - - gen8_gt_irq_reset(dev_priv); - - GEN5_IRQ_RESET(GEN8_PCU_); - - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) - vlv_display_irq_reset(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); -} - -static void ironlake_irq_uninstall(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); + i9xx_pipestat_irq_reset(dev_priv); - if (!dev_priv) - return; - - ironlake_irq_reset(dev); -} - -static void i8xx_irq_preinstall(struct drm_device * dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int pipe; + I915_WRITE16(HWSTAM, 0xffff); - for_each_pipe(dev_priv, pipe) - I915_WRITE(PIPESTAT(pipe), 0); - I915_WRITE16(IMR, 0xffff); - I915_WRITE16(IER, 0x0); - POSTING_READ16(IER); + GEN2_IRQ_RESET(); } static int i8xx_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + u16 enable_mask; - I915_WRITE16(EMR, - ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); + I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH)); /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); - I915_WRITE16(IMR, dev_priv->irq_mask); + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); + + enable_mask = + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_USER_INTERRUPT; - I915_WRITE16(IER, - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_USER_INTERRUPT); - POSTING_READ16(IER); + GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ @@ -3608,17 +3695,11 @@ static int i8xx_irq_postinstall(struct drm_device *dev) return 0; } -/* - * Returns true when a page flip has completed. 
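The i8xx handler that follows reads as snapshot, ack, act: latch PIPESTAT, clear IIR, and only then dispatch, so an event arriving mid-handler raises a fresh interrupt instead of being swallowed. Its skeleton, with the hardware reduced to stubs:

#include <stdbool.h>
#include <stdint.h>

#define USER_INTERRUPT (1u << 1) /* illustrative bit position */

static uint16_t iir_reg = USER_INTERRUPT; /* fake pending interrupt */
static uint16_t read_iir(void) { return iir_reg; }
static void write_iir(uint16_t v) { iir_reg &= ~v; } /* W1C ack */
static void ack_pipestat(uint32_t stats[2]) { stats[0] = stats[1] = 0; }
static void handle_user_interrupt(void) {}
static void handle_pipestat(const uint32_t stats[2]) { (void)stats; }

static bool irq_handler(void)
{
    uint32_t pipe_stats[2];
    uint16_t iir = read_iir();

    if (iir == 0)
        return false;

    /* Ack everything first: PIPESTAT, then IIR itself. An event that
     * lands after this point re-raises the interrupt line instead of
     * being silently swallowed. */
    ack_pipestat(pipe_stats);
    write_iir(iir);

    /* Only now act on the snapshot we took. */
    if (iir & USER_INTERRUPT)
        handle_user_interrupt();
    handle_pipestat(pipe_stats);

    return true;
}

int main(void) { return irq_handler() ? 0 : 1; }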
- */ static irqreturn_t i8xx_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; struct drm_i915_private *dev_priv = to_i915(dev); - u16 iir, new_iir; - u32 pipe_stats[2]; - int pipe; - irqreturn_t ret; + irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; @@ -3626,96 +3707,50 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ disable_rpm_wakeref_asserts(dev_priv); - ret = IRQ_NONE; - iir = I915_READ16(IIR); - if (iir == 0) - goto out; + do { + u32 pipe_stats[I915_MAX_PIPES] = {}; + u16 iir; - while (iir) { - /* Can't rely on pipestat interrupt bit in iir as it might - * have been cleared after the pipestat interrupt was received. - * It doesn't set the bit in iir again, but it still produces - * interrupts (for non-MSI). - */ - spin_lock(&dev_priv->irq_lock); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); + iir = I915_READ16(IIR); + if (iir == 0) + break; - for_each_pipe(dev_priv, pipe) { - i915_reg_t reg = PIPESTAT(pipe); - pipe_stats[pipe] = I915_READ(reg); + ret = IRQ_HANDLED; - /* - * Clear the PIPE*STAT regs before the IIR - */ - if (pipe_stats[pipe] & 0x8000ffff) - I915_WRITE(reg, pipe_stats[pipe]); - } - spin_unlock(&dev_priv->irq_lock); + /* Call regardless, as some status bits might not be + * signalled in iir */ + i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); I915_WRITE16(IIR, iir); - new_iir = I915_READ16(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) notify_ring(dev_priv->engine[RCS]); - for_each_pipe(dev_priv, pipe) { - int plane = pipe; - if (HAS_FBC(dev_priv)) - plane = !plane; - - if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - drm_handle_vblank(&dev_priv->drm, pipe); - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - intel_cpu_fifo_underrun_irq_handler(dev_priv, - pipe); - } + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - iir = new_iir; - } - ret = IRQ_HANDLED; + i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); + } while (0); -out: enable_rpm_wakeref_asserts(dev_priv); return ret; } -static void i8xx_irq_uninstall(struct drm_device * dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int pipe; - - for_each_pipe(dev_priv, pipe) { - /* Clear enable bits; then clear status bits */ - I915_WRITE(PIPESTAT(pipe), 0); - I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); - } - I915_WRITE16(IMR, 0xffff); - I915_WRITE16(IER, 0x0); - I915_WRITE16(IIR, I915_READ16(IIR)); -} - -static void i915_irq_preinstall(struct drm_device * dev) +static void i915_irq_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - int pipe; if (I915_HAS_HOTPLUG(dev_priv)) { i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); } - I915_WRITE16(HWSTAM, 0xeffe); - for_each_pipe(dev_priv, pipe) - I915_WRITE(PIPESTAT(pipe), 0); - I915_WRITE(IMR, 0xffffffff); - I915_WRITE(IER, 0x0); - POSTING_READ(IER); + i9xx_pipestat_irq_reset(dev_priv); + + I915_WRITE(HWSTAM, 0xffffffff); + + GEN3_IRQ_RESET(); } static int i915_irq_postinstall(struct drm_device *dev) @@ -3723,15 +3758,14 @@ static int i915_irq_postinstall(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); u32 enable_mask; - I915_WRITE(EMR, 
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); + I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | + I915_ERROR_MEMORY_REFRESH)); /* Unmask the interrupts that we always want on. */ dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | @@ -3740,20 +3774,13 @@ static int i915_irq_postinstall(struct drm_device *dev) I915_USER_INTERRUPT; if (I915_HAS_HOTPLUG(dev_priv)) { - i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - POSTING_READ(PORT_HOTPLUG_EN); - /* Enable in IER... */ enable_mask |= I915_DISPLAY_PORT_INTERRUPT; /* and unmask in IMR */ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; } - I915_WRITE(IMR, dev_priv->irq_mask); - I915_WRITE(IER, enable_mask); - POSTING_READ(IER); - - i915_enable_asle_pipestat(dev_priv); + GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. */ @@ -3762,6 +3789,8 @@ static int i915_irq_postinstall(struct drm_device *dev) i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); + i915_enable_asle_pipestat(dev_priv); + return 0; } @@ -3769,8 +3798,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; struct drm_i915_private *dev_priv = to_i915(dev); - u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; - int pipe, ret = IRQ_NONE; + irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; @@ -3778,131 +3806,56 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ disable_rpm_wakeref_asserts(dev_priv); - iir = I915_READ(IIR); do { - bool irq_received = (iir) != 0; - bool blc_event = false; - - /* Can't rely on pipestat interrupt bit in iir as it might - * have been cleared after the pipestat interrupt was received. - * It doesn't set the bit in iir again, but it still produces - * interrupts (for non-MSI). - */ - spin_lock(&dev_priv->irq_lock); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - - for_each_pipe(dev_priv, pipe) { - i915_reg_t reg = PIPESTAT(pipe); - pipe_stats[pipe] = I915_READ(reg); - - /* Clear the PIPE*STAT regs before the IIR */ - if (pipe_stats[pipe] & 0x8000ffff) { - I915_WRITE(reg, pipe_stats[pipe]); - irq_received = true; - } - } - spin_unlock(&dev_priv->irq_lock); + u32 pipe_stats[I915_MAX_PIPES] = {}; + u32 hotplug_status = 0; + u32 iir; - if (!irq_received) + iir = I915_READ(IIR); + if (iir == 0) break; - /* Consume port. 
Then clear IIR or we'll miss events */ + ret = IRQ_HANDLED; + if (I915_HAS_HOTPLUG(dev_priv) && - iir & I915_DISPLAY_PORT_INTERRUPT) { - u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); - if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); - } + iir & I915_DISPLAY_PORT_INTERRUPT) + hotplug_status = i9xx_hpd_irq_ack(dev_priv); + + /* Call regardless, as some status bits might not be + * signalled in iir */ + i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); I915_WRITE(IIR, iir); - new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) notify_ring(dev_priv->engine[RCS]); - for_each_pipe(dev_priv, pipe) { - int plane = pipe; - if (HAS_FBC(dev_priv)) - plane = !plane; - - if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - drm_handle_vblank(&dev_priv->drm, pipe); - - if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) - blc_event = true; - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - intel_cpu_fifo_underrun_irq_handler(dev_priv, - pipe); - } + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev_priv); + if (hotplug_status) + i9xx_hpd_irq_handler(dev_priv, hotplug_status); - /* With MSI, interrupts are only generated when iir - * transitions from zero to nonzero. If another bit got - * set while we were handling the existing iir bits, then - * we would never get another interrupt. - * - * This is fine on non-MSI as well, as if we hit this path - * we avoid exiting the interrupt handler only to generate - * another one. - * - * Note that for MSI this could cause a stray interrupt report - * if an interrupt landed in the time between writing IIR and - * the posting read. This should be rare enough to never - * trigger the 99% of 100,000 interrupts test for disabling - * stray interrupts. 
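The comment deleted here justified the old re-read loop: with MSI, a message is only generated when IIR goes from zero to nonzero, so the handler spun until IIR stayed clear. The rewritten handlers get the same guarantee from ordering alone, acking everything before handling anything, as the contrast below sketches (fake write-one-to-clear IIR):

#include <stdint.h>

static uint32_t iir_reg;
static uint32_t read_iir(void) { return iir_reg; }
static void ack_iir(uint32_t v) { iir_reg &= ~v; }
static void handle(uint32_t iir) { (void)iir; }

/* Old shape: ack and handling interleaved, so the handler had to keep
 * re-reading IIR until it stayed zero, or an MSI edge could be missed. */
static void handler_loop(void)
{
    uint32_t iir;

    while ((iir = read_iir()) != 0) {
        ack_iir(iir);
        handle(iir);
    }
}

/* New shape: one pass. Everything is acked before anything is handled,
 * so a later event always produces a fresh 0 -> 1 IIR transition and
 * therefore a fresh MSI message. */
static void handler_single_pass(void)
{
    uint32_t iir = read_iir();

    if (iir == 0)
        return;

    ack_iir(iir);
    handle(iir);
}

int main(void)
{
    iir_reg = 0x2;
    handler_loop();
    iir_reg = 0x2;
    handler_single_pass();
    return (int)iir_reg;
}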
- */ - ret = IRQ_HANDLED; - iir = new_iir; - } while (iir); + i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); + } while (0); enable_rpm_wakeref_asserts(dev_priv); return ret; } -static void i915_irq_uninstall(struct drm_device * dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int pipe; - - if (I915_HAS_HOTPLUG(dev_priv)) { - i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - } - - I915_WRITE16(HWSTAM, 0xffff); - for_each_pipe(dev_priv, pipe) { - /* Clear enable bits; then clear status bits */ - I915_WRITE(PIPESTAT(pipe), 0); - I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); - } - I915_WRITE(IMR, 0xffffffff); - I915_WRITE(IER, 0x0); - - I915_WRITE(IIR, I915_READ(IIR)); -} - -static void i965_irq_preinstall(struct drm_device * dev) +static void i965_irq_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - int pipe; i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - I915_WRITE(HWSTAM, 0xeffe); - for_each_pipe(dev_priv, pipe) - I915_WRITE(PIPESTAT(pipe), 0); - I915_WRITE(IMR, 0xffffffff); - I915_WRITE(IER, 0x0); - POSTING_READ(IER); + i9xx_pipestat_irq_reset(dev_priv); + + I915_WRITE(HWSTAM, 0xffffffff); + + GEN3_IRQ_RESET(); } static int i965_irq_postinstall(struct drm_device *dev) @@ -3911,31 +3864,6 @@ static int i965_irq_postinstall(struct drm_device *dev) u32 enable_mask; u32 error_mask; - /* Unmask the interrupts that we always want on. */ - dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | - I915_DISPLAY_PORT_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); - - enable_mask = ~dev_priv->irq_mask; - enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); - enable_mask |= I915_USER_INTERRUPT; - - if (IS_G4X(dev_priv)) - enable_mask |= I915_BSD_USER_INTERRUPT; - - /* Interrupt setup is already guaranteed to be single-threaded, this is - * just to make the assert_spin_locked check happy. */ - spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); - spin_unlock_irq(&dev_priv->irq_lock); - /* * Enable some error detection, note the instruction error mask * bit is reserved, so we leave it masked. @@ -3951,12 +3879,34 @@ static int i965_irq_postinstall(struct drm_device *dev) } I915_WRITE(EMR, error_mask); - I915_WRITE(IMR, dev_priv->irq_mask); - I915_WRITE(IER, enable_mask); - POSTING_READ(IER); + /* Unmask the interrupts that we always want on. 
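As elsewhere in this file, a source reaches the CPU only when its IER bit is set and its IMR bit is clear, and the two masks built here are deliberately not complements: I915_USER_INTERRUPT is enabled in IER but left masked in IMR, so the per-engine code can flip it on and off through IMR alone. A minimal model (bit positions illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t user = 1u << 1;            /* illustrative bit positions */
    uint32_t pipe_events = (1u << 6) | (1u << 4);

    uint32_t ier = pipe_events | user;  /* enabled sources */
    uint32_t imr = ~pipe_events;        /* user stays masked for now */

    /* Effective sources are those enabled in IER and unmasked in IMR. */
    printf("firing now:   0x%08x\n", (unsigned)(ier & ~imr)); /* pipes only */

    imr &= ~user;                       /* unmasked on demand, IER untouched */
    printf("after unmask: 0x%08x\n", (unsigned)(ier & ~imr)); /* + user irq */
    return 0;
}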
*/ + dev_priv->irq_mask = + ~(I915_ASLE_INTERRUPT | + I915_DISPLAY_PORT_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); - i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - POSTING_READ(PORT_HOTPLUG_EN); + enable_mask = + I915_ASLE_INTERRUPT | + I915_DISPLAY_PORT_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | + I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | + I915_USER_INTERRUPT; + + if (IS_G4X(dev_priv)) + enable_mask |= I915_BSD_USER_INTERRUPT; + + GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); + + /* Interrupt setup is already guaranteed to be single-threaded, this is + * just to make the assert_spin_locked check happy. */ + spin_lock_irq(&dev_priv->irq_lock); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); + spin_unlock_irq(&dev_priv->irq_lock); i915_enable_asle_pipestat(dev_priv); @@ -3992,9 +3942,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) { struct drm_device *dev = arg; struct drm_i915_private *dev_priv = to_i915(dev); - u32 iir, new_iir; - u32 pipe_stats[I915_MAX_PIPES]; - int ret = IRQ_NONE, pipe; + irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) return IRQ_NONE; @@ -4002,121 +3950,46 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ disable_rpm_wakeref_asserts(dev_priv); - iir = I915_READ(IIR); - - for (;;) { - bool irq_received = (iir) != 0; - bool blc_event = false; - - /* Can't rely on pipestat interrupt bit in iir as it might - * have been cleared after the pipestat interrupt was received. - * It doesn't set the bit in iir again, but it still produces - * interrupts (for non-MSI). - */ - spin_lock(&dev_priv->irq_lock); - if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - - for_each_pipe(dev_priv, pipe) { - i915_reg_t reg = PIPESTAT(pipe); - pipe_stats[pipe] = I915_READ(reg); - - /* - * Clear the PIPE*STAT regs before the IIR - */ - if (pipe_stats[pipe] & 0x8000ffff) { - I915_WRITE(reg, pipe_stats[pipe]); - irq_received = true; - } - } - spin_unlock(&dev_priv->irq_lock); + do { + u32 pipe_stats[I915_MAX_PIPES] = {}; + u32 hotplug_status = 0; + u32 iir; - if (!irq_received) + iir = I915_READ(IIR); + if (iir == 0) break; ret = IRQ_HANDLED; - /* Consume port. 
Then clear IIR or we'll miss events */ - if (iir & I915_DISPLAY_PORT_INTERRUPT) { - u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); - if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); - } + if (iir & I915_DISPLAY_PORT_INTERRUPT) + hotplug_status = i9xx_hpd_irq_ack(dev_priv); + + /* Call regardless, as some status bits might not be + * signalled in iir */ + i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); I915_WRITE(IIR, iir); - new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) notify_ring(dev_priv->engine[RCS]); + if (iir & I915_BSD_USER_INTERRUPT) notify_ring(dev_priv->engine[VCS]); - for_each_pipe(dev_priv, pipe) { - if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - drm_handle_vblank(&dev_priv->drm, pipe); - - if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) - blc_event = true; - - if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); - - if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) - intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); - } - - if (blc_event || (iir & I915_ASLE_INTERRUPT)) - intel_opregion_asle_intr(dev_priv); + if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); - if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) - gmbus_irq_handler(dev_priv); + if (hotplug_status) + i9xx_hpd_irq_handler(dev_priv, hotplug_status); - /* With MSI, interrupts are only generated when iir - * transitions from zero to nonzero. If another bit got - * set while we were handling the existing iir bits, then - * we would never get another interrupt. - * - * This is fine on non-MSI as well, as if we hit this path - * we avoid exiting the interrupt handler only to generate - * another one. - * - * Note that for MSI this could cause a stray interrupt report - * if an interrupt landed in the time between writing IIR and - * the posting read. This should be rare enough to never - * trigger the 99% of 100,000 interrupts test for disabling - * stray interrupts. 
- */ - iir = new_iir; - } + i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); + } while (0); enable_rpm_wakeref_asserts(dev_priv); return ret; } -static void i965_irq_uninstall(struct drm_device * dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - int pipe; - - if (!dev_priv) - return; - - i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - - I915_WRITE(HWSTAM, 0xffffffff); - for_each_pipe(dev_priv, pipe) - I915_WRITE(PIPESTAT(pipe), 0); - I915_WRITE(IMR, 0xffffffff); - I915_WRITE(IER, 0x0); - - for_each_pipe(dev_priv, pipe) - I915_WRITE(PIPESTAT(pipe), - I915_READ(PIPESTAT(pipe)) & 0x8000ffff); - I915_WRITE(IIR, I915_READ(IIR)); -} - /** * intel_irq_init - initializes irq support * @dev_priv: i915 device instance @@ -4197,17 +4070,17 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (IS_CHERRYVIEW(dev_priv)) { dev->driver->irq_handler = cherryview_irq_handler; - dev->driver->irq_preinstall = cherryview_irq_preinstall; + dev->driver->irq_preinstall = cherryview_irq_reset; dev->driver->irq_postinstall = cherryview_irq_postinstall; - dev->driver->irq_uninstall = cherryview_irq_uninstall; + dev->driver->irq_uninstall = cherryview_irq_reset; dev->driver->enable_vblank = i965_enable_vblank; dev->driver->disable_vblank = i965_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else if (IS_VALLEYVIEW(dev_priv)) { dev->driver->irq_handler = valleyview_irq_handler; - dev->driver->irq_preinstall = valleyview_irq_preinstall; + dev->driver->irq_preinstall = valleyview_irq_reset; dev->driver->irq_postinstall = valleyview_irq_postinstall; - dev->driver->irq_uninstall = valleyview_irq_uninstall; + dev->driver->irq_uninstall = valleyview_irq_reset; dev->driver->enable_vblank = i965_enable_vblank; dev->driver->disable_vblank = i965_disable_vblank; dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; @@ -4215,7 +4088,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->irq_handler = gen8_irq_handler; dev->driver->irq_preinstall = gen8_irq_reset; dev->driver->irq_postinstall = gen8_irq_postinstall; - dev->driver->irq_uninstall = gen8_irq_uninstall; + dev->driver->irq_uninstall = gen8_irq_reset; dev->driver->enable_vblank = gen8_enable_vblank; dev->driver->disable_vblank = gen8_disable_vblank; if (IS_GEN9_LP(dev_priv)) @@ -4229,29 +4102,29 @@ void intel_irq_init(struct drm_i915_private *dev_priv) dev->driver->irq_handler = ironlake_irq_handler; dev->driver->irq_preinstall = ironlake_irq_reset; dev->driver->irq_postinstall = ironlake_irq_postinstall; - dev->driver->irq_uninstall = ironlake_irq_uninstall; + dev->driver->irq_uninstall = ironlake_irq_reset; dev->driver->enable_vblank = ironlake_enable_vblank; dev->driver->disable_vblank = ironlake_disable_vblank; dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; } else { if (IS_GEN2(dev_priv)) { - dev->driver->irq_preinstall = i8xx_irq_preinstall; + dev->driver->irq_preinstall = i8xx_irq_reset; dev->driver->irq_postinstall = i8xx_irq_postinstall; dev->driver->irq_handler = i8xx_irq_handler; - dev->driver->irq_uninstall = i8xx_irq_uninstall; + dev->driver->irq_uninstall = i8xx_irq_reset; dev->driver->enable_vblank = i8xx_enable_vblank; dev->driver->disable_vblank = i8xx_disable_vblank; } else if (IS_GEN3(dev_priv)) { - dev->driver->irq_preinstall = i915_irq_preinstall; + dev->driver->irq_preinstall = i915_irq_reset; dev->driver->irq_postinstall = i915_irq_postinstall; - dev->driver->irq_uninstall = i915_irq_uninstall; + 
dev->driver->irq_uninstall = i915_irq_reset; dev->driver->irq_handler = i915_irq_handler; dev->driver->enable_vblank = i8xx_enable_vblank; dev->driver->disable_vblank = i8xx_disable_vblank; } else { - dev->driver->irq_preinstall = i965_irq_preinstall; + dev->driver->irq_preinstall = i965_irq_reset; dev->driver->irq_postinstall = i965_irq_postinstall; - dev->driver->irq_uninstall = i965_irq_uninstall; + dev->driver->irq_uninstall = i965_irq_reset; dev->driver->irq_handler = i965_irq_handler; dev->driver->enable_vblank = i965_enable_vblank; dev->driver->disable_vblank = i965_disable_vblank; diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c new file mode 100644 index 000000000000..368c87d7ee9a --- /dev/null +++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c @@ -0,0 +1,109 @@ +/* + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + * + * + * Copyright (c) 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
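A pattern worth noting in the intel_irq_init() rewiring above: the separate per-platform irq_preinstall and irq_uninstall callbacks are gone, and both hooks now point at a single *_irq_reset routine that masks everything and clears stale status. A trivial compilable sketch of that wiring follows; struct irq_ops and the function body are invented for illustration.

#include <stdio.h>

struct irq_ops {
	void (*preinstall)(void);
	void (*uninstall)(void);
};

/* One quiesce routine serves as both the pre-install reset and the
 * teardown: disable (IER/IMR), clear PIPESTAT enables then status,
 * finally ack anything left in IIR. */
static void fake_irq_reset(void)
{
	puts("mask interrupts, clear pipestat, ack iir");
}

int main(void)
{
	struct irq_ops ops = {
		.preinstall = fake_irq_reset,
		.uninstall = fake_irq_reset,
	};

	ops.preinstall();	/* before request_irq() */
	ops.uninstall();	/* on driver unload */
	return 0;
}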
+ * + */ + +#include <linux/sysfs.h> + +#include "i915_drv.h" +#include "i915_oa_cflgt2.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0x2740), 0x00000000 }, + { _MMIO(0x2744), 0x00800000 }, + { _MMIO(0x2714), 0xf0800000 }, + { _MMIO(0x2710), 0x00000000 }, + { _MMIO(0x2724), 0xf0800000 }, + { _MMIO(0x2720), 0x00000000 }, + { _MMIO(0x2770), 0x00000004 }, + { _MMIO(0x2774), 0x00000000 }, + { _MMIO(0x2778), 0x00000003 }, + { _MMIO(0x277c), 0x00000000 }, + { _MMIO(0x2780), 0x00000007 }, + { _MMIO(0x2784), 0x00000000 }, + { _MMIO(0x2788), 0x00100002 }, + { _MMIO(0x278c), 0x0000fff7 }, + { _MMIO(0x2790), 0x00100002 }, + { _MMIO(0x2794), 0x0000ffcf }, + { _MMIO(0x2798), 0x00100082 }, + { _MMIO(0x279c), 0x0000ffef }, + { _MMIO(0x27a0), 0x001000c2 }, + { _MMIO(0x27a4), 0x0000ffe7 }, + { _MMIO(0x27a8), 0x00100001 }, + { _MMIO(0x27ac), 0x0000ffe7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x9840), 0x00000080 }, + { _MMIO(0x9888), 0x11810000 }, + { _MMIO(0x9888), 0x07810013 }, + { _MMIO(0x9888), 0x1f810000 }, + { _MMIO(0x9888), 0x1d810000 }, + { _MMIO(0x9888), 0x1b930040 }, + { _MMIO(0x9888), 0x07e54000 }, + { _MMIO(0x9888), 0x1f908000 }, + { _MMIO(0x9888), 0x11900000 }, + { _MMIO(0x9888), 0x37900000 }, + { _MMIO(0x9888), 0x53900000 }, + { _MMIO(0x9888), 0x45900000 }, + { _MMIO(0x9888), 0x33900000 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv) +{ + strncpy(dev_priv->perf.oa.test_config.uuid, + "74fb4902-d3d3-4237-9e90-cbdc68d0a446", + UUID_STRING_LEN); + dev_priv->perf.oa.test_config.id = 1; + + dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446"; + dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs; + + dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr; + + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h new file mode 100644 index 000000000000..1f3268ef2ea2 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h @@ -0,0 +1,34 @@ +/* + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! 
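The generated i915_oa_cflgt2.c above follows the same shape as the other per-SKU OA files: constant register/value tables, lengths derived with ARRAY_SIZE() at the point of registration, and a sysfs "id" attribute for the test config. A standalone sketch of that table-plus-length idiom; struct oa_reg and oa_config here are simplified stand-ins, not the driver's types.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Stand-in for struct i915_oa_reg: an MMIO offset plus a value. */
struct oa_reg {
	uint32_t addr;
	uint32_t value;
};

static const struct oa_reg mux_config_test[] = {
	{ 0x9888, 0x11810000 },
	{ 0x9888, 0x07810013 },
};

struct oa_config {
	const struct oa_reg *mux_regs;
	size_t mux_regs_len;
};

int main(void)
{
	/* Register the table together with its length, as the generated
	 * i915_perf_load_test_config_*() helpers do. */
	struct oa_config cfg = {
		.mux_regs = mux_config_test,
		.mux_regs_len = ARRAY_SIZE(mux_config_test),
	};

	printf("%zu mux writes, first value 0x%08x\n",
	       cfg.mux_regs_len, (unsigned int)cfg.mux_regs[0].value);
	return 0;
}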
+ * + * + * Copyright (c) 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __I915_OA_CFLGT2_H__ +#define __I915_OA_CFLGT2_H__ + +extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 8ab003dca113..9dff323a83d3 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -25,235 +25,171 @@ #include "i915_params.h" #include "i915_drv.h" -struct i915_params i915 __read_mostly = { - .modeset = -1, - .panel_ignore_lid = 1, - .semaphores = -1, - .lvds_channel_mode = 0, - .panel_use_ssc = -1, - .vbt_sdvo_panel_type = -1, - .enable_rc6 = -1, - .enable_dc = -1, - .enable_fbc = -1, - .enable_execlists = -1, - .enable_hangcheck = true, - .enable_ppgtt = -1, - .enable_psr = -1, - .alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT), - .disable_power_well = -1, - .enable_ips = 1, - .fastboot = 0, - .prefault_disable = 0, - .load_detect_test = 0, - .force_reset_modeset_test = 0, - .reset = 2, - .error_capture = true, - .invert_brightness = 0, - .disable_display = 0, - .enable_cmd_parser = true, - .use_mmio_flip = 0, - .mmio_debug = 0, - .verbose_state_checks = 1, - .nuclear_pageflip = 0, - .edp_vswing = 0, - .enable_guc_loading = 0, - .enable_guc_submission = 0, - .guc_log_level = -1, - .guc_firmware_path = NULL, - .huc_firmware_path = NULL, - .enable_dp_mst = true, - .inject_load_failure = 0, - .enable_dpcd_backlight = false, - .enable_gvt = false, +#define i915_param_named(name, T, perm, desc) \ + module_param_named(name, i915_modparams.name, T, perm); \ + MODULE_PARM_DESC(name, desc) +#define i915_param_named_unsafe(name, T, perm, desc) \ + module_param_named_unsafe(name, i915_modparams.name, T, perm); \ + MODULE_PARM_DESC(name, desc) + +struct i915_params i915_modparams __read_mostly = { +#define MEMBER(T, member, value) .member = (value), + I915_PARAMS_FOR_EACH(MEMBER) +#undef MEMBER }; -module_param_named(modeset, i915.modeset, int, 0400); -MODULE_PARM_DESC(modeset, +i915_param_named(modeset, int, 0400, "Use kernel modesetting [KMS] (0=disable, " "1=on, -1=force vga console preference [default])"); -module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); -MODULE_PARM_DESC(panel_ignore_lid, +i915_param_named_unsafe(panel_ignore_lid, int, 0600, "Override lid status (0=autodetect, 1=autodetect disabled [default], " 
"-1=force lid closed, -2=force lid open)"); -module_param_named_unsafe(semaphores, i915.semaphores, int, 0400); -MODULE_PARM_DESC(semaphores, +i915_param_named_unsafe(semaphores, int, 0400, "Use semaphores for inter-ring sync " "(default: -1 (use per-chip defaults))"); -module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400); -MODULE_PARM_DESC(enable_rc6, +i915_param_named_unsafe(enable_rc6, int, 0400, "Enable power-saving render C-state 6. " "Different stages can be selected via bitmask values " "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. " "default: -1 (use per-chip default)"); -module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400); -MODULE_PARM_DESC(enable_dc, +i915_param_named_unsafe(enable_dc, int, 0400, "Enable power-saving display C-states. " "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)"); -module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600); -MODULE_PARM_DESC(enable_fbc, +i915_param_named_unsafe(enable_fbc, int, 0600, "Enable frame buffer compression for power savings " "(default: -1 (use per-chip default))"); -module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400); -MODULE_PARM_DESC(lvds_channel_mode, +i915_param_named_unsafe(lvds_channel_mode, int, 0400, "Specify LVDS channel mode " "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); -module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600); -MODULE_PARM_DESC(lvds_use_ssc, +i915_param_named_unsafe(panel_use_ssc, int, 0600, "Use Spread Spectrum Clock with panels [LVDS/eDP] " "(default: auto from VBT)"); -module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400); -MODULE_PARM_DESC(vbt_sdvo_panel_type, +i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400, "Override/Ignore selection of SDVO panel mode in the VBT " "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); -module_param_named_unsafe(reset, i915.reset, int, 0600); -MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); +i915_param_named_unsafe(reset, int, 0600, + "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])"); -module_param_named_unsafe(vbt_firmware, i915.vbt_firmware, charp, 0400); -MODULE_PARM_DESC(vbt_firmware, - "Load VBT from specified file under /lib/firmware"); +i915_param_named_unsafe(vbt_firmware, charp, 0400, + "Load VBT from specified file under /lib/firmware"); #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) -module_param_named(error_capture, i915.error_capture, bool, 0600); -MODULE_PARM_DESC(error_capture, +i915_param_named(error_capture, bool, 0600, "Record the GPU state following a hang. " "This information in /sys/class/drm/card<N>/error is vital for " "triaging and debugging hangs."); #endif -module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644); -MODULE_PARM_DESC(enable_hangcheck, +i915_param_named_unsafe(enable_hangcheck, bool, 0644, "Periodically check GPU activity for detecting hangs. " "WARNING: Disabling this can cause system wide hangs. " "(default: true)"); -module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400); -MODULE_PARM_DESC(enable_ppgtt, +i915_param_named_unsafe(enable_ppgtt, int, 0400, "Override PPGTT usage. 
" "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)"); -module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400); -MODULE_PARM_DESC(enable_execlists, +i915_param_named_unsafe(enable_execlists, int, 0400, "Override execlists usage. " "(-1=auto [default], 0=disabled, 1=enabled)"); -module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600); -MODULE_PARM_DESC(enable_psr, "Enable PSR " - "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " - "Default: -1 (use per-chip default)"); +i915_param_named_unsafe(enable_psr, int, 0600, + "Enable PSR " + "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " + "Default: -1 (use per-chip default)"); -module_param_named_unsafe(alpha_support, i915.alpha_support, bool, 0400); -MODULE_PARM_DESC(alpha_support, +i915_param_named_unsafe(alpha_support, bool, 0400, "Enable alpha quality driver support for latest hardware. " "See also CONFIG_DRM_I915_ALPHA_SUPPORT."); -module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400); -MODULE_PARM_DESC(disable_power_well, +i915_param_named_unsafe(disable_power_well, int, 0400, "Disable display power wells when possible " "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)"); -module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600); -MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); +i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)"); -module_param_named(fastboot, i915.fastboot, bool, 0600); -MODULE_PARM_DESC(fastboot, +i915_param_named(fastboot, bool, 0600, "Try to skip unnecessary mode sets at boot time (default: false)"); -module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); -MODULE_PARM_DESC(prefault_disable, +i915_param_named_unsafe(prefault_disable, bool, 0600, "Disable page prefaulting for pread/pwrite/reloc (default:false). " "For developers only."); -module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600); -MODULE_PARM_DESC(load_detect_test, +i915_param_named_unsafe(load_detect_test, bool, 0600, "Force-enable the VGA load detect code for testing (default:false). " "For developers only."); -module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600); -MODULE_PARM_DESC(force_reset_modeset_test, +i915_param_named_unsafe(force_reset_modeset_test, bool, 0600, "Force a modeset during gpu reset for testing (default:false). " "For developers only."); -module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600); -MODULE_PARM_DESC(invert_brightness, +i915_param_named_unsafe(invert_brightness, int, 0600, "Invert backlight brightness " "(-1 force normal, 0 machine defaults, 1 force inversion), please " "report PCI device ID, subsystem vendor and subsystem device ID " "to dri-devel@lists.freedesktop.org, if your machine needs it. 
" "It will then be included in an upcoming module version."); -module_param_named(disable_display, i915.disable_display, bool, 0400); -MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); +i915_param_named(disable_display, bool, 0400, + "Disable display (default: false)"); -module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400); -MODULE_PARM_DESC(enable_cmd_parser, - "Enable command parsing (true=enabled [default], false=disabled)"); +i915_param_named_unsafe(enable_cmd_parser, bool, 0400, + "Enable command parsing (true=enabled [default], false=disabled)"); -module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600); -MODULE_PARM_DESC(use_mmio_flip, - "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); +i915_param_named_unsafe(use_mmio_flip, int, 0600, + "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); -module_param_named(mmio_debug, i915.mmio_debug, int, 0600); -MODULE_PARM_DESC(mmio_debug, +i915_param_named(mmio_debug, int, 0600, "Enable the MMIO debug code for the first N failures (default: off). " "This may negatively affect performance."); -module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); -MODULE_PARM_DESC(verbose_state_checks, +i915_param_named(verbose_state_checks, bool, 0600, "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions."); -module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0400); -MODULE_PARM_DESC(nuclear_pageflip, - "Force enable atomic functionality on platforms that don't have full support yet."); +i915_param_named_unsafe(nuclear_pageflip, bool, 0400, + "Force enable atomic functionality on platforms that don't have full support yet."); /* WA to get away with the default setting in VBT for early platforms.Will be removed */ -module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400); -MODULE_PARM_DESC(edp_vswing, - "Ignore/Override vswing pre-emph table selection from VBT " - "(0=use value from vbt [default], 1=low power swing(200mV)," - "2=default swing(400mV))"); - -module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); -MODULE_PARM_DESC(enable_guc_loading, - "Enable GuC firmware loading " - "(-1=auto, 0=never [default], 1=if available, 2=required)"); - -module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); -MODULE_PARM_DESC(enable_guc_submission, - "Enable GuC submission " - "(-1=auto, 0=never [default], 1=if available, 2=required)"); - -module_param_named(guc_log_level, i915.guc_log_level, int, 0400); -MODULE_PARM_DESC(guc_log_level, +i915_param_named_unsafe(edp_vswing, int, 0400, + "Ignore/Override vswing pre-emph table selection from VBT " + "(0=use value from vbt [default], 1=low power swing(200mV)," + "2=default swing(400mV))"); + +i915_param_named_unsafe(enable_guc_loading, int, 0400, + "Enable GuC firmware loading " + "(-1=auto, 0=never [default], 1=if available, 2=required)"); + +i915_param_named_unsafe(enable_guc_submission, int, 0400, + "Enable GuC submission " + "(-1=auto, 0=never [default], 1=if available, 2=required)"); + +i915_param_named(guc_log_level, int, 0400, "GuC firmware logging level (-1:disabled (default), 0-3:enabled)"); -module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400); -MODULE_PARM_DESC(guc_firmware_path, +i915_param_named_unsafe(guc_firmware_path, charp, 0400, "GuC firmware path to use instead of the default one"); -module_param_named_unsafe(huc_firmware_path, 
i915.huc_firmware_path, charp, 0400); -MODULE_PARM_DESC(huc_firmware_path, +i915_param_named_unsafe(huc_firmware_path, charp, 0400, "HuC firmware path to use instead of the default one"); -module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600); -MODULE_PARM_DESC(enable_dp_mst, +i915_param_named_unsafe(enable_dp_mst, bool, 0600, "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)"); -module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); -MODULE_PARM_DESC(inject_load_failure, + +i915_param_named_unsafe(inject_load_failure, uint, 0400, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); -module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600); -MODULE_PARM_DESC(enable_dpcd_backlight, + +i915_param_named(enable_dpcd_backlight, bool, 0600, "Enable support for DPCD backlight control (default:false)"); -module_param_named(enable_gvt, i915.enable_gvt, bool, 0400); -MODULE_PARM_DESC(enable_gvt, +i915_param_named(enable_gvt, bool, 0400, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index ac844709c97e..4f3f8d650194 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -27,56 +27,56 @@ #include <linux/cache.h> /* for __read_mostly */ -#define I915_PARAMS_FOR_EACH(func) \ - func(char *, vbt_firmware); \ - func(int, modeset); \ - func(int, panel_ignore_lid); \ - func(int, semaphores); \ - func(int, lvds_channel_mode); \ - func(int, panel_use_ssc); \ - func(int, vbt_sdvo_panel_type); \ - func(int, enable_rc6); \ - func(int, enable_dc); \ - func(int, enable_fbc); \ - func(int, enable_ppgtt); \ - func(int, enable_execlists); \ - func(int, enable_psr); \ - func(int, disable_power_well); \ - func(int, enable_ips); \ - func(int, invert_brightness); \ - func(int, enable_guc_loading); \ - func(int, enable_guc_submission); \ - func(int, guc_log_level); \ - func(char *, guc_firmware_path); \ - func(char *, huc_firmware_path); \ - func(int, use_mmio_flip); \ - func(int, mmio_debug); \ - func(int, edp_vswing); \ - func(int, reset); \ - func(unsigned int, inject_load_failure); \ +#define I915_PARAMS_FOR_EACH(param) \ + param(char *, vbt_firmware, NULL) \ + param(int, modeset, -1) \ + param(int, panel_ignore_lid, 1) \ + param(int, semaphores, -1) \ + param(int, lvds_channel_mode, 0) \ + param(int, panel_use_ssc, -1) \ + param(int, vbt_sdvo_panel_type, -1) \ + param(int, enable_rc6, -1) \ + param(int, enable_dc, -1) \ + param(int, enable_fbc, -1) \ + param(int, enable_ppgtt, -1) \ + param(int, enable_execlists, -1) \ + param(int, enable_psr, -1) \ + param(int, disable_power_well, -1) \ + param(int, enable_ips, 1) \ + param(int, invert_brightness, 0) \ + param(int, enable_guc_loading, 0) \ + param(int, enable_guc_submission, 0) \ + param(int, guc_log_level, -1) \ + param(char *, guc_firmware_path, NULL) \ + param(char *, huc_firmware_path, NULL) \ + param(int, use_mmio_flip, 0) \ + param(int, mmio_debug, 0) \ + param(int, edp_vswing, 0) \ + param(int, reset, 2) \ + param(unsigned int, inject_load_failure, 0) \ /* leave bools at the end to not create holes */ \ - func(bool, alpha_support); \ - func(bool, enable_cmd_parser); \ - func(bool, enable_hangcheck); \ - func(bool, fastboot); \ - func(bool, prefault_disable); \ - func(bool, load_detect_test); \ - func(bool, 
force_reset_modeset_test); \ - func(bool, error_capture); \ - func(bool, disable_display); \ - func(bool, verbose_state_checks); \ - func(bool, nuclear_pageflip); \ - func(bool, enable_dp_mst); \ - func(bool, enable_dpcd_backlight); \ - func(bool, enable_gvt) + param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ + param(bool, enable_cmd_parser, true) \ + param(bool, enable_hangcheck, true) \ + param(bool, fastboot, false) \ + param(bool, prefault_disable, false) \ + param(bool, load_detect_test, false) \ + param(bool, force_reset_modeset_test, false) \ + param(bool, error_capture, true) \ + param(bool, disable_display, false) \ + param(bool, verbose_state_checks, true) \ + param(bool, nuclear_pageflip, false) \ + param(bool, enable_dp_mst, true) \ + param(bool, enable_dpcd_backlight, false) \ + param(bool, enable_gvt, false) -#define MEMBER(T, member) T member +#define MEMBER(T, member, ...) T member; struct i915_params { I915_PARAMS_FOR_EACH(MEMBER); }; #undef MEMBER -extern struct i915_params i915 __read_mostly; +extern struct i915_params i915_modparams __read_mostly; #endif diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 09d97e0990b7..da60866b6628 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -63,22 +63,23 @@ .hws_needs_physical = 1, \ .unfenced_needs_alignment = 1, \ .ring_mask = RENDER_RING, \ + .has_snoop = true, \ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_i830_info = { +static const struct intel_device_info intel_i830_info __initconst = { GEN2_FEATURES, .platform = INTEL_I830, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, /* legal, last one wins */ }; -static const struct intel_device_info intel_i845g_info = { +static const struct intel_device_info intel_i845g_info __initconst = { GEN2_FEATURES, .platform = INTEL_I845G, }; -static const struct intel_device_info intel_i85x_info = { +static const struct intel_device_info intel_i85x_info __initconst = { GEN2_FEATURES, .platform = INTEL_I85X, .is_mobile = 1, .num_pipes = 2, /* legal, last one wins */ @@ -86,7 +87,7 @@ static const struct intel_device_info intel_i85x_info = { .has_fbc = 1, }; -static const struct intel_device_info intel_i865g_info = { +static const struct intel_device_info intel_i865g_info __initconst = { GEN2_FEATURES, .platform = INTEL_I865G, }; @@ -95,10 +96,11 @@ static const struct intel_device_info intel_i865g_info = { .gen = 3, .num_pipes = 2, \ .has_gmch_display = 1, \ .ring_mask = RENDER_RING, \ + .has_snoop = true, \ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_i915g_info = { +static const struct intel_device_info intel_i915g_info __initconst = { GEN3_FEATURES, .platform = INTEL_I915G, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, @@ -106,7 +108,7 @@ static const struct intel_device_info intel_i915g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i915gm_info = { +static const struct intel_device_info intel_i915gm_info __initconst = { GEN3_FEATURES, .platform = INTEL_I915GM, .is_mobile = 1, @@ -118,7 +120,7 @@ static const struct intel_device_info intel_i915gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945g_info = { +static const struct intel_device_info intel_i945g_info __initconst = { GEN3_FEATURES, .platform = INTEL_I945G, .has_hotplug = 1, .cursor_needs_physical = 1, @@ -127,7 +129,7 @@ static const 
struct intel_device_info intel_i945g_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_i945gm_info = { +static const struct intel_device_info intel_i945gm_info __initconst = { GEN3_FEATURES, .platform = INTEL_I945GM, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, @@ -138,14 +140,14 @@ static const struct intel_device_info intel_i945gm_info = { .unfenced_needs_alignment = 1, }; -static const struct intel_device_info intel_g33_info = { +static const struct intel_device_info intel_g33_info __initconst = { GEN3_FEATURES, .platform = INTEL_G33, .has_hotplug = 1, .has_overlay = 1, }; -static const struct intel_device_info intel_pineview_info = { +static const struct intel_device_info intel_pineview_info __initconst = { GEN3_FEATURES, .platform = INTEL_PINEVIEW, .is_mobile = 1, .has_hotplug = 1, @@ -157,33 +159,36 @@ static const struct intel_device_info intel_pineview_info = { .has_hotplug = 1, \ .has_gmch_display = 1, \ .ring_mask = RENDER_RING, \ + .has_snoop = true, \ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_i965g_info = { +static const struct intel_device_info intel_i965g_info __initconst = { GEN4_FEATURES, .platform = INTEL_I965G, .has_overlay = 1, .hws_needs_physical = 1, + .has_snoop = false, }; -static const struct intel_device_info intel_i965gm_info = { +static const struct intel_device_info intel_i965gm_info __initconst = { GEN4_FEATURES, .platform = INTEL_I965GM, .is_mobile = 1, .has_fbc = 1, .has_overlay = 1, .supports_tv = 1, .hws_needs_physical = 1, + .has_snoop = false, }; -static const struct intel_device_info intel_g45_info = { +static const struct intel_device_info intel_g45_info __initconst = { GEN4_FEATURES, .platform = INTEL_G45, .has_pipe_cxsr = 1, .ring_mask = RENDER_RING | BSD_RING, }; -static const struct intel_device_info intel_gm45_info = { +static const struct intel_device_info intel_gm45_info __initconst = { GEN4_FEATURES, .platform = INTEL_GM45, .is_mobile = 1, .has_fbc = 1, @@ -195,17 +200,17 @@ static const struct intel_device_info intel_gm45_info = { #define GEN5_FEATURES \ .gen = 5, .num_pipes = 2, \ .has_hotplug = 1, \ - .has_gmbus_irq = 1, \ .ring_mask = RENDER_RING | BSD_RING, \ + .has_snoop = true, \ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_ironlake_d_info = { +static const struct intel_device_info intel_ironlake_d_info __initconst = { GEN5_FEATURES, .platform = INTEL_IRONLAKE, }; -static const struct intel_device_info intel_ironlake_m_info = { +static const struct intel_device_info intel_ironlake_m_info __initconst = { GEN5_FEATURES, .platform = INTEL_IRONLAKE, .is_mobile = 1, .has_fbc = 1, @@ -219,20 +224,38 @@ static const struct intel_device_info intel_ironlake_m_info = { .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ - .has_gmbus_irq = 1, \ .has_aliasing_ppgtt = 1, \ GEN_DEFAULT_PIPEOFFSETS, \ CURSOR_OFFSETS -static const struct intel_device_info intel_sandybridge_d_info = { - GEN6_FEATURES, - .platform = INTEL_SANDYBRIDGE, +#define SNB_D_PLATFORM \ + GEN6_FEATURES, \ + .platform = INTEL_SANDYBRIDGE + +static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = { + SNB_D_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_sandybridge_m_info = { - GEN6_FEATURES, - .platform = INTEL_SANDYBRIDGE, - .is_mobile = 1, +static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = { + SNB_D_PLATFORM, + .gt = 2, +}; + +#define SNB_M_PLATFORM \ + GEN6_FEATURES, \ 
+ .platform = INTEL_SANDYBRIDGE, \ + .is_mobile = 1 + + +static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = { + SNB_M_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = { + SNB_M_PLATFORM, + .gt = 2, }; #define GEN7_FEATURES \ @@ -243,33 +266,51 @@ static const struct intel_device_info intel_sandybridge_m_info = { .has_llc = 1, \ .has_rc6 = 1, \ .has_rc6p = 1, \ - .has_gmbus_irq = 1, \ .has_aliasing_ppgtt = 1, \ .has_full_ppgtt = 1, \ GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS -static const struct intel_device_info intel_ivybridge_d_info = { - GEN7_FEATURES, - .platform = INTEL_IVYBRIDGE, - .has_l3_dpf = 1, +#define IVB_D_PLATFORM \ + GEN7_FEATURES, \ + .platform = INTEL_IVYBRIDGE, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = { + IVB_D_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_ivybridge_m_info = { - GEN7_FEATURES, - .platform = INTEL_IVYBRIDGE, - .is_mobile = 1, - .has_l3_dpf = 1, +static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = { + IVB_D_PLATFORM, + .gt = 2, +}; + +#define IVB_M_PLATFORM \ + GEN7_FEATURES, \ + .platform = INTEL_IVYBRIDGE, \ + .is_mobile = 1, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = { + IVB_M_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = { + IVB_M_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_ivybridge_q_info = { +static const struct intel_device_info intel_ivybridge_q_info __initconst = { GEN7_FEATURES, .platform = INTEL_IVYBRIDGE, + .gt = 2, .num_pipes = 0, /* legal, last one wins */ .has_l3_dpf = 1, }; -static const struct intel_device_info intel_valleyview_info = { +static const struct intel_device_info intel_valleyview_info __initconst = { .platform = INTEL_VALLEYVIEW, .gen = 7, .is_lp = 1, @@ -277,11 +318,11 @@ static const struct intel_device_info intel_valleyview_info = { .has_psr = 1, .has_runtime_pm = 1, .has_rc6 = 1, - .has_gmbus_irq = 1, .has_gmch_display = 1, .has_hotplug = 1, .has_aliasing_ppgtt = 1, .has_full_ppgtt = 1, + .has_snoop = true, .ring_mask = RENDER_RING | BSD_RING | BLT_RING, .display_mmio_offset = VLV_DISPLAY_BASE, GEN_DEFAULT_PIPEOFFSETS, @@ -299,10 +340,24 @@ static const struct intel_device_info intel_valleyview_info = { .has_rc6p = 0 /* RC6p removed-by HSW */, \ .has_runtime_pm = 1 -static const struct intel_device_info intel_haswell_info = { - HSW_FEATURES, - .platform = INTEL_HASWELL, - .has_l3_dpf = 1, +#define HSW_PLATFORM \ + HSW_FEATURES, \ + .platform = INTEL_HASWELL, \ + .has_l3_dpf = 1 + +static const struct intel_device_info intel_haswell_gt1_info __initconst = { + HSW_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_haswell_gt2_info __initconst = { + HSW_PLATFORM, + .gt = 2, +}; + +static const struct intel_device_info intel_haswell_gt3_info __initconst = { + HSW_PLATFORM, + .gt = 3, }; #define BDW_FEATURES \ @@ -318,16 +373,31 @@ static const struct intel_device_info intel_haswell_info = { .gen = 8, \ .platform = INTEL_BROADWELL -static const struct intel_device_info intel_broadwell_info = { +static const struct intel_device_info intel_broadwell_gt1_info __initconst = { BDW_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_broadwell_gt3_info = { +static const struct intel_device_info intel_broadwell_gt2_info __initconst = { BDW_PLATFORM, + .gt 
= 2, +}; + +static const struct intel_device_info intel_broadwell_rsvd_info __initconst = { + BDW_PLATFORM, + .gt = 3, + /* According to the device ID those devices are GT3, they were + * previously treated as not GT3, keep it like that. + */ +}; + +static const struct intel_device_info intel_broadwell_gt3_info __initconst = { + BDW_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; -static const struct intel_device_info intel_cherryview_info = { +static const struct intel_device_info intel_cherryview_info __initconst = { .gen = 8, .num_pipes = 3, .has_hotplug = 1, .is_lp = 1, @@ -338,12 +408,12 @@ static const struct intel_device_info intel_cherryview_info = { .has_runtime_pm = 1, .has_resource_streamer = 1, .has_rc6 = 1, - .has_gmbus_irq = 1, .has_logical_ring_contexts = 1, .has_gmch_display = 1, .has_aliasing_ppgtt = 1, .has_full_ppgtt = 1, .has_reset_engine = 1, + .has_snoop = true, .display_mmio_offset = VLV_DISPLAY_BASE, GEN_CHV_PIPEOFFSETS, CURSOR_OFFSETS, @@ -358,13 +428,29 @@ static const struct intel_device_info intel_cherryview_info = { .has_guc = 1, \ .ddb_size = 896 -static const struct intel_device_info intel_skylake_info = { +static const struct intel_device_info intel_skylake_gt1_info __initconst = { SKL_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_skylake_gt3_info = { +static const struct intel_device_info intel_skylake_gt2_info __initconst = { SKL_PLATFORM, - .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .gt = 2, +}; + +#define SKL_GT3_PLUS_PLATFORM \ + SKL_PLATFORM, \ + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING + + +static const struct intel_device_info intel_skylake_gt3_info __initconst = { + SKL_GT3_PLUS_PLATFORM, + .gt = 3, +}; + +static const struct intel_device_info intel_skylake_gt4_info __initconst = { + SKL_GT3_PLUS_PLATFORM, + .gt = 4, }; #define GEN9_LP_FEATURES \ @@ -383,25 +469,25 @@ static const struct intel_device_info intel_skylake_gt3_info = { .has_resource_streamer = 1, \ .has_rc6 = 1, \ .has_dp_mst = 1, \ - .has_gmbus_irq = 1, \ .has_logical_ring_contexts = 1, \ .has_guc = 1, \ .has_aliasing_ppgtt = 1, \ .has_full_ppgtt = 1, \ .has_full_48bit_ppgtt = 1, \ .has_reset_engine = 1, \ + .has_snoop = true, \ + .has_ipc = 1, \ GEN_DEFAULT_PIPEOFFSETS, \ IVB_CURSOR_OFFSETS, \ BDW_COLORS -static const struct intel_device_info intel_broxton_info = { +static const struct intel_device_info intel_broxton_info __initconst = { GEN9_LP_FEATURES, .platform = INTEL_BROXTON, .ddb_size = 512, - .has_reset_engine = false, }; -static const struct intel_device_info intel_geminilake_info = { +static const struct intel_device_info intel_geminilake_info __initconst = { GEN9_LP_FEATURES, .platform = INTEL_GEMINILAKE, .ddb_size = 1024, @@ -414,42 +500,59 @@ static const struct intel_device_info intel_geminilake_info = { .platform = INTEL_KABYLAKE, \ .has_csr = 1, \ .has_guc = 1, \ + .has_ipc = 1, \ .ddb_size = 896 -static const struct intel_device_info intel_kabylake_info = { +static const struct intel_device_info intel_kabylake_gt1_info __initconst = { KBL_PLATFORM, + .gt = 1, }; -static const struct intel_device_info intel_kabylake_gt3_info = { +static const struct intel_device_info intel_kabylake_gt2_info __initconst = { KBL_PLATFORM, + .gt = 2, +}; + +static const struct intel_device_info intel_kabylake_gt3_info __initconst = { + KBL_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; #define CFL_PLATFORM 
\ - .is_alpha_support = 1, \ BDW_FEATURES, \ .gen = 9, \ .platform = INTEL_COFFEELAKE, \ .has_csr = 1, \ .has_guc = 1, \ + .has_ipc = 1, \ .ddb_size = 896 -static const struct intel_device_info intel_coffeelake_info = { +static const struct intel_device_info intel_coffeelake_gt1_info __initconst = { + CFL_PLATFORM, + .gt = 1, +}; + +static const struct intel_device_info intel_coffeelake_gt2_info __initconst = { CFL_PLATFORM, + .gt = 2, }; -static const struct intel_device_info intel_coffeelake_gt3_info = { +static const struct intel_device_info intel_coffeelake_gt3_info __initconst = { CFL_PLATFORM, + .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; -static const struct intel_device_info intel_cannonlake_info = { +static const struct intel_device_info intel_cannonlake_gt2_info __initconst = { BDW_FEATURES, .is_alpha_support = 1, .platform = INTEL_CANNONLAKE, .gen = 10, + .gt = 2, .ddb_size = 1024, .has_csr = 1, + .has_ipc = 1, .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 } }; @@ -476,31 +579,40 @@ static const struct pci_device_id pciidlist[] = { INTEL_PINEVIEW_IDS(&intel_pineview_info), INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), - INTEL_SNB_D_IDS(&intel_sandybridge_d_info), - INTEL_SNB_M_IDS(&intel_sandybridge_m_info), + INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info), + INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info), + INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info), + INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info), INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ - INTEL_IVB_M_IDS(&intel_ivybridge_m_info), - INTEL_IVB_D_IDS(&intel_ivybridge_d_info), - INTEL_HSW_IDS(&intel_haswell_info), + INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info), + INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info), + INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info), + INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info), + INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info), + INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info), + INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info), INTEL_VLV_IDS(&intel_valleyview_info), - INTEL_BDW_GT12_IDS(&intel_broadwell_info), + INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info), + INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info), INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info), - INTEL_BDW_RSVD_IDS(&intel_broadwell_info), + INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info), INTEL_CHV_IDS(&intel_cherryview_info), - INTEL_SKL_GT1_IDS(&intel_skylake_info), - INTEL_SKL_GT2_IDS(&intel_skylake_info), + INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info), + INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info), INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), - INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), + INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info), INTEL_BXT_IDS(&intel_broxton_info), INTEL_GLK_IDS(&intel_geminilake_info), - INTEL_KBL_GT1_IDS(&intel_kabylake_info), - INTEL_KBL_GT2_IDS(&intel_kabylake_info), + INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info), + INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info), INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), - INTEL_CFL_S_IDS(&intel_coffeelake_info), - INTEL_CFL_H_IDS(&intel_coffeelake_info), - INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info), - INTEL_CNL_IDS(&intel_cannonlake_info), + INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), + INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), + INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), + INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info), + 
INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info), {0, 0, 0} }; MODULE_DEVICE_TABLE(pci, pciidlist); @@ -519,7 +631,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (struct intel_device_info *) ent->driver_data; int err; - if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) { + if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) { DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n" "See CONFIG_DRM_I915_ALPHA_SUPPORT or i915.alpha_support module parameter\n" "to enable support in this kernel version, or check for kernel updates.\n"); @@ -577,10 +689,10 @@ static int __init i915_init(void) * vga_text_mode_force boot option. */ - if (i915.modeset == 0) + if (i915_modparams.modeset == 0) use_kms = false; - if (vgacon_text_force() && i915.modeset == -1) + if (vgacon_text_force() && i915_modparams.modeset == -1) use_kms = false; if (!use_kms) { diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 94185d610673..1383a2995a69 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -206,6 +206,7 @@ #include "i915_oa_kblgt2.h" #include "i915_oa_kblgt3.h" #include "i915_oa_glk.h" +#include "i915_oa_cflgt2.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -1213,7 +1214,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id; else { struct intel_engine_cs *engine = dev_priv->engine[RCS]; @@ -1259,7 +1260,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream) { struct drm_i915_private *dev_priv = stream->dev_priv; - if (i915.enable_execlists) { + if (i915_modparams.enable_execlists) { dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID; } else { struct intel_engine_cs *engine = dev_priv->engine[RCS]; @@ -1850,8 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv, * be read back from automatically triggered reports, as part of the * RPT_ID field. 
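These i915_pci.c and i915_perf.c call sites show the other half of the i915_params rework: the global is renamed from i915 to i915_modparams, and its members, defaults and module_param_named() boilerplate are all generated from the single I915_PARAMS_FOR_EACH() table earlier in the diff, so the struct and its initializer cannot drift apart. Below is a compilable miniature of that X-macro pattern, trimmed to a hypothetical three-entry table.

#include <stdio.h>

/* One row per parameter: type, name, default value. */
#define PARAMS_FOR_EACH(param) \
	param(int, modeset, -1) \
	param(int, enable_ppgtt, -1) \
	param(int, enable_hangcheck, 1)

/* Expand once to declare the struct members... */
#define MEMBER(T, name, value) T name;
struct params {
	PARAMS_FOR_EACH(MEMBER)
};
#undef MEMBER

/* ...and once more to emit the matching initializer. */
#define DEFAULT(T, name, value) .name = (value),
static struct params modparams = {
	PARAMS_FOR_EACH(DEFAULT)
};
#undef DEFAULT

int main(void)
{
	printf("modeset=%d ppgtt=%d hangcheck=%d\n",
	       modparams.modeset, modparams.enable_ppgtt,
	       modparams.enable_hangcheck);
	return 0;
}

In the driver the same table additionally feeds the i915_param_named() wrappers, so each row yields a struct field, a default and a module parameter from one definition.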
*/ - if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || - IS_KABYLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { + if (IS_GEN9(dev_priv)) { I915_WRITE(GEN8_OA_DEBUG, _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); @@ -2927,6 +2927,9 @@ void i915_perf_register(struct drm_i915_private *dev_priv) i915_perf_load_test_config_kblgt3(dev_priv); } else if (IS_GEMINILAKE(dev_priv)) { i915_perf_load_test_config_glk(dev_priv); + } else if (IS_COFFEELAKE(dev_priv)) { + if (IS_CFL_GT2(dev_priv)) + i915_perf_load_test_config_cflgt2(dev_priv); } if (dev_priv->perf.oa.test_config.id == 0) @@ -3405,7 +3408,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv) dev_priv->perf.oa.timestamp_frequency = 12500000; dev_priv->perf.oa.oa_formats = hsw_oa_formats; - } else if (i915.enable_execlists) { + } else if (i915_modparams.enable_execlists) { /* Note: that although we could theoretically also support the * legacy ringbuffer mode on BDW (and earlier iterations of * this driver, before upstreaming did this) it didn't seem @@ -3453,6 +3456,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv) break; case INTEL_SKYLAKE: case INTEL_KABYLAKE: + case INTEL_COFFEELAKE: dev_priv->perf.oa.timestamp_frequency = 12000000; break; default: diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ed7cd9ee2c2a..ee0d4f14ac98 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2336,7 +2336,7 @@ enum i915_power_well_id { #define DONE_REG _MMIO(0x40b0) #define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0) #define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) -#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + index*4) +#define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index)*4) #define BSD_HWS_PGA_GEN7 _MMIO(0x04180) #define BLT_HWS_PGA_GEN7 _MMIO(0x04280) #define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380) @@ -2373,6 +2373,7 @@ enum i915_power_well_id { #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) +#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24) #if 0 #define PRB0_TAIL _MMIO(0x2030) @@ -2491,6 +2492,7 @@ enum i915_power_well_id { # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) #define _3D_CHICKEN3 _MMIO(0x2090) #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) +#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */ #define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */ @@ -2728,6 +2730,11 @@ enum i915_power_well_id { #define GEN9_F2_SS_DIS_SHIFT 20 #define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) +#define GEN10_F2_S_ENA_SHIFT 22 +#define GEN10_F2_S_ENA_MASK (0x3f << GEN10_F2_S_ENA_SHIFT) +#define GEN10_F2_SS_DIS_SHIFT 18 +#define GEN10_F2_SS_DIS_MASK (0xf << GEN10_F2_SS_DIS_SHIFT) + #define GEN8_EU_DISABLE0 _MMIO(0x9134) #define GEN8_EU_DIS0_S0_MASK 0xffffff #define GEN8_EU_DIS0_S1_SHIFT 24 @@ -2743,6 +2750,9 @@ enum i915_power_well_id { #define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice)*0x4) +#define GEN10_EU_DISABLE3 _MMIO(0x9140) +#define GEN10_EU_DIS_SS_MASK 0xff + #define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050) #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) @@ -2938,6 +2948,9 @@ enum i915_power_well_id { #define ILK_DPFC_CHICKEN _MMIO(0x43224) #define ILK_DPFC_DISABLE_DUMMY0 (1<<8) #define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23) +#define GLK_SKIP_SEG_EN (1<<12) +#define 
GLK_SKIP_SEG_COUNT_MASK (3<<10) +#define GLK_SKIP_SEG_COUNT(x) ((x)<<10) #define ILK_FBC_RT_BASE _MMIO(0x2128) #define ILK_FBC_RT_VALID (1<<0) #define SNB_FBC_FRONT_BUFFER (1<<1) @@ -3807,6 +3820,12 @@ enum { #define PWM1_GATING_DIS (1 << 13) /* + * GEN10 clock gating regs + */ +#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) +#define SARBUNIT_CLKGATE_DIS (1 << 5) + +/* * Display engine regs */ @@ -4036,7 +4055,7 @@ enum { #define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4 #define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4) #define EDP_PSR2_IDLE_MASK 0xf -#define EDP_FRAMES_BEFORE_SU_ENTRY (1<<4) +#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4) #define EDP_PSR2_STATUS_CTL _MMIO(0x6f940) #define EDP_PSR2_STATUS_STATE_MASK (0xf<<28) @@ -6902,7 +6921,7 @@ enum { # define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2) #define CHICKEN_PAR1_1 _MMIO(0x42080) -#define SKL_RC_HASH_OUTSIDE (1 << 15) +#define SKL_DE_COMPRESSED_HASH_MODE (1 << 15) #define DPA_MASK_VBLANK_SRD (1 << 15) #define FORCE_ARB_IDLE_PLANES (1 << 14) #define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) @@ -6916,6 +6935,10 @@ enum { #define GLK_CL1_PWR_DOWN (1 << 11) #define GLK_CL0_PWR_DOWN (1 << 10) +#define CHICKEN_MISC_4 _MMIO(0x4208c) +#define FBC_STRIDE_OVERRIDE (1 << 13) +#define FBC_STRIDE_MASK 0x1FFF + #define _CHICKEN_PIPESL_1_A 0x420b0 #define _CHICKEN_PIPESL_1_B 0x420b4 #define HSW_FBCQ_DIS (1 << 22) @@ -6934,6 +6957,7 @@ enum { #define DISP_FBC_WM_DIS (1<<15) #define DISP_ARB_CTL2 _MMIO(0x45004) #define DISP_DATA_PARTITION_5_6 (1<<6) +#define DISP_IPC_ENABLE (1<<3) #define DBUF_CTL _MMIO(0x45008) #define DBUF_POWER_REQUEST (1<<31) #define DBUF_POWER_STATE (1<<30) @@ -6975,6 +6999,7 @@ enum { # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) # define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) #define COMMON_SLICE_CHICKEN2 _MMIO(0x7014) +# define GEN9_PBE_COMPRESSED_HASH_SELECTION (1<<13) # define GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE (1<<12) # define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8) # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) @@ -7017,6 +7042,7 @@ enum { /* GEN8 chicken */ #define HDC_CHICKEN0 _MMIO(0x7300) +#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0) #define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15) #define HDC_FENCE_DEST_SLM_DISABLE (1<<14) #define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11) @@ -7453,6 +7479,8 @@ enum { #define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) #define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) #define FDI_BC_BIFURCATION_SELECT (1 << 12) +#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8) +#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8) #define SPT_PWM_GRANULARITY (1<<0) #define SOUTH_CHICKEN2 _MMIO(0xc2004) #define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13) @@ -7470,6 +7498,7 @@ enum { #define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30) #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) #define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14) +#define CNP_PWM_CGE_GATING_DISABLE (1<<13) #define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12) /* CPU: FDI_TX */ @@ -7936,8 +7965,8 @@ enum { #define GEN7_PCODE_TIMEOUT 0x2 #define GEN7_PCODE_ILLEGAL_DATA 0x3 #define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10 -#define GEN6_PCODE_WRITE_RC6VIDS 0x4 -#define GEN6_PCODE_READ_RC6VIDS 0x5 +#define GEN6_PCODE_WRITE_RC6VIDS 0x4 +#define GEN6_PCODE_READ_RC6VIDS 0x5 #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) #define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18 @@ -7956,7 +7985,9 @@ enum { #define GEN6_PCODE_WRITE_D_COMP 
0x11 #define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17 #define DISPLAY_IPS_CONTROL 0x19 -#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A + /* See also IPS_CTL */ +#define IPS_PCODE_CONTROL (1 << 30) +#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A #define GEN9_PCODE_SAGV_CONTROL 0x21 #define GEN9_SAGV_DISABLE 0x0 #define GEN9_SAGV_IS_DISABLED 0x1 @@ -8044,10 +8075,12 @@ enum { #define FLOW_CONTROL_ENABLE (1<<15) #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) #define STALL_DOP_GATING_DISABLE (1<<5) +#define THROTTLE_12_5 (7<<2) #define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) #define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) #define DOP_CLOCK_GATING_DISABLE (1<<0) +#define PUSH_CONSTANT_DEREF_DISABLE (1<<8) #define HSW_ROW_CHICKEN3 _MMIO(0xe49c) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) @@ -8059,9 +8092,11 @@ enum { #define HSW_SAMPLE_C_PERFORMANCE (1<<9) #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) #define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5) +#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4) #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) #define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) +#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1<<8) #define GEN9_ENABLE_YV12_BUGFIX (1<<4) #define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2) @@ -8574,7 +8609,7 @@ enum skl_power_gate { #define DPLL_CFGCR0_LINK_RATE_3240 (6 << 25) #define DPLL_CFGCR0_LINK_RATE_4050 (7 << 25) #define DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10) -#define DPLL_CFGCR0_DCO_FRAC_SHIFT (10) +#define DPLL_CFGCR0_DCO_FRACTION_SHIFT (10) #define DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10) #define DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff) #define CNL_DPLL_CFGCR0(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0) @@ -8781,6 +8816,15 @@ enum skl_power_gate { #define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008) #define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF +/* Gen4+ Timestamp and Pipe Frame time stamp registers */ +#define GEN4_TIMESTAMP _MMIO(0x2358) +#define ILK_TIMESTAMP_HI _MMIO(0x70070) +#define IVB_TIMESTAMP_CTR _MMIO(0x44070) + +#define _PIPE_FRMTMSTMP_A 0x70048 +#define PIPE_FRMTMSTMP(pipe) \ + _MMIO_PIPE2(pipe, _PIPE_FRMTMSTMP_A) + /* BXT MIPI clock controls */ #define BXT_MAX_VAR_OUTPUT_KHZ 39500 @@ -9362,4 +9406,8 @@ enum skl_power_gate { #define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */ #define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */ +#define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */ +#define MMCD_PCLA (1 << 31) +#define MMCD_HOTSPOT_EN (1 << 27) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index f29540f922af..808ea4d5b962 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -9,6 +9,7 @@ #include <linux/slab.h> #include <linux/dma-fence.h> +#include <linux/irq_work.h> #include <linux/reservation.h> #include "i915_sw_fence.h" @@ -356,31 +357,44 @@ struct i915_sw_dma_fence_cb { struct i915_sw_fence *fence; struct dma_fence *dma; struct timer_list timer; + struct irq_work work; }; static void timer_i915_sw_fence_wake(unsigned long data) { struct i915_sw_dma_fence_cb *cb = (struct i915_sw_dma_fence_cb *)data; + struct i915_sw_fence *fence; + + fence = xchg(&cb->fence, NULL); + if (!fence) + return; pr_warn("asynchronous wait on fence %s:%s:%x timed out\n", cb->dma->ops->get_driver_name(cb->dma), cb->dma->ops->get_timeline_name(cb->dma), cb->dma->seqno); - dma_fence_put(cb->dma); - cb->dma = NULL; - i915_sw_fence_complete(cb->fence); - cb->timer.function = NULL; + i915_sw_fence_complete(fence); } static 
void dma_i915_sw_fence_wake(struct dma_fence *dma, struct dma_fence_cb *data) { struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base); + struct i915_sw_fence *fence; + + fence = xchg(&cb->fence, NULL); + if (fence) + i915_sw_fence_complete(fence); + + irq_work_queue(&cb->work); +} + +static void irq_i915_sw_fence_work(struct irq_work *wrk) +{ + struct i915_sw_dma_fence_cb *cb = container_of(wrk, typeof(*cb), work); del_timer_sync(&cb->timer); - if (cb->timer.function) - i915_sw_fence_complete(cb->fence); dma_fence_put(cb->dma); kfree(cb); @@ -414,6 +428,7 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, __setup_timer(&cb->timer, timer_i915_sw_fence_wake, (unsigned long)cb, TIMER_IRQSAFE); + init_irq_work(&cb->work, irq_i915_sw_fence_work); if (timeout) { cb->dma = dma_fence_get(dma); mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout)); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 6fd5c57e21f6..92f4c5bb7aa7 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -1031,5 +1031,5 @@ TRACE_EVENT(switch_mm, /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915 #include <trace/define_trace.h> diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index ee76fab7bb6f..8e6dc159f64d 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -107,7 +107,9 @@ intel_plane_destroy_state(struct drm_plane *plane, drm_atomic_helper_plane_destroy_state(plane, state); } -int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state, +int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *crtc_state, + const struct intel_plane_state *old_plane_state, struct intel_plane_state *intel_state) { struct drm_plane *plane = intel_state->base.plane; @@ -124,7 +126,7 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state, * anything driver-specific we need to test in that case, so * just return success. */ - if (!intel_state->base.crtc && !plane->state->crtc) + if (!intel_state->base.crtc && !old_plane_state->base.crtc) return 0; /* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */ @@ -194,16 +196,21 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state, else crtc_state->active_planes &= ~BIT(intel_plane->id); - return intel_plane_atomic_calc_changes(&crtc_state->base, state); + return intel_plane_atomic_calc_changes(old_crtc_state, + &crtc_state->base, + old_plane_state, + state); } static int intel_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) + struct drm_plane_state *new_plane_state) { - struct drm_crtc *crtc = state->crtc; - struct drm_crtc_state *drm_crtc_state; - - crtc = crtc ? 
crtc : plane->state->crtc; + struct drm_atomic_state *state = new_plane_state->state; + const struct drm_plane_state *old_plane_state = + drm_atomic_get_old_plane_state(state, plane); + struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc; + const struct drm_crtc_state *old_crtc_state; + struct drm_crtc_state *new_crtc_state; /* * Both crtc and plane->crtc could be NULL if we're updating a @@ -214,29 +221,33 @@ static int intel_plane_atomic_check(struct drm_plane *plane, if (!crtc) return 0; - drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); - if (WARN_ON(!drm_crtc_state)) - return -EINVAL; + old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); - return intel_plane_atomic_check_with_state(to_intel_crtc_state(drm_crtc_state), - to_intel_plane_state(state)); + return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state), + to_intel_crtc_state(new_crtc_state), + to_intel_plane_state(old_plane_state), + to_intel_plane_state(new_plane_state)); } static void intel_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { + struct intel_atomic_state *state = to_intel_atomic_state(old_state->state); struct intel_plane *intel_plane = to_intel_plane(plane); - struct intel_plane_state *intel_state = - to_intel_plane_state(plane->state); - struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; + const struct intel_plane_state *new_plane_state = + intel_atomic_get_new_plane_state(state, intel_plane); + struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc; + + if (new_plane_state->base.visible) { + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc)); - if (intel_state->base.visible) { trace_intel_update_plane(plane, to_intel_crtc(crtc)); intel_plane->update_plane(intel_plane, - to_intel_crtc_state(crtc->state), - intel_state); + new_crtc_state, new_plane_state); } else { trace_intel_disable_plane(plane, to_intel_crtc(crtc)); diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index d805b6e6fe71..27743be5b768 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder, connector->encoder->base.id, connector->encoder->name); - /* ELD Conn_Type */ - connector->eld[5] &= ~(3 << 2); - if (intel_crtc_has_dp_encoder(crtc_state)) - connector->eld[5] |= (1 << 2); - connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; if (dev_priv->display.audio_codec_enable) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 183e87e8ea31..b881ce8596ee 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -356,7 +356,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv, struct drm_display_mode *panel_fixed_mode; int index; - index = i915.vbt_sdvo_panel_type; + index = i915_modparams.vbt_sdvo_panel_type; if (index == -2) { DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n"); return; @@ -452,24 +452,24 @@ parse_general_definitions(struct drm_i915_private *dev_priv, } } -static const union child_device_config * -child_device_ptr(const struct bdb_general_definitions *p_defs, int i) +static const struct child_device_config * +child_device_ptr(const struct bdb_general_definitions *defs, int i) { - return (const void *) &p_defs->devices[i * 
p_defs->child_dev_size]; + return (const void *) &defs->devices[i * defs->child_dev_size]; } static void parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { - struct sdvo_device_mapping *p_mapping; - const struct bdb_general_definitions *p_defs; - const struct old_child_dev_config *child; /* legacy */ + struct sdvo_device_mapping *mapping; + const struct bdb_general_definitions *defs; + const struct child_device_config *child; int i, child_device_num, count; u16 block_size; - p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); - if (!p_defs) { + defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); + if (!defs) { DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n"); return; } @@ -479,18 +479,17 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, * device size matches that of the *legacy* child device config * struct. Thus, SDVO mapping will be skipped for newer VBT. */ - if (p_defs->child_dev_size != sizeof(*child)) { + if (defs->child_dev_size != LEGACY_CHILD_DEVICE_CONFIG_SIZE) { DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n"); return; } /* get the block size of general definitions */ - block_size = get_blocksize(p_defs); + block_size = get_blocksize(defs); /* get the number of child device */ - child_device_num = (block_size - sizeof(*p_defs)) / - p_defs->child_dev_size; + child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size; count = 0; for (i = 0; i < child_device_num; i++) { - child = &child_device_ptr(p_defs, i)->old; + child = child_device_ptr(defs, i); if (!child->device_type) { /* skip the device block if device type is invalid */ continue; @@ -514,20 +513,20 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, child->slave_addr, (child->dvo_port == DEVICE_PORT_DVOB) ? 
"SDVOB" : "SDVOC"); - p_mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1]; - if (!p_mapping->initialized) { - p_mapping->dvo_port = child->dvo_port; - p_mapping->slave_addr = child->slave_addr; - p_mapping->dvo_wiring = child->dvo_wiring; - p_mapping->ddc_pin = child->ddc_pin; - p_mapping->i2c_pin = child->i2c_pin; - p_mapping->initialized = 1; + mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1]; + if (!mapping->initialized) { + mapping->dvo_port = child->dvo_port; + mapping->slave_addr = child->slave_addr; + mapping->dvo_wiring = child->dvo_wiring; + mapping->ddc_pin = child->ddc_pin; + mapping->i2c_pin = child->i2c_pin; + mapping->initialized = 1; DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", - p_mapping->dvo_port, - p_mapping->slave_addr, - p_mapping->dvo_wiring, - p_mapping->ddc_pin, - p_mapping->i2c_pin); + mapping->dvo_port, + mapping->slave_addr, + mapping->dvo_wiring, + mapping->ddc_pin, + mapping->i2c_pin); } else { DRM_DEBUG_KMS("Maybe one SDVO port is shared by " "two SDVO device.\n"); @@ -577,7 +576,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { const struct bdb_edp *edp; const struct edp_power_seq *edp_pps; - const struct edp_link_params *edp_link_params; + const struct edp_fast_link_params *edp_link_params; int panel_type = dev_priv->vbt.panel_type; edp = find_section(bdb, BDB_EDP); @@ -601,7 +600,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) /* Get the eDP sequencing and link info */ edp_pps = &edp->power_seqs[panel_type]; - edp_link_params = &edp->link_params[panel_type]; + edp_link_params = &edp->fast_link_params[panel_type]; dev_priv->vbt.edp.pps = *edp_pps; @@ -676,8 +675,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) uint8_t vswing; /* Don't read from VBT if module parameter has valid value*/ - if (i915.edp_vswing) { - dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1; + if (i915_modparams.edp_vswing) { + dev_priv->vbt.edp.low_vswing = + i915_modparams.edp_vswing == 1; } else { vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; dev_priv->vbt.edp.low_vswing = vswing == 0; @@ -1113,7 +1113,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, const struct bdb_header *bdb) { - union child_device_config *it, *child = NULL; + struct child_device_config *it, *child = NULL; struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port]; uint8_t hdmi_level_shift; int i, j; @@ -1141,7 +1141,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, if (dvo_ports[port][j] == -1) break; - if (it->common.dvo_port == dvo_ports[port][j]) { + if (it->dvo_port == dvo_ports[port][j]) { if (child) { DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n", port_name(port)); @@ -1154,14 +1154,21 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, if (!child) return; - aux_channel = child->common.aux_channel; - ddc_pin = child->common.ddc_pin; + aux_channel = child->aux_channel; + ddc_pin = child->ddc_pin; - is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; - is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; - is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT; - is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; - is_edp = is_dp && (child->common.device_type & 
DEVICE_TYPE_INTERNAL_CONNECTOR); + is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; + is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; + is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT; + is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; + is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); + + if (port == PORT_A && is_dvi) { + DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n", + is_hdmi ? "/HDMI" : ""); + is_dvi = false; + is_hdmi = false; + } info->supports_dvi = is_dvi; info->supports_hdmi = is_hdmi; @@ -1210,7 +1217,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, if (bdb->version >= 158) { /* The VBT HDMI level shift values match the table we have. */ - hdmi_level_shift = child->raw[7] & 0xF; + hdmi_level_shift = child->hdmi_level_shifter_value; DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n", port_name(port), hdmi_level_shift); @@ -1218,11 +1225,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, } /* Parse the I_boost config for SKL and above */ - if (bdb->version >= 196 && child->common.iboost) { - info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF); + if (bdb->version >= 196 && child->iboost) { + info->dp_boost_level = translate_iboost(child->dp_iboost_level); DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n", port_name(port), info->dp_boost_level); - info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4); + info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level); DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n", port_name(port), info->hdmi_boost_level); } @@ -1250,15 +1257,15 @@ static void parse_device_mapping(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { - const struct bdb_general_definitions *p_defs; - const union child_device_config *p_child; - union child_device_config *child_dev_ptr; + const struct bdb_general_definitions *defs; + const struct child_device_config *child; + struct child_device_config *child_dev_ptr; int i, child_device_num, count; u8 expected_size; u16 block_size; - p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); - if (!p_defs) { + defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); + if (!defs) { DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); return; } @@ -1267,41 +1274,39 @@ parse_device_mapping(struct drm_i915_private *dev_priv, } else if (bdb->version < 111) { expected_size = 27; } else if (bdb->version < 195) { - BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); - expected_size = sizeof(struct old_child_dev_config); + expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE; } else if (bdb->version == 195) { expected_size = 37; } else if (bdb->version <= 197) { expected_size = 38; } else { expected_size = 38; - BUILD_BUG_ON(sizeof(*p_child) < 38); + BUILD_BUG_ON(sizeof(*child) < 38); DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n", bdb->version, expected_size); } /* Flag an error for unexpected size, but continue anyway. */ - if (p_defs->child_dev_size != expected_size) + if (defs->child_dev_size != expected_size) DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n", - p_defs->child_dev_size, expected_size, bdb->version); + defs->child_dev_size, expected_size, bdb->version); /* The legacy sized child device config is the minimum we need. 
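Aside: the parse_ddi_port() rework above drops the old union accessors and classifies an output by testing child->device_type bits directly. A minimal sketch of that decoding, assuming invented placeholder bit positions (the real masks live in the VBT definitions, not here):

	/* Toy model of the device_type decoding; bit values are invented. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DEVICE_TYPE_TMDS_DVI_SIGNALING	(1 << 4)  /* placeholder */
	#define DEVICE_TYPE_DISPLAYPORT_OUTPUT	(1 << 2)  /* placeholder */
	#define DEVICE_TYPE_NOT_HDMI_OUTPUT	(1 << 11) /* placeholder */
	#define DEVICE_TYPE_INTERNAL_CONNECTOR	(1 << 1)  /* placeholder */

	struct output_caps {
		bool dvi, hdmi, dp, edp;
	};

	static struct output_caps decode_device_type(uint16_t device_type)
	{
		struct output_caps c = {
			.dvi = device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING,
			.dp  = device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT,
		};

		/* HDMI is DVI signaling without the "not HDMI" bit;
		 * eDP is DP on an internal connector. */
		c.hdmi = c.dvi && !(device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT);
		c.edp  = c.dp  &&  (device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
		return c;
	}

	int main(void)
	{
		struct output_caps c =
			decode_device_type(DEVICE_TYPE_TMDS_DVI_SIGNALING);

		printf("dvi=%d hdmi=%d\n", c.dvi, c.hdmi);	/* dvi=1 hdmi=1 */
		return 0;
	}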
*/ - if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) { + if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) { DRM_DEBUG_KMS("Child device config size %u is too small.\n", - p_defs->child_dev_size); + defs->child_dev_size); return; } /* get the block size of general definitions */ - block_size = get_blocksize(p_defs); + block_size = get_blocksize(defs); /* get the number of child device */ - child_device_num = (block_size - sizeof(*p_defs)) / - p_defs->child_dev_size; + child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size; count = 0; /* get the number of child device that is present */ for (i = 0; i < child_device_num; i++) { - p_child = child_device_ptr(p_defs, i); - if (!p_child->common.device_type) { + child = child_device_ptr(defs, i); + if (!child->device_type) { /* skip the device block if device type is invalid */ continue; } @@ -1311,7 +1316,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, DRM_DEBUG_KMS("no child dev is parsed from VBT\n"); return; } - dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL); + dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL); if (!dev_priv->vbt.child_dev) { DRM_DEBUG_KMS("No memory space for child device\n"); return; @@ -1320,8 +1325,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv, dev_priv->vbt.child_dev_num = count; count = 0; for (i = 0; i < child_device_num; i++) { - p_child = child_device_ptr(p_defs, i); - if (!p_child->common.device_type) { + child = child_device_ptr(defs, i); + if (!child->device_type) { /* skip the device block if device type is invalid */ continue; } @@ -1334,8 +1339,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv, * (child_dev_size) of the child device. Accessing the data must * depend on VBT version. */ - memcpy(child_dev_ptr, p_child, - min_t(size_t, p_defs->child_dev_size, sizeof(*p_child))); + memcpy(child_dev_ptr, child, + min_t(size_t, defs->child_dev_size, sizeof(*child))); /* * copied full block, now init values when they are not @@ -1343,12 +1348,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv, */ if (bdb->version < 196) { /* Set default values for bits added from v196 */ - child_dev_ptr->common.iboost = 0; - child_dev_ptr->common.hpd_invert = 0; + child_dev_ptr->iboost = 0; + child_dev_ptr->hpd_invert = 0; } if (bdb->version < 192) - child_dev_ptr->common.lspcon = 0; + child_dev_ptr->lspcon = 0; } return; } @@ -1559,7 +1564,7 @@ out: */ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv) { - union child_device_config *p_child; + const struct child_device_config *child; int i; if (!dev_priv->vbt.int_tv_support) @@ -1569,11 +1574,11 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv) return true; for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - p_child = dev_priv->vbt.child_dev + i; + child = dev_priv->vbt.child_dev + i; /* * If the device type is not TV, continue. */ - switch (p_child->old.device_type) { + switch (child->device_type) { case DEVICE_TYPE_INT_TV: case DEVICE_TYPE_TV: case DEVICE_TYPE_TV_SVIDEO_COMPOSITE: @@ -1584,7 +1589,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv) /* Only when the addin_offset is non-zero, it is regarded * as present. 
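Aside: parse_device_mapping() above copies min(child_dev_size, sizeof(*child)) bytes and then zeroes the fields that postdate the blob's BDB version, so one struct can serve every VBT revision. A self-contained sketch of that pattern; the struct layout here is a stand-in, only the field names and version cutoffs come from the diff:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct child_device_config {	/* stand-in, not the real layout */
		uint16_t device_type;
		uint8_t  iboost;
		uint8_t  hpd_invert;
		uint8_t  lspcon;
	};

	static void copy_child_config(struct child_device_config *dst,
				      const void *src, size_t src_size,
				      unsigned int bdb_version)
	{
		size_t n = src_size < sizeof(*dst) ? src_size : sizeof(*dst);

		memset(dst, 0, sizeof(*dst));
		memcpy(dst, src, n);

		/* Bits added in newer VBT revisions are garbage in older
		 * blobs, so force them to a sane default. */
		if (bdb_version < 196) {
			dst->iboost = 0;
			dst->hpd_invert = 0;
		}
		if (bdb_version < 192)
			dst->lspcon = 0;
	}

	int main(void)
	{
		unsigned char blob[33] = { 0 };	/* legacy-sized child entry */
		struct child_device_config child;

		copy_child_config(&child, blob, sizeof(blob), 195);
		printf("iboost=%u lspcon=%u\n", child.iboost, child.lspcon);
		return 0;
	}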
*/ - if (p_child->old.addin_offset) + if (child->addin_offset) return true; } @@ -1601,14 +1606,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv) */ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) { + const struct child_device_config *child; int i; if (!dev_priv->vbt.child_dev_num) return true; for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - union child_device_config *uchild = dev_priv->vbt.child_dev + i; - struct old_child_dev_config *child = &uchild->old; + child = dev_priv->vbt.child_dev + i; /* If the device type is not LFP, continue. * We have to check both the new identifiers as well as the @@ -1650,6 +1655,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) */ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port) { + const struct child_device_config *child; static const struct { u16 dp, hdmi; } port_mapping[] = { @@ -1668,12 +1674,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por return false; for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - const union child_device_config *p_child = - &dev_priv->vbt.child_dev[i]; - if ((p_child->common.dvo_port == port_mapping[port].dp || - p_child->common.dvo_port == port_mapping[port].hdmi) && - (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | - DEVICE_TYPE_DISPLAYPORT_OUTPUT))) + child = dev_priv->vbt.child_dev + i; + + if ((child->dvo_port == port_mapping[port].dp || + child->dvo_port == port_mapping[port].hdmi) && + (child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | + DEVICE_TYPE_DISPLAYPORT_OUTPUT))) return true; } @@ -1689,7 +1695,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por */ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) { - union child_device_config *p_child; + const struct child_device_config *child; static const short port_mapping[] = { [PORT_B] = DVO_PORT_DPB, [PORT_C] = DVO_PORT_DPC, @@ -1705,10 +1711,10 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) return false; for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - p_child = dev_priv->vbt.child_dev + i; + child = dev_priv->vbt.child_dev + i; - if (p_child->common.dvo_port == port_mapping[port] && - (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == + if (child->dvo_port == port_mapping[port] && + (child->device_type & DEVICE_TYPE_eDP_BITS) == (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) return true; } @@ -1716,7 +1722,7 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) return false; } -static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child, +static bool child_dev_is_dp_dual_mode(const struct child_device_config *child, enum port port) { static const struct { @@ -1735,16 +1741,16 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child, if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) return false; - if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != + if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) return false; - if (p_child->common.dvo_port == port_mapping[port].dp) + if (child->dvo_port == port_mapping[port].dp) return true; /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */ - if (p_child->common.dvo_port == port_mapping[port].hdmi && - p_child->common.aux_channel != 0) + if (child->dvo_port == 
port_mapping[port].hdmi && + child->aux_channel != 0) return true; return false; @@ -1753,13 +1759,13 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child, bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) { + const struct child_device_config *child; int i; for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - const union child_device_config *p_child = - &dev_priv->vbt.child_dev[i]; + child = dev_priv->vbt.child_dev + i; - if (child_dev_is_dp_dual_mode(p_child, port)) + if (child_dev_is_dp_dual_mode(child, port)) return true; } @@ -1776,17 +1782,17 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port) { - union child_device_config *p_child; + const struct child_device_config *child; u8 dvo_port; int i; for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - p_child = dev_priv->vbt.child_dev + i; + child = dev_priv->vbt.child_dev + i; - if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT)) + if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT)) continue; - dvo_port = p_child->common.dvo_port; + dvo_port = child->dvo_port; switch (dvo_port) { case DVO_PORT_MIPIA: @@ -1816,16 +1822,19 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, enum port port) { + const struct child_device_config *child; int i; if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv))) return false; for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - if (!dev_priv->vbt.child_dev[i].common.hpd_invert) + child = dev_priv->vbt.child_dev + i; + + if (!child->hpd_invert) continue; - switch (dev_priv->vbt.child_dev[i].common.dvo_port) { + switch (child->dvo_port) { case DVO_PORT_DPA: case DVO_PORT_HDMIA: if (port == PORT_A) @@ -1860,16 +1869,19 @@ bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv, enum port port) { + const struct child_device_config *child; int i; if (!HAS_LSPCON(dev_priv)) return false; for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - if (!dev_priv->vbt.child_dev[i].common.lspcon) + child = dev_priv->vbt.child_dev + i; + + if (!child->lspcon) continue; - switch (dev_priv->vbt.child_dev[i].common.dvo_port) { + switch (child->dvo_port) { case DVO_PORT_DPA: case DVO_PORT_HDMIA: if (port == PORT_A) diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 4e00e5cb9fa1..29c62d481cef 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -64,7 +64,7 @@ static unsigned long wait_timeout(void) static noinline void missed_breadcrumb(struct intel_engine_cs *engine) { - DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s, current seqno=%x, last=%x\n", + DRM_DEBUG_DRIVER("%s missed breadcrumb at %pS, irq posted? %s, current seqno=%x, last=%x\n", engine->name, __builtin_return_address(0), yesno(test_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)), diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 1241e5891b29..87fc42b19336 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -417,24 +417,21 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv, cdclk_state->cdclk = 540000; } -static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, - int max_pixclk) +static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) { int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 
333333 : 320000; - int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90; /* * We seem to get an unstable or solid color picture at 200MHz. * Not sure what's wrong. For now use 200MHz only when all pipes * are off. */ - if (!IS_CHERRYVIEW(dev_priv) && - max_pixclk > freq_320*limit/100) + if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320) return 400000; - else if (max_pixclk > 266667*limit/100) + else if (min_cdclk > 266667) return freq_320; - else if (max_pixclk > 0) + else if (min_cdclk > 0) return 266667; else return 200000; @@ -612,13 +609,13 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } -static int bdw_calc_cdclk(int max_pixclk) +static int bdw_calc_cdclk(int min_cdclk) { - if (max_pixclk > 540000) + if (min_cdclk > 540000) return 675000; - else if (max_pixclk > 450000) + else if (min_cdclk > 450000) return 540000; - else if (max_pixclk > 337500) + else if (min_cdclk > 337500) return 450000; else return 337500; @@ -672,8 +669,12 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, val |= LCPLL_CD_SOURCE_FCLK; I915_WRITE(LCPLL_CTL, val); + /* + * According to the spec, it should be enough to poll for this 1 us. + * However, extensive testing shows that this can take longer. + */ if (wait_for_us(I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE, 1)) + LCPLL_CD_SOURCE_FCLK_DONE, 100)) DRM_ERROR("Switching to FCLK failed\n"); val = I915_READ(LCPLL_CTL); @@ -724,23 +725,23 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, cdclk, dev_priv->cdclk.hw.cdclk); } -static int skl_calc_cdclk(int max_pixclk, int vco) +static int skl_calc_cdclk(int min_cdclk, int vco) { if (vco == 8640000) { - if (max_pixclk > 540000) + if (min_cdclk > 540000) return 617143; - else if (max_pixclk > 432000) + else if (min_cdclk > 432000) return 540000; - else if (max_pixclk > 308571) + else if (min_cdclk > 308571) return 432000; else return 308571; } else { - if (max_pixclk > 540000) + if (min_cdclk > 540000) return 675000; - else if (max_pixclk > 450000) + else if (min_cdclk > 450000) return 540000; - else if (max_pixclk > 337500) + else if (min_cdclk > 337500) return 450000; else return 337500; @@ -1075,31 +1076,25 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv) skl_set_cdclk(dev_priv, &cdclk_state); } -static int bxt_calc_cdclk(int max_pixclk) +static int bxt_calc_cdclk(int min_cdclk) { - if (max_pixclk > 576000) + if (min_cdclk > 576000) return 624000; - else if (max_pixclk > 384000) + else if (min_cdclk > 384000) return 576000; - else if (max_pixclk > 288000) + else if (min_cdclk > 288000) return 384000; - else if (max_pixclk > 144000) + else if (min_cdclk > 144000) return 288000; else return 144000; } -static int glk_calc_cdclk(int max_pixclk) +static int glk_calc_cdclk(int min_cdclk) { - /* - * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk - * as a temporary workaround. Use a higher cdclk instead. (Note that - * intel_compute_max_dotclk() limits the max pixel clock to 99% of max - * cdclk.) 
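Aside: the *_calc_cdclk() helpers above all share one shape: a ladder that picks the smallest legal cdclk at or above the required minimum. A table-driven sketch of the idea, using the BXT frequencies from the diff (hypothetical helper, not driver code):

	#include <stdio.h>

	/* Pick the smallest ladder entry >= min_cdclk; the last entry is
	 * the platform maximum (the caller validates against it). */
	static int pick_cdclk(const int *ladder, int n, int min_cdclk)
	{
		int i;

		for (i = 0; i < n - 1; i++)
			if (min_cdclk <= ladder[i])
				return ladder[i];
		return ladder[n - 1];
	}

	int main(void)
	{
		static const int bxt[] = { 144000, 288000, 384000, 576000, 624000 };

		/* 300000 kHz required -> 384000 kHz, as in bxt_calc_cdclk() */
		printf("%d\n", pick_cdclk(bxt, 5, 300000));
		return 0;
	}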
- */ - if (max_pixclk > DIV_ROUND_UP(2 * 158400 * 99, 100)) + if (min_cdclk > 158400) return 316800; - else if (max_pixclk > DIV_ROUND_UP(2 * 79200 * 99, 100)) + else if (min_cdclk > 79200) return 158400; else return 79200; @@ -1420,11 +1415,11 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) bxt_set_cdclk(dev_priv, &cdclk_state); } -static int cnl_calc_cdclk(int max_pixclk) +static int cnl_calc_cdclk(int min_cdclk) { - if (max_pixclk > 336000) + if (min_cdclk > 336000) return 528000; - else if (max_pixclk > 168000) + else if (min_cdclk > 168000) return 336000; else return 168000; @@ -1732,104 +1727,119 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv, dev_priv->display.set_cdclk(dev_priv, cdclk_state); } -static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state, - int pixel_rate) +static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, + int pixel_rate) +{ + if (INTEL_GEN(dev_priv) >= 10) + /* + * FIXME: Switch to DIV_ROUND_UP(pixel_rate, 2) + * once DDI clock voltage requirements are + * handled correctly. + */ + return pixel_rate; + else if (IS_GEMINILAKE(dev_priv)) + /* + * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk + * as a temporary workaround. Use a higher cdclk instead. (Note that + * intel_compute_max_dotclk() limits the max pixel clock to 99% of max + * cdclk.) + */ + return DIV_ROUND_UP(pixel_rate * 100, 2 * 99); + else if (IS_GEN9(dev_priv) || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + return pixel_rate; + else if (IS_CHERRYVIEW(dev_priv)) + return DIV_ROUND_UP(pixel_rate * 100, 95); + else + return DIV_ROUND_UP(pixel_rate * 100, 90); +} + +int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + int min_cdclk; + + if (!crtc_state->base.enable) + return 0; + + min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate); /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) - pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); + min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95); /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz, * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else * there may be audio corruption or screen corruption." This cdclk - * restriction for GLK is 316.8 MHz and since GLK can output two - * pixels per clock, the pixel rate becomes 2 * 316.8 MHz. + * restriction for GLK is 316.8 MHz. */ if (intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio && crtc_state->port_clock >= 540000 && crtc_state->lane_count == 4) { - if (IS_CANNONLAKE(dev_priv)) - pixel_rate = max(316800, pixel_rate); - else if (IS_GEMINILAKE(dev_priv)) - pixel_rate = max(2 * 316800, pixel_rate); - else - pixel_rate = max(432000, pixel_rate); + if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { + /* Display WA #1145: glk,cnl */ + min_cdclk = max(316800, min_cdclk); + } else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) { + /* Display WA #1144: skl,bxt */ + min_cdclk = max(432000, min_cdclk); + } } /* According to BSpec, "The CD clock frequency must be at least twice * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default. - * The check for GLK has to be adjusted as the platform can output - * two pixels per clock. 
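Aside: intel_pixel_rate_to_cdclk() above turns a pixel rate into a minimum cdclk by dividing out the pixels per clock and applying a guardband percentage, always rounding up so the result never lands below the true requirement. A sketch of the GLK arithmetic (2 pixels per clock, 99% guardband), checked at the 316800 kHz boundary:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	static int glk_pixel_rate_to_cdclk(int pixel_rate)
	{
		/* cdclk * 2 * 0.99 >= pixel_rate, i.e.
		 * cdclk >= pixel_rate * 100 / 198, rounded up */
		return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
	}

	int main(void)
	{
		/* 316800 kHz * 2 * 99% = 627264 kHz is the most a 316800 kHz
		 * cdclk can carry; one kHz more must bump the requirement. */
		printf("%d\n", glk_pixel_rate_to_cdclk(627264));	/* 316800 */
		printf("%d\n", glk_pixel_rate_to_cdclk(627265));	/* 316801 */
		return 0;
	}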
*/ - if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) { - if (IS_GEMINILAKE(dev_priv)) - pixel_rate = max(2 * 2 * 96000, pixel_rate); - else - pixel_rate = max(2 * 96000, pixel_rate); + if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) + min_cdclk = max(2 * 96000, min_cdclk); + + if (min_cdclk > dev_priv->max_cdclk_freq) { + DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n", + min_cdclk, dev_priv->max_cdclk_freq); + return -EINVAL; } - return pixel_rate; + return min_cdclk; } -/* compute the max rate for new configuration */ -static int intel_max_pixel_rate(struct drm_atomic_state *state) +static int intel_compute_min_cdclk(struct drm_atomic_state *state) { struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = to_i915(state->dev); - struct drm_crtc *crtc; - struct drm_crtc_state *cstate; + struct intel_crtc *crtc; struct intel_crtc_state *crtc_state; - unsigned int max_pixel_rate = 0, i; + int min_cdclk, i; enum pipe pipe; - memcpy(intel_state->min_pixclk, dev_priv->min_pixclk, - sizeof(intel_state->min_pixclk)); - - for_each_new_crtc_in_state(state, crtc, cstate, i) { - int pixel_rate; - - crtc_state = to_intel_crtc_state(cstate); - if (!crtc_state->base.enable) { - intel_state->min_pixclk[i] = 0; - continue; - } - - pixel_rate = crtc_state->pixel_rate; + memcpy(intel_state->min_cdclk, dev_priv->min_cdclk, + sizeof(intel_state->min_cdclk)); - if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) - pixel_rate = - bdw_adjust_min_pipe_pixel_rate(crtc_state, - pixel_rate); + for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) { + min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); + if (min_cdclk < 0) + return min_cdclk; - intel_state->min_pixclk[i] = pixel_rate; + intel_state->min_cdclk[i] = min_cdclk; } + min_cdclk = 0; for_each_pipe(dev_priv, pipe) - max_pixel_rate = max(intel_state->min_pixclk[pipe], - max_pixel_rate); + min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk); - return max_pixel_rate; + return min_cdclk; } static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); - int max_pixclk = intel_max_pixel_rate(state); - struct intel_atomic_state *intel_state = - to_intel_atomic_state(state); - int cdclk; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + int min_cdclk, cdclk; - cdclk = vlv_calc_cdclk(dev_priv, max_pixclk); + min_cdclk = intel_compute_min_cdclk(state); + if (min_cdclk < 0) + return min_cdclk; - if (cdclk > dev_priv->max_cdclk_freq) { - DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", - cdclk, dev_priv->max_cdclk_freq); - return -EINVAL; - } + cdclk = vlv_calc_cdclk(dev_priv, min_cdclk); intel_state->cdclk.logical.cdclk = cdclk; @@ -1847,22 +1857,18 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state) static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->dev); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - int max_pixclk = intel_max_pixel_rate(state); - int cdclk; + int min_cdclk, cdclk; + + min_cdclk = intel_compute_min_cdclk(state); + if (min_cdclk < 0) + return min_cdclk; /* * FIXME should also account for plane ratio * once 64bpp pixel formats are supported. 
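Aside: intel_compute_min_cdclk() above keeps one min_cdclk per pipe; the device-wide requirement is simply the maximum across pipes, and a negative per-pipe value (the -EINVAL path when a single pipe already exceeds max_cdclk_freq) aborts the whole computation. A stand-alone sketch of that aggregation, not the driver function itself:

	#include <stdio.h>

	#define MAX_PIPES 3

	static int compute_min_cdclk(const int *per_pipe_min, int npipes)
	{
		int min_cdclk = 0, i;

		for (i = 0; i < npipes; i++) {
			if (per_pipe_min[i] < 0)
				return per_pipe_min[i];	/* propagate error */
			if (per_pipe_min[i] > min_cdclk)
				min_cdclk = per_pipe_min[i];
		}
		return min_cdclk;	/* 0 when every pipe is off */
	}

	int main(void)
	{
		int pipes[MAX_PIPES] = { 0, 308571, 144000 };

		printf("%d\n", compute_min_cdclk(pipes, MAX_PIPES)); /* 308571 */
		return 0;
	}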
*/ - cdclk = bdw_calc_cdclk(max_pixclk); - - if (cdclk > dev_priv->max_cdclk_freq) { - DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", - cdclk, dev_priv->max_cdclk_freq); - return -EINVAL; - } + cdclk = bdw_calc_cdclk(min_cdclk); intel_state->cdclk.logical.cdclk = cdclk; @@ -1880,10 +1886,13 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state) static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) { - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = to_i915(state->dev); - const int max_pixclk = intel_max_pixel_rate(state); - int cdclk, vco; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + int min_cdclk, cdclk, vco; + + min_cdclk = intel_compute_min_cdclk(state); + if (min_cdclk < 0) + return min_cdclk; vco = intel_state->cdclk.logical.vco; if (!vco) @@ -1893,13 +1902,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) * FIXME should also account for plane ratio * once 64bpp pixel formats are supported. */ - cdclk = skl_calc_cdclk(max_pixclk, vco); - - if (cdclk > dev_priv->max_cdclk_freq) { - DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", - cdclk, dev_priv->max_cdclk_freq); - return -EINVAL; - } + cdclk = skl_calc_cdclk(min_cdclk, vco); intel_state->cdclk.logical.vco = vco; intel_state->cdclk.logical.cdclk = cdclk; @@ -1920,25 +1923,21 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state) static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); - int max_pixclk = intel_max_pixel_rate(state); - struct intel_atomic_state *intel_state = - to_intel_atomic_state(state); - int cdclk, vco; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + int min_cdclk, cdclk, vco; + + min_cdclk = intel_compute_min_cdclk(state); + if (min_cdclk < 0) + return min_cdclk; if (IS_GEMINILAKE(dev_priv)) { - cdclk = glk_calc_cdclk(max_pixclk); + cdclk = glk_calc_cdclk(min_cdclk); vco = glk_de_pll_vco(dev_priv, cdclk); } else { - cdclk = bxt_calc_cdclk(max_pixclk); + cdclk = bxt_calc_cdclk(min_cdclk); vco = bxt_de_pll_vco(dev_priv, cdclk); } - if (cdclk > dev_priv->max_cdclk_freq) { - DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", - cdclk, dev_priv->max_cdclk_freq); - return -EINVAL; - } - intel_state->cdclk.logical.vco = vco; intel_state->cdclk.logical.cdclk = cdclk; @@ -1964,19 +1963,15 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state) static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_atomic_state *intel_state = - to_intel_atomic_state(state); - int max_pixclk = intel_max_pixel_rate(state); - int cdclk, vco; + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + int min_cdclk, cdclk, vco; - cdclk = cnl_calc_cdclk(max_pixclk); - vco = cnl_cdclk_pll_vco(dev_priv, cdclk); + min_cdclk = intel_compute_min_cdclk(state); + if (min_cdclk < 0) + return min_cdclk; - if (cdclk > dev_priv->max_cdclk_freq) { - DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", - cdclk, dev_priv->max_cdclk_freq); - return -EINVAL; - } + cdclk = cnl_calc_cdclk(min_cdclk); + vco = cnl_cdclk_pll_vco(dev_priv, cdclk); intel_state->cdclk.logical.vco = vco; intel_state->cdclk.logical.cdclk = cdclk; @@ -1999,14 +1994,21 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) { int max_cdclk_freq = 
dev_priv->max_cdclk_freq; - if (IS_GEMINILAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 10) + /* + * FIXME: Allow '2 * max_cdclk_freq' + * once DDI clock voltage requirements are + * handled correctly. + */ + return max_cdclk_freq; + else if (IS_GEMINILAKE(dev_priv)) /* * FIXME: Limiting to 99% as a temporary workaround. See - * glk_calc_cdclk() for details. + * intel_pixel_rate_to_cdclk() for details. */ return 2 * max_cdclk_freq * 99 / 100; - else if (INTEL_INFO(dev_priv)->gen >= 9 || - IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + else if (IS_GEN9(dev_priv) || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) return max_cdclk_freq; else if (IS_CHERRYVIEW(dev_priv)) return max_cdclk_freq*95/100; diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 70e0ff41070c..954070255b4d 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -143,7 +143,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, /* Note: The caller is required to filter out dpms modes not supported by the * platform. */ static void intel_crt_set_dpms(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state, + const struct intel_crtc_state *crtc_state, int mode) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -194,28 +194,28 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, } static void intel_disable_crt(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF); } static void pch_disable_crt(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { } static void pch_post_disable_crt(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { intel_disable_crt(encoder, old_crtc_state, old_conn_state); } static void hsw_post_disable_crt(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -228,8 +228,8 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder, } static void intel_enable_crt(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON); } @@ -712,7 +712,7 @@ intel_crt_detect(struct drm_connector *connector, * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out.
*/ - if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) { + if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) { status = connector_status_disconnected; goto out; } @@ -730,7 +730,7 @@ intel_crt_detect(struct drm_connector *connector, else if (INTEL_GEN(dev_priv) < 4) status = intel_crt_load_detect(crt, to_intel_crtc(connector->state->crtc)->pipe); - else if (i915.load_detect_test) + else if (i915_modparams.load_detect_test) status = connector_status_disconnected; else status = connector_status_unknown; diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 965988f79a55..cdfb624eb82d 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -252,8 +252,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv) } fw_size = dev_priv->csr.dmc_fw_size; + assert_rpm_wakelock_held(dev_priv); + + preempt_disable(); + for (i = 0; i < fw_size; i++) - I915_WRITE(CSR_PROGRAM(i), payload[i]); + I915_WRITE_FW(CSR_PROGRAM(i), payload[i]); + + preempt_enable(); for (i = 0; i < dev_priv->csr.mmio_count; i++) { I915_WRITE(dev_priv->csr.mmioaddr[i], diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 4b4fd1f8110b..93cbbcbbc193 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -301,10 +301,10 @@ static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = { }; struct bxt_ddi_buf_trans { - u32 margin; /* swing value */ - u32 scale; /* scale value */ - u32 enable; /* scale enable */ - u32 deemphasis; + u8 margin; /* swing value */ + u8 scale; /* scale value */ + u8 enable; /* scale enable */ + u8 deemphasis; bool default_index; /* true if the entry represents default value */ }; @@ -354,11 +354,11 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = { }; struct cnl_ddi_buf_trans { - u32 dw2_swing_sel; - u32 dw7_n_scalar; - u32 dw4_cursor_coeff; - u32 dw4_post_cursor_2; - u32 dw4_post_cursor_1; + u8 dw2_swing_sel; + u8 dw7_n_scalar; + u8 dw4_cursor_coeff; + u8 dw4_post_cursor_2; + u8 dw4_post_cursor_1; }; /* Voltage Swing Programming for VccIO 0.85V for DP */ @@ -588,6 +588,67 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) } } +static const struct cnl_ddi_buf_trans * +cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) +{ + u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + if (voltage == VOLTAGE_INFO_0_85V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V); + return cnl_ddi_translations_hdmi_0_85V; + } else if (voltage == VOLTAGE_INFO_0_95V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V); + return cnl_ddi_translations_hdmi_0_95V; + } else if (voltage == VOLTAGE_INFO_1_05V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V); + return cnl_ddi_translations_hdmi_1_05V; + } else + MISSING_CASE(voltage); + return NULL; +} + +static const struct cnl_ddi_buf_trans * +cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) +{ + u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + if (voltage == VOLTAGE_INFO_0_85V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V); + return cnl_ddi_translations_dp_0_85V; + } else if (voltage == VOLTAGE_INFO_0_95V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V); + return cnl_ddi_translations_dp_0_95V; + } else if (voltage == VOLTAGE_INFO_1_05V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V); + return cnl_ddi_translations_dp_1_05V; + } 
else + MISSING_CASE(voltage); + return NULL; +} + +static const struct cnl_ddi_buf_trans * +cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) +{ + u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; + + if (dev_priv->vbt.edp.low_vswing) { + if (voltage == VOLTAGE_INFO_0_85V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); + return cnl_ddi_translations_edp_0_85V; + } else if (voltage == VOLTAGE_INFO_0_95V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); + return cnl_ddi_translations_edp_0_95V; + } else if (voltage == VOLTAGE_INFO_1_05V) { + *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V); + return cnl_ddi_translations_edp_1_05V; + } else + MISSING_CASE(voltage); + return NULL; + } else { + return cnl_get_buf_trans_dp(dev_priv, n_entries); + } +} + static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) { int n_hdmi_entries; @@ -599,7 +660,10 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por if (IS_GEN9_LP(dev_priv)) return hdmi_level; - if (IS_GEN9_BC(dev_priv)) { + if (IS_CANNONLAKE(dev_priv)) { + cnl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); + hdmi_default_entry = n_hdmi_entries - 1; + } else if (IS_GEN9_BC(dev_priv)) { skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); hdmi_default_entry = 8; } else if (IS_BROADWELL(dev_priv)) { @@ -688,9 +752,6 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder) enum port port = intel_ddi_get_encoder_port(encoder); const struct ddi_buf_trans *ddi_translations; - if (IS_GEN9_LP(dev_priv)) - return; - switch (encoder->type) { case INTEL_OUTPUT_EDP: ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv, @@ -741,9 +802,6 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder) enum port port = intel_ddi_get_encoder_port(encoder); const struct ddi_buf_trans *ddi_translations_hdmi; - if (IS_GEN9_LP(dev_priv)) - return; - hdmi_level = intel_ddi_hdmi_level(dev_priv, port); if (IS_GEN9_BC(dev_priv)) { @@ -785,7 +843,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv, DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port)); } -static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll) +static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll) { switch (pll->id) { case DPLL_ID_WRPLL1: @@ -1154,7 +1212,7 @@ static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv, dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock; dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >> - DPLL_CFGCR0_DCO_FRAC_SHIFT) * ref_clock) / 0x8000; + DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000; return dco_freq / (p0 * p1 * p2 * 5); } @@ -1821,10 +1879,17 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); int n_entries; - if (encoder->type == INTEL_OUTPUT_EDP) - intel_ddi_get_buf_trans_edp(dev_priv, &n_entries); - else - intel_ddi_get_buf_trans_dp(dev_priv, &n_entries); + if (IS_CANNONLAKE(dev_priv)) { + if (encoder->type == INTEL_OUTPUT_EDP) + cnl_get_buf_trans_edp(dev_priv, &n_entries); + else + cnl_get_buf_trans_dp(dev_priv, &n_entries); + } else { + if (encoder->type == INTEL_OUTPUT_EDP) + intel_ddi_get_buf_trans_edp(dev_priv, &n_entries); + else + intel_ddi_get_buf_trans_dp(dev_priv, &n_entries); + } if (WARN_ON(n_entries < 1)) n_entries = 1; @@ -1835,90 +1900,23 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) 
DP_TRAIN_VOLTAGE_SWING_MASK; } -static const struct cnl_ddi_buf_trans * -cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, - u32 voltage, int *n_entries) -{ - if (voltage == VOLTAGE_INFO_0_85V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V); - return cnl_ddi_translations_hdmi_0_85V; - } else if (voltage == VOLTAGE_INFO_0_95V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V); - return cnl_ddi_translations_hdmi_0_95V; - } else if (voltage == VOLTAGE_INFO_1_05V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V); - return cnl_ddi_translations_hdmi_1_05V; - } - return NULL; -} - -static const struct cnl_ddi_buf_trans * -cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, - u32 voltage, int *n_entries) -{ - if (voltage == VOLTAGE_INFO_0_85V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V); - return cnl_ddi_translations_dp_0_85V; - } else if (voltage == VOLTAGE_INFO_0_95V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V); - return cnl_ddi_translations_dp_0_95V; - } else if (voltage == VOLTAGE_INFO_1_05V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V); - return cnl_ddi_translations_dp_1_05V; - } - return NULL; -} - -static const struct cnl_ddi_buf_trans * -cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, - u32 voltage, int *n_entries) -{ - if (dev_priv->vbt.edp.low_vswing) { - if (voltage == VOLTAGE_INFO_0_85V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V); - return cnl_ddi_translations_edp_0_85V; - } else if (voltage == VOLTAGE_INFO_0_95V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V); - return cnl_ddi_translations_edp_0_95V; - } else if (voltage == VOLTAGE_INFO_1_05V) { - *n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V); - return cnl_ddi_translations_edp_1_05V; - } - return NULL; - } else { - return cnl_get_buf_trans_dp(dev_priv, voltage, n_entries); - } -} - static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv, u32 level, enum port port, int type) { const struct cnl_ddi_buf_trans *ddi_translations = NULL; - u32 n_entries, val, voltage; + u32 n_entries, val; int ln; - /* - * Values for each port type are listed in - * voltage swing programming tables. - * Vccio voltage found in PORT_COMP_DW3. - */ - voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK; - if (type == INTEL_OUTPUT_HDMI) { - ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, - voltage, &n_entries); + ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries); } else if (type == INTEL_OUTPUT_DP) { - ddi_translations = cnl_get_buf_trans_dp(dev_priv, - voltage, &n_entries); + ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries); } else if (type == INTEL_OUTPUT_EDP) { - ddi_translations = cnl_get_buf_trans_edp(dev_priv, - voltage, &n_entries); + ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries); } - if (ddi_translations == NULL) { - MISSING_CASE(voltage); + if (WARN_ON(ddi_translations == NULL)) return; - } if (level >= n_entries) { DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1); @@ -1941,7 +1939,7 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv, val |= RCOMP_SCALAR(0x98); I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val); - /* Program PORT_TX_DW4 */ + /* Program PORT_TX_DW4 */ /* We cannot write to GRP. 
It would overwrite individual loadgen */ for (ln = 0; ln < 4; ln++) { val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK); val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val); } - /* Program PORT_TX_DW5 */ + /* Program PORT_TX_DW5 */ /* All DW5 values are fixed for every table entry */ val = I915_READ(CNL_PORT_TX_DW5_LN0(port)); val &= ~RTERM_SELECT_MASK; @@ -1961,7 +1959,7 @@ static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv, val |= TAP3_DISABLE; I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val); - /* Program PORT_TX_DW7 */ + /* Program PORT_TX_DW7 */ val = I915_READ(CNL_PORT_TX_DW7_LN0(port)); val &= ~N_SCALAR_MASK; val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); @@ -2054,33 +2052,46 @@ static uint32_t translate_signal_level(int signal_levels) return 0; } -uint32_t ddi_signal_levels(struct intel_dp *intel_dp) +static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp) { - struct intel_digital_port *dport = dp_to_dig_port(intel_dp); - struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); - struct intel_encoder *encoder = &dport->base; uint8_t train_set = intel_dp->train_set[0]; int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_PRE_EMPHASIS_MASK); + + return translate_signal_level(signal_levels); +} + +u32 bxt_signal_levels(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + struct intel_encoder *encoder = &dport->base; enum port port = dport->port; - uint32_t level; + u32 level = intel_ddi_dp_level(intel_dp); - level = translate_signal_level(signal_levels); + if (IS_CANNONLAKE(dev_priv)) + cnl_ddi_vswing_sequence(encoder, level); + else + bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); + + return 0; +} + +uint32_t ddi_signal_levels(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev); + struct intel_encoder *encoder = &dport->base; + uint32_t level = intel_ddi_dp_level(intel_dp); if (IS_GEN9_BC(dev_priv)) - skl_ddi_set_iboost(encoder, level); - else if (IS_GEN9_LP(dev_priv)) - bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); - else if (IS_CANNONLAKE(dev_priv)) { - cnl_ddi_vswing_sequence(encoder, level); - /* DDI_BUF_CTL bits 27:24 are reserved on CNL */ - return 0; - } + skl_ddi_set_iboost(encoder, level); + return DDI_BUF_TRANS_SELECT(level); } static void intel_ddi_clk_select(struct intel_encoder *encoder, - struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = intel_ddi_get_encoder_port(encoder); @@ -2129,6 +2140,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum port port = intel_ddi_get_encoder_port(encoder); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + uint32_t level = intel_ddi_dp_level(intel_dp); WARN_ON(link_mst && (port == PORT_A || port == PORT_E)); @@ -2141,7 +2153,13 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); - intel_prepare_dp_ddi_buffers(encoder); + if (IS_CANNONLAKE(dev_priv)) + cnl_ddi_vswing_sequence(encoder, level); + else if (IS_GEN9_LP(dev_priv)) + bxt_ddi_vswing_sequence(dev_priv,
level, port, encoder->type); + else + intel_prepare_dp_ddi_buffers(encoder); + intel_ddi_init_dp_buf_reg(encoder); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); @@ -2150,14 +2168,14 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, } static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, - bool has_hdmi_sink, + bool has_infoframe, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state, - struct intel_shared_dpll *pll) + const struct intel_shared_dpll *pll) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); + struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct drm_encoder *drm_encoder = &encoder->base; enum port port = intel_ddi_get_encoder_port(encoder); int level = intel_ddi_hdmi_level(dev_priv, port); struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); @@ -2167,23 +2185,25 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); - intel_prepare_hdmi_ddi_buffers(encoder); - if (IS_GEN9_BC(dev_priv)) - skl_ddi_set_iboost(encoder, level); + if (IS_CANNONLAKE(dev_priv)) + cnl_ddi_vswing_sequence(encoder, level); else if (IS_GEN9_LP(dev_priv)) bxt_ddi_vswing_sequence(dev_priv, level, port, INTEL_OUTPUT_HDMI); - else if (IS_CANNONLAKE(dev_priv)) - cnl_ddi_vswing_sequence(encoder, level); + else + intel_prepare_hdmi_ddi_buffers(encoder); + + if (IS_GEN9_BC(dev_priv)) + skl_ddi_set_iboost(encoder, level); - intel_hdmi->set_infoframes(drm_encoder, - has_hdmi_sink, - crtc_state, conn_state); + intel_dig_port->set_infoframes(&encoder->base, + has_infoframe, + crtc_state, conn_state); } static void intel_ddi_pre_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { int type = encoder->type; @@ -2197,21 +2217,20 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder, } if (type == INTEL_OUTPUT_HDMI) { intel_ddi_pre_enable_hdmi(encoder, - pipe_config->has_hdmi_sink, + pipe_config->has_infoframe, pipe_config, conn_state, pipe_config->shared_dpll); } } static void intel_ddi_post_disable(struct intel_encoder *intel_encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_i915_private *dev_priv = to_i915(encoder->dev); enum port port = intel_ddi_get_encoder_port(intel_encoder); struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - struct intel_dp *intel_dp = NULL; int type = intel_encoder->type; uint32_t val; bool wait = false; @@ -2219,7 +2238,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder, /* old_crtc_state and old_conn_state are NULL when called from DP_MST */ if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { - intel_dp = enc_to_intel_dp(encoder); + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); } @@ -2238,7 +2258,14 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder, if (wait) intel_wait_ddi_buf_idle(dev_priv, port); - if 
(intel_dp) { + if (type == INTEL_OUTPUT_HDMI) { + dig_port->set_infoframes(encoder, false, + old_crtc_state, old_conn_state); + } + + if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) { + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + intel_edp_panel_vdd_on(intel_dp); intel_edp_panel_off(intel_dp); } @@ -2263,8 +2290,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder, } void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); uint32_t val; @@ -2296,8 +2323,8 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder, } static void intel_enable_ddi(struct intel_encoder *intel_encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_i915_private *dev_priv = to_i915(encoder->dev); @@ -2328,7 +2355,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder, intel_dp_stop_link_train(intel_dp); intel_edp_backlight_on(pipe_config, conn_state); - intel_psr_enable(intel_dp); + intel_psr_enable(intel_dp, pipe_config); intel_edp_drrs_enable(intel_dp, pipe_config); } @@ -2337,8 +2364,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder, } static void intel_disable_ddi(struct intel_encoder *intel_encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_encoder *encoder = &intel_encoder->base; int type = intel_encoder->type; @@ -2356,14 +2383,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder, struct intel_dp *intel_dp = enc_to_intel_dp(encoder); intel_edp_drrs_disable(intel_dp, old_crtc_state); - intel_psr_disable(intel_dp); + intel_psr_disable(intel_dp, old_crtc_state); intel_edp_backlight_off(old_conn_state); } } static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { uint8_t mask = pipe_config->lane_lat_optim_mask; @@ -2435,7 +2462,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - struct intel_hdmi *intel_hdmi; + struct intel_digital_port *intel_dig_port; u32 temp, flags = 0; /* XXX: DSI transcoder paranoia */ @@ -2474,9 +2501,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder, switch (temp & TRANS_DDI_MODE_SELECT_MASK) { case TRANS_DDI_MODE_SELECT_HDMI: pipe_config->has_hdmi_sink = true; - intel_hdmi = enc_to_intel_hdmi(&encoder->base); + intel_dig_port = enc_to_dig_port(&encoder->base); - if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) + if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config)) pipe_config->has_infoframe = true; if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) == @@ -2729,6 +2756,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) 
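Note that the infoframe hooks now hang off struct intel_digital_port (set_infoframes, infoframe_enabled) instead of struct intel_hdmi, and intel_ddi_init() below, like intel_dp_init() later in this diff, calls a new intel_infoframe_init(). Its body is not part of the hunks shown here; a plausible sketch is a simple per-platform vfunc selector (the vlv_/hsw_/g4x_ helper names are assumptions, only the two vfunc fields are taken from this diff):

void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
{
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);

	/* Hypothetical per-platform implementations. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dig_port->set_infoframes = vlv_set_infoframes;
		intel_dig_port->infoframe_enabled = vlv_infoframe_enabled;
	} else if (HAS_DDI(dev_priv)) {
		intel_dig_port->set_infoframes = hsw_set_infoframes;
		intel_dig_port->infoframe_enabled = hsw_infoframe_enabled;
	} else {
		intel_dig_port->set_infoframes = g4x_set_infoframes;
		intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
	}
}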
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->cloneable = 0; + intel_infoframe_init(intel_dig_port); + if (init_dp) { if (!intel_ddi_init_dp_connector(intel_dig_port)) goto err; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 5f91ddc78c7a..875d428ea75f 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -82,6 +82,39 @@ void intel_device_info_dump(struct drm_i915_private *dev_priv) #undef PRINT_FLAG } +static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; + const u32 fuse2 = I915_READ(GEN8_FUSE2); + + sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> + GEN10_F2_S_ENA_SHIFT; + sseu->subslice_mask = (1 << 4) - 1; + sseu->subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> + GEN10_F2_SS_DIS_SHIFT); + + sseu->eu_total = hweight32(~I915_READ(GEN8_EU_DISABLE0)); + sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE1)); + sseu->eu_total += hweight32(~I915_READ(GEN8_EU_DISABLE2)); + sseu->eu_total += hweight8(~(I915_READ(GEN10_EU_DISABLE3) & + GEN10_EU_DIS_SS_MASK)); + + /* + * CNL is expected to always have a uniform distribution + * of EU across subslices with the exception that any one + * EU in any one subslice may be fused off for die + * recovery. + */ + sseu->eu_per_subslice = sseu_subslice_total(sseu) ? + DIV_ROUND_UP(sseu->eu_total, + sseu_subslice_total(sseu)) : 0; + + /* No restrictions on Power Gating */ + sseu->has_slice_pg = 1; + sseu->has_subslice_pg = 1; + sseu->has_eu_pg = 1; +} + static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; @@ -343,7 +376,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->num_sprites[pipe] = 1; } - if (i915.disable_display) { + if (i915_modparams.disable_display) { DRM_INFO("Display disabled (module parameter)\n"); info->num_pipes = 0; } else if (info->num_pipes > 0 && @@ -409,10 +442,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) cherryview_sseu_info_init(dev_priv); else if (IS_BROADWELL(dev_priv)) broadwell_sseu_info_init(dev_priv); - else if (INTEL_INFO(dev_priv)->gen >= 9) + else if (INTEL_GEN(dev_priv) == 9) gen9_sseu_info_init(dev_priv); - - info->has_snoop = !info->has_llc; + else if (INTEL_GEN(dev_priv) >= 10) + gen10_sseu_info_init(dev_priv); DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask); DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask)); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 00cd17c76fdc..615c58e48613 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -3701,7 +3701,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) /* reset doesn't touch the display */ - if (!i915.force_reset_modeset_test && + if (!i915_modparams.force_reset_modeset_test && !gpu_reset_clobbers_display(dev_priv)) return; @@ -3757,7 +3757,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) int ret; /* reset doesn't touch the display */ - if (!i915.force_reset_modeset_test && + if (!i915_modparams.force_reset_modeset_test && !gpu_reset_clobbers_display(dev_priv)) return; @@ -3770,8 +3770,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) if (!gpu_reset_clobbers_display(dev_priv)) { /* for testing only restore the display */ ret = 
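gen10_sseu_info_init() above derives enabled-unit counts by complementing the fuse "disable" registers and population-counting the result. A tiny self-contained illustration of the idiom (the fuse value is made up):

#include <linux/bitops.h>
#include <linux/types.h>

static unsigned int enabled_eu_example(void)
{
	u32 eu_disable = 0xFFFFFF00;	/* low 8 EUs enabled, rest fused off */

	/* hweight32() counts set bits, so complementing the disable
	 * mask and counting gives the number of enabled units: 8. */
	return hweight32(~eu_disable);
}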
__intel_display_resume(dev, state, ctx); - if (ret) - DRM_ERROR("Restoring old state failed with %i\n", ret); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); } else { /* * The display has been reset as well, @@ -3804,15 +3804,14 @@ unlock: clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags); } -static void intel_update_pipe_config(struct intel_crtc *crtc, - struct intel_crtc_state *old_crtc_state) +static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *pipe_config = - to_intel_crtc_state(crtc->base.state); /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ - crtc->base.mode = crtc->base.state->mode; + crtc->base.mode = new_crtc_state->base.mode; /* * Update pipe size and adjust fitter if needed: the reason for this is @@ -3824,17 +3823,17 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, */ I915_WRITE(PIPESRC(crtc->pipe), - ((pipe_config->pipe_src_w - 1) << 16) | - (pipe_config->pipe_src_h - 1)); + ((new_crtc_state->pipe_src_w - 1) << 16) | + (new_crtc_state->pipe_src_h - 1)); /* on skylake this is done by detaching scalers */ if (INTEL_GEN(dev_priv) >= 9) { skl_detach_scalers(crtc); - if (pipe_config->pch_pfit.enabled) + if (new_crtc_state->pch_pfit.enabled) skylake_pfit_enable(crtc); } else if (HAS_PCH_SPLIT(dev_priv)) { - if (pipe_config->pch_pfit.enabled) + if (new_crtc_state->pch_pfit.enabled) ironlake_pfit_enable(crtc); else if (old_crtc_state->pch_pfit.enabled) ironlake_pfit_disable(crtc, true); @@ -4957,7 +4956,8 @@ void hsw_enable_ips(struct intel_crtc *crtc) assert_plane_enabled(dev_priv, crtc->plane); if (IS_BROADWELL(dev_priv)) { mutex_lock(&dev_priv->rps.hw_lock); - WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); + WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, + IPS_ENABLE | IPS_PCODE_CONTROL)); mutex_unlock(&dev_priv->rps.hw_lock); /* Quoting Art Runyan: "its not safe to expect any particular * value in IPS_CTL bit 31 after enabling IPS through the @@ -5118,7 +5118,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_atomic_state *old_state = old_crtc_state->base.state; struct intel_crtc_state *pipe_config = - to_intel_crtc_state(crtc->base.state); + intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state), + crtc); struct drm_plane *primary = crtc->base.primary; struct drm_plane_state *old_pri_state = drm_atomic_get_existing_plane_state(old_state, primary); @@ -5130,7 +5131,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) if (old_pri_state) { struct intel_plane_state *primary_state = - to_intel_plane_state(primary->state); + intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state), + to_intel_plane(primary)); struct intel_plane_state *old_primary_state = to_intel_plane_state(old_pri_state); @@ -5159,7 +5161,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, if (old_pri_state) { struct intel_plane_state *primary_state = - to_intel_plane_state(primary->state); + intel_atomic_get_new_plane_state(old_intel_state, + to_intel_plane(primary)); struct intel_plane_state *old_primary_state = to_intel_plane_state(old_pri_state); @@ -6038,7 +6041,7 @@ static 
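In the hsw_enable_ips() hunk above, the magic 0xc0000000 written through the DISPLAY_IPS_CONTROL pcode mailbox becomes IPS_ENABLE | IPS_PCODE_CONTROL. The definitions live in i915_reg.h rather than in this file, but given the old literal they presumably decompose as:

/* Presumed i915_reg.h definitions; ORed together they must
 * equal the old magic 0xc0000000. */
#define IPS_ENABLE		(1 << 31)
#define IPS_PCODE_CONTROL	(1 << 30)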
void intel_crtc_disable_noatomic(struct drm_crtc *crtc, intel_crtc->enabled_power_domains = 0; dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); - dev_priv->min_pixclk[intel_crtc->pipe] = 0; + dev_priv->min_cdclk[intel_crtc->pipe] = 0; } /* @@ -6283,6 +6286,9 @@ retry: static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv, struct intel_crtc_state *pipe_config) { + if (pipe_config->ips_force_disable) + return false; + if (pipe_config->pipe_bpp > 24) return false; @@ -6307,7 +6313,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - pipe_config->ips_enabled = i915.enable_ips && + pipe_config->ips_enabled = i915_modparams.enable_ips && hsw_crtc_supports_ips(crtc) && pipe_config_supports_ips(dev_priv, pipe_config); } @@ -6488,8 +6494,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes, static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { - if (i915.panel_use_ssc >= 0) - return i915.panel_use_ssc != 0; + if (i915_modparams.panel_use_ssc >= 0) + return i915_modparams.panel_use_ssc != 0; return dev_priv->vbt.lvds_use_ssc && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } @@ -9039,7 +9045,7 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, u32 temp; temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); - id = temp >> (port * 2); + id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2)) return; @@ -9304,11 +9310,11 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; - if (IS_BROADWELL(dev_priv) || dev_priv->info.gen >= 9) { + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV; - if (IS_GEMINILAKE(dev_priv) || dev_priv->info.gen >= 10) { + if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) { bool blend_mode_420 = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND; @@ -9753,7 +9759,7 @@ static void i9xx_disable_cursor(struct intel_plane *plane, /* VESA 640x480x72Hz mode to set on the pipe */ -static struct drm_display_mode load_detect_mode = { +static const struct drm_display_mode load_detect_mode = { DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), }; @@ -9788,7 +9794,7 @@ intel_framebuffer_pitch_for_width(int width, int bpp) } static u32 -intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) +intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp) { u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); return PAGE_ALIGN(pitch * mode->vdisplay); @@ -9796,7 +9802,7 @@ intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) static struct drm_framebuffer * intel_framebuffer_create_for_mode(struct drm_device *dev, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, int depth, int bpp) { struct drm_framebuffer *fb; @@ -9823,7 +9829,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, static struct drm_framebuffer * mode_fits_in_fbdev(struct drm_device *dev, - struct drm_display_mode *mode) + const struct drm_display_mode *mode) { #ifdef CONFIG_DRM_FBDEV_EMULATION struct drm_i915_private *dev_priv = to_i915(dev); @@ -9856,7 +9862,7 @@ mode_fits_in_fbdev(struct drm_device *dev, static int 
intel_modeset_setup_plane_state(struct drm_atomic_state *state, struct drm_crtc *crtc, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct drm_framebuffer *fb, int x, int y) { @@ -9890,7 +9896,7 @@ static int intel_modeset_setup_plane_state(struct drm_atomic_state *state, } int intel_get_load_detect_pipe(struct drm_connector *connector, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx) { @@ -10218,7 +10224,7 @@ int intel_dotclock_calculate(int link_freq, if (!m_n->link_n) return 0; - return div_u64((u64)m_n->link_m * link_freq, m_n->link_n); + return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); } static void ironlake_pch_clock_get(struct intel_crtc *crtc, @@ -10337,7 +10343,7 @@ static bool intel_wm_need_update(struct drm_plane *plane, return false; } -static bool needs_scaling(struct intel_plane_state *state) +static bool needs_scaling(const struct intel_plane_state *state) { int src_w = drm_rect_width(&state->base.src) >> 16; int src_h = drm_rect_height(&state->base.src) >> 16; @@ -10347,7 +10353,9 @@ static bool needs_scaling(struct intel_plane_state *state) return (src_w != dst_w || src_h != dst_h); } -int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, +int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, + struct drm_crtc_state *crtc_state, + const struct intel_plane_state *old_plane_state, struct drm_plane_state *plane_state) { struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); @@ -10356,10 +10364,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct intel_plane *plane = to_intel_plane(plane_state->plane); struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_plane_state *old_plane_state = - to_intel_plane_state(plane->base.state); bool mode_changed = needs_modeset(crtc_state); - bool was_crtc_enabled = crtc->state->active; + bool was_crtc_enabled = old_crtc_state->base.active; bool is_crtc_enabled = crtc_state->active; bool turn_off, turn_on, visible, was_visible; struct drm_framebuffer *fb = plane_state->fb; @@ -10850,7 +10856,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) struct intel_dpll_hw_state dpll_hw_state; struct intel_shared_dpll *shared_dpll; struct intel_crtc_wm_state wm_state; - bool force_thru; + bool force_thru, ips_force_disable; /* FIXME: before the switch to atomic started, a new pipe_config was * kzalloc'd. 
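intel_dotclock_calculate() above swaps an open-coded (u64) cast for mul_u32_u32(). The generic fallback in include/linux/math64.h is equivalent to the cast; the point of the helper is that architectures (32-bit x86 in particular) can override it with a single widening multiply instead of a full 64x64 multiplication:

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif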
Code that depends on any field being zero should be @@ -10861,6 +10867,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) shared_dpll = crtc_state->shared_dpll; dpll_hw_state = crtc_state->dpll_hw_state; force_thru = crtc_state->pch_pfit.force_thru; + ips_force_disable = crtc_state->ips_force_disable; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) wm_state = crtc_state->wm; @@ -10874,6 +10881,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) crtc_state->shared_dpll = shared_dpll; crtc_state->dpll_hw_state = dpll_hw_state; crtc_state->pch_pfit.force_thru = force_thru; + crtc_state->ips_force_disable = ips_force_disable; if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) crtc_state->wm = wm_state; @@ -12076,7 +12084,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; } - if (i915.fastboot && + if (i915_modparams.fastboot && intel_pipe_config_compare(dev_priv, to_intel_crtc_state(old_crtc_state), pipe_config, true)) { @@ -12129,73 +12137,10 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) return dev->driver->get_vblank_counter(dev, crtc->pipe); } -static void intel_atomic_wait_for_vblanks(struct drm_device *dev, - struct drm_i915_private *dev_priv, - unsigned crtc_mask) -{ - unsigned last_vblank_count[I915_MAX_PIPES]; - enum pipe pipe; - int ret; - - if (!crtc_mask) - return; - - for_each_pipe(dev_priv, pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); - - if (!((1 << pipe) & crtc_mask)) - continue; - - ret = drm_crtc_vblank_get(&crtc->base); - if (WARN_ON(ret != 0)) { - crtc_mask &= ~(1 << pipe); - continue; - } - - last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base); - } - - for_each_pipe(dev_priv, pipe) { - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); - long lret; - - if (!((1 << pipe) & crtc_mask)) - continue; - - lret = wait_event_timeout(dev->vblank[pipe].queue, - last_vblank_count[pipe] != - drm_crtc_vblank_count(&crtc->base), - msecs_to_jiffies(50)); - - WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe)); - - drm_crtc_vblank_put(&crtc->base); - } -} - -static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) -{ - /* fb updated, need to unpin old fb */ - if (crtc_state->fb_changed) - return true; - - /* wm changes, need vblank before final wm's */ - if (crtc_state->update_wm_post) - return true; - - if (crtc_state->wm.need_postvbl_update) - return true; - - return false; -} - static void intel_update_crtc(struct drm_crtc *crtc, struct drm_atomic_state *state, struct drm_crtc_state *old_crtc_state, - struct drm_crtc_state *new_crtc_state, - unsigned int *crtc_vblank_mask) + struct drm_crtc_state *new_crtc_state) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -12218,13 +12163,9 @@ static void intel_update_crtc(struct drm_crtc *crtc, } drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); - - if (needs_vblank_wait(pipe_config)) - *crtc_vblank_mask |= drm_crtc_mask(crtc); } -static void intel_update_crtcs(struct drm_atomic_state *state, - unsigned int *crtc_vblank_mask) +static void intel_update_crtcs(struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state, *new_crtc_state; @@ -12235,12 +12176,11 @@ static void intel_update_crtcs(struct drm_atomic_state *state, continue; intel_update_crtc(crtc, state, old_crtc_state, - new_crtc_state, crtc_vblank_mask); + new_crtc_state); } } -static void 
skl_update_crtcs(struct drm_atomic_state *state, - unsigned int *crtc_vblank_mask) +static void skl_update_crtcs(struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->dev); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); @@ -12274,7 +12214,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state, unsigned int cmask = drm_crtc_mask(crtc); intel_crtc = to_intel_crtc(crtc); - cstate = to_intel_crtc_state(crtc->state); + cstate = to_intel_crtc_state(new_crtc_state); pipe = intel_crtc->pipe; if (updated & cmask || !cstate->base.active) @@ -12299,7 +12239,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state, vbl_wait = true; intel_update_crtc(crtc, state, old_crtc_state, - new_crtc_state, crtc_vblank_mask); + new_crtc_state); if (vbl_wait) intel_wait_for_vblank(dev_priv, pipe); @@ -12359,9 +12299,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_crtc *crtc; struct intel_crtc_state *intel_cstate; - bool hw_check = intel_state->modeset; u64 put_domains[I915_MAX_PIPES] = {}; - unsigned crtc_vblank_mask = 0; int i; intel_atomic_commit_fence_wait(intel_state); @@ -12376,7 +12314,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (needs_modeset(new_crtc_state) || to_intel_crtc_state(new_crtc_state)->update_pipe) { - hw_check = true; put_domains[to_intel_crtc(crtc)->pipe] = modeset_get_crtc_power_domains(crtc, @@ -12403,7 +12340,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_check_cpu_fifo_underruns(dev_priv); intel_check_pch_fifo_underruns(dev_priv); - if (!crtc->state->active) { + if (!new_crtc_state->active) { /* * Make sure we don't call initial_watermarks * for ILK-style watermark updates. */ if (INTEL_GEN(dev_priv) >= 9) dev_priv->display.initial_watermarks(intel_state, - to_intel_crtc_state(crtc->state)); + to_intel_crtc_state(new_crtc_state)); } } } @@ -12451,7 +12388,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) } /* Now enable the clocks, plane, pipe, and connectors that we set up. */ - dev_priv->display.update_crtcs(state, &crtc_vblank_mask); + dev_priv->display.update_crtcs(state); /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. To * fix this: * - switch over to the vblank wait helper in the core after that since * we don't need our special handling any more.
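The hand-rolled intel_atomic_wait_for_vblanks() removed above (per-pipe vblank counters polled with wait_event_timeout()) is superseded by drm_atomic_helper_wait_for_flip_done(), called in the next hunk. The core helper simply waits on each commit's flip_done completion, which the driver signals from its flip-done handling; roughly, simplified from drm_atomic_helper.c:

void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
					  struct drm_atomic_state *old_state)
{
	struct drm_crtc_state *unused;
	struct drm_crtc *crtc;
	int i;

	for_each_new_crtc_in_state(old_state, crtc, unused, i) {
		struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
		int ret;

		if (!commit)
			continue;

		/* flip_done is completed once the flip has been latched */
		ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
		if (ret == 0)
			DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
				  crtc->base.id, crtc->name);
	}
}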
*/ - if (!state->legacy_cursor_update) - intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask); + drm_atomic_helper_wait_for_flip_done(dev, state); /* * Now that the vblank has passed, we can go ahead and program the @@ -12626,8 +12562,8 @@ static int intel_atomic_commit(struct drm_device *dev, intel_atomic_track_fbs(state); if (intel_state->modeset) { - memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, - sizeof(intel_state->min_pixclk)); + memcpy(dev_priv->min_cdclk, intel_state->min_cdclk, + sizeof(intel_state->min_cdclk)); dev_priv->active_crtcs = intel_state->active_crtcs; dev_priv->cdclk.logical = intel_state->cdclk.logical; dev_priv->cdclk.actual = intel_state->cdclk.actual; @@ -12656,6 +12592,58 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { .set_crc_source = intel_crtc_set_crc_source, }; +struct wait_rps_boost { + struct wait_queue_entry wait; + + struct drm_crtc *crtc; + struct drm_i915_gem_request *request; +}; + +static int do_rps_boost(struct wait_queue_entry *_wait, + unsigned mode, int sync, void *key) +{ + struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); + struct drm_i915_gem_request *rq = wait->request; + + gen6_rps_boost(rq, NULL); + i915_gem_request_put(rq); + + drm_crtc_vblank_put(wait->crtc); + + list_del(&wait->wait.entry); + kfree(wait); + return 1; +} + +static void add_rps_boost_after_vblank(struct drm_crtc *crtc, + struct dma_fence *fence) +{ + struct wait_rps_boost *wait; + + if (!dma_fence_is_i915(fence)) + return; + + if (INTEL_GEN(to_i915(crtc->dev)) < 6) + return; + + if (drm_crtc_vblank_get(crtc)) + return; + + wait = kmalloc(sizeof(*wait), GFP_KERNEL); + if (!wait) { + drm_crtc_vblank_put(crtc); + return; + } + + wait->request = to_request(dma_fence_get(fence)); + wait->crtc = crtc; + + wait->wait.func = do_rps_boost; + wait->wait.flags = 0; + + add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); +} + /** * intel_prepare_plane_fb - Prepare fb for usage on plane * @plane: drm plane to prepare for @@ -12753,12 +12741,22 @@ intel_prepare_plane_fb(struct drm_plane *plane, return ret; if (!new_state->fence) { /* implicit fencing */ + struct dma_fence *fence; + ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, obj->resv, NULL, false, I915_FENCE_TIMEOUT, GFP_KERNEL); if (ret < 0) return ret; + + fence = reservation_object_get_excl_rcu(obj->resv); + if (fence) { + add_rps_boost_after_vblank(new_state->crtc, fence); + dma_fence_put(fence); + } + } else { + add_rps_boost_after_vblank(new_state->crtc, new_state->fence); } return 0; @@ -12875,29 +12873,29 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *intel_cstate = - to_intel_crtc_state(crtc->state); struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state); struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_crtc_state->state); - bool modeset = needs_modeset(crtc->state); + struct intel_crtc_state *intel_cstate = + intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc); + bool modeset = needs_modeset(&intel_cstate->base); if (!modeset && (intel_cstate->base.color_mgmt_changed || intel_cstate->update_pipe)) { - intel_color_set_csc(crtc->state); - intel_color_load_luts(crtc->state); + intel_color_set_csc(&intel_cstate->base); + intel_color_load_luts(&intel_cstate->base); } /* Perform vblank evasion around commit operation 
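The wait_rps_boost machinery added above piggybacks on the CRTC's vblank waitqueue: rather than blocking a thread, it registers a custom wake function that runs from wake_up() on the queue, boosts the GPU clocks, and frees itself. The generic waitqueue pattern it relies on looks like this (an illustrative sketch, not i915 code):

#include <linux/slab.h>
#include <linux/wait.h>

struct boost_waiter {
	struct wait_queue_entry wait;
	void *payload;
};

/* Runs from wake_up() on the queue, with the queue lock held.
 * Returning nonzero tells the wakeup path this entry was woken. */
static int boost_wake_fn(struct wait_queue_entry *entry,
			 unsigned mode, int sync, void *key)
{
	struct boost_waiter *w = container_of(entry, typeof(*w), wait);

	/* ... do the one-shot work on w->payload here ... */

	list_del(&w->wait.entry);	/* one-shot: remove ourselves */
	kfree(w);
	return 1;
}

static void arm_boost_waiter(struct wait_queue_head *wq, void *payload)
{
	struct boost_waiter *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return;

	w->payload = payload;
	w->wait.flags = 0;
	w->wait.private = NULL;
	w->wait.func = boost_wake_fn;
	add_wait_queue(wq, &w->wait);
}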
*/ - intel_pipe_update_start(intel_crtc); + intel_pipe_update_start(intel_cstate); if (modeset) goto out; if (intel_cstate->update_pipe) - intel_update_pipe_config(intel_crtc, old_intel_cstate); + intel_update_pipe_config(old_intel_cstate, intel_cstate); else if (INTEL_GEN(dev_priv) >= 9) skl_detach_scalers(intel_crtc); @@ -12911,8 +12909,12 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_crtc_state->state); + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc); - intel_pipe_update_end(intel_crtc); + intel_pipe_update_end(new_crtc_state); } /** @@ -13061,6 +13063,14 @@ intel_legacy_cursor_update(struct drm_plane *plane, goto slow; old_plane_state = plane->state; + /* + * Don't do an async update if there is an outstanding commit modifying + * the plane. This prevents our async update's changes from getting + * overridden by a previous synchronous update's state. + */ + if (old_plane_state->commit && + !try_wait_for_completion(&old_plane_state->commit->hw_done)) + goto slow; /* * If any parameters change that may affect watermarks, @@ -13091,6 +13101,8 @@ intel_legacy_cursor_update(struct drm_plane *plane, new_plane_state->crtc_h = crtc_h; ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state), + to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */ + to_intel_plane_state(plane->state), to_intel_plane_state(new_plane_state)); if (ret) goto out_free; @@ -13120,17 +13132,12 @@ intel_legacy_cursor_update(struct drm_plane *plane, } old_fb = old_plane_state->fb; - old_vma = to_intel_plane_state(old_plane_state)->vma; i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb), intel_plane->frontbuffer_bit); /* Swap plane state */ - new_plane_state->fence = old_plane_state->fence; - *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state); - new_plane_state->fence = NULL; - new_plane_state->fb = old_fb; - to_intel_plane_state(new_plane_state)->vma = NULL; + plane->state = new_plane_state; if (plane->state->visible) { trace_intel_update_plane(plane, to_intel_crtc(crtc)); @@ -13142,13 +13149,17 @@ intel_legacy_cursor_update(struct drm_plane *plane, intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc)); } + old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma); if (old_vma) intel_unpin_fb_vma(old_vma); out_unlock: mutex_unlock(&dev_priv->drm.struct_mutex); out_free: - intel_plane_destroy_state(plane, new_plane_state); + if (ret) + intel_plane_destroy_state(plane, new_plane_state); + else + intel_plane_destroy_state(plane, old_plane_state); return ret; slow: @@ -13499,7 +13510,7 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; - drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); + drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); if (!drmmode_crtc) return -ENOENT; @@ -13663,7 +13674,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) } else if (HAS_PCH_SPLIT(dev_priv)) { int found; - dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D); + dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); if (has_edp_a(dev_priv)) intel_dp_init(dev_priv, DP_A, PORT_A); @@ -13706,14 +13717,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) * trust the port type the 
VBT declares as we've seen at least * HDMI ports that the VBT claim are DP or eDP. */ - has_edp = intel_dp_is_edp(dev_priv, PORT_B); + has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); has_port = intel_bios_is_port_present(dev_priv, PORT_B); if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); - has_edp = intel_dp_is_edp(dev_priv, PORT_C); + has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); has_port = intel_bios_is_port_present(dev_priv, PORT_C); if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); @@ -14206,7 +14217,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) dev_priv->display.fdi_link_train = hsw_fdi_link_train; } - if (dev_priv->info.gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) dev_priv->display.update_crtcs = skl_update_crtcs; else dev_priv->display.update_crtcs = intel_update_crtcs; @@ -15030,7 +15041,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - int pixclk = 0; + int min_cdclk = 0; memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); if (crtc_state->base.active) { @@ -15051,22 +15062,18 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) intel_crtc_compute_pixel_rate(crtc_state); - if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - pixclk = crtc_state->pixel_rate; - else - WARN_ON(dev_priv->display.modeset_calc_cdclk); - - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) - pixclk = DIV_ROUND_UP(pixclk * 100, 95); + if (dev_priv->display.modeset_calc_cdclk) { + min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); + if (WARN_ON(min_cdclk < 0)) + min_cdclk = 0; + } drm_calc_timestamping_constants(&crtc->base, &crtc_state->base.adjusted_mode); update_scanline_offset(crtc); } - dev_priv->min_pixclk[crtc->pipe] = pixclk; + dev_priv->min_cdclk[crtc->pipe] = min_cdclk; intel_pipe_config_sanity_check(dev_priv, crtc_state); } @@ -15184,6 +15191,7 @@ void intel_display_resume(struct drm_device *dev) if (!ret) ret = __intel_display_resume(dev, state, &ctx); + intel_enable_ipc(dev_priv); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 64134947c0aa..90e756c76f10 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -42,6 +42,7 @@ #include "i915_drv.h" #define DP_LINK_CHECK_TIMEOUT (10 * 1000) +#define DP_DPRX_ESI_LEN 14 /* Compliance test status bits */ #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 @@ -103,13 +104,13 @@ static const int cnl_rates[] = { 162000, 216000, 270000, static const int default_rates[] = { 162000, 270000, 540000 }; /** - * is_edp - is the given port attached to an eDP panel (either CPU or PCH) + * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) * @intel_dp: DP struct * * If a CPU or PCH DP output is attached to an eDP panel, this function * will return true, and false otherwise. 
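The readout hunk above replaces the open-coded pixel-clock bookkeeping (including the BDW "95% of cdclk with IPS" correction that used to live here) with intel_crtc_compute_min_cdclk(). That helper is added elsewhere in this series, presumably in intel_cdclk.c; based on the logic removed here it must look roughly like the following (a sketch under those assumptions, not the actual helper):

static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	int min_cdclk = crtc_state->pixel_rate;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
		min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);

	return min_cdclk;
}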
*/ -static bool is_edp(struct intel_dp *intel_dp) +bool intel_dp_is_edp(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); @@ -388,7 +389,7 @@ intel_dp_mode_valid(struct drm_connector *connector, max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); - if (is_edp(intel_dp) && fixed_mode) { + if (intel_dp_is_edp(intel_dp) && fixed_mode) { if (mode->hdisplay > fixed_mode->hdisplay) return MODE_PANEL; @@ -597,7 +598,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); /* We should never land here with regular DP ports */ - WARN_ON(!is_edp(intel_dp)); + WARN_ON(!intel_dp_is_edp(intel_dp)); WARN_ON(intel_dp->active_pipe != INVALID_PIPE && intel_dp->active_pipe != intel_dp->pps_pipe); @@ -644,7 +645,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); /* We should never land here with regular DP ports */ - WARN_ON(!is_edp(intel_dp)); + WARN_ON(!intel_dp_is_edp(intel_dp)); /* * TODO: BXT has 2 PPS instances. The correct port->PPS instance @@ -847,7 +848,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code, struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = to_i915(dev); - if (!is_edp(intel_dp) || code != SYS_RESTART) + if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART) return 0; pps_lock(intel_dp); @@ -907,7 +908,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = to_i915(dev); - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) { @@ -1681,7 +1682,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, else pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; - if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { + if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { struct drm_display_mode *panel_mode = intel_connector->panel.alt_fixed_mode; struct drm_display_mode *req_mode = &pipe_config->base.mode; @@ -1736,7 +1737,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. */ bpp = intel_dp_compute_bpp(intel_dp, pipe_config); - if (is_edp(intel_dp)) { + if (intel_dp_is_edp(intel_dp)) { /* Get bpp from vbt only for panels that don't have bpp in edid */ if (intel_connector->base.display_info.bpc == 0 && @@ -1829,7 +1830,7 @@ found: * DPLL0 VCO may need to be adjusted to get the correct * clock for eDP. This will affect cdclk as well.
*/ - if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) { + if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) { int vco; switch (pipe_config->port_clock / 2) { @@ -1861,7 +1862,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, } static void intel_dp_prepare(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) + const struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -2069,7 +2070,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return false; cancel_delayed_work(&intel_dp->panel_vdd_work); @@ -2119,7 +2120,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) { bool vdd; - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; pps_lock(intel_dp); @@ -2203,7 +2204,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) lockdep_assert_held(&dev_priv->pps_mutex); - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on", @@ -2226,7 +2227,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; DRM_DEBUG_KMS("Turn eDP port %c panel power on\n", @@ -2267,7 +2268,7 @@ static void edp_panel_on(struct intel_dp *intel_dp) void intel_edp_panel_on(struct intel_dp *intel_dp) { - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; pps_lock(intel_dp); @@ -2285,7 +2286,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) lockdep_assert_held(&dev_priv->pps_mutex); - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; DRM_DEBUG_KMS("Turn eDP port %c panel power off\n", @@ -2316,7 +2317,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) void intel_edp_panel_off(struct intel_dp *intel_dp) { - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; pps_lock(intel_dp); @@ -2360,7 +2361,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, { struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder); - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; DRM_DEBUG_KMS("\n"); @@ -2377,7 +2378,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp) u32 pp; i915_reg_t pp_ctrl_reg; - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; pps_lock(intel_dp); @@ -2401,7 +2402,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder); - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; DRM_DEBUG_KMS("\n"); @@ -2461,7 +2462,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state) #define assert_edp_pll_disabled(d) assert_edp_pll((d), false) static void ironlake_edp_pll_on(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config) + const struct intel_crtc_state *pipe_config) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -2666,7 +2667,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); - if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp && + if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp && pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { 
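With the old static is_edp() exported as intel_dp_is_edp() and the VBT-based port check renamed to intel_dp_is_port_edp() (see further below), the two easy-to-confuse predicates at least end up with distinct names and signatures, both taken directly from this diff:

/* runtime: is this DP encoder actually driving an eDP panel? */
bool intel_dp_is_edp(struct intel_dp *intel_dp);

/* VBT: is this port wired up as eDP on this platform? */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);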
/* * This is a big fat ugly hack. @@ -2688,33 +2689,55 @@ static void intel_dp_get_config(struct intel_encoder *encoder, } static void intel_disable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (old_crtc_state->has_audio) intel_audio_codec_disable(encoder); - if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv)) - intel_psr_disable(intel_dp); - /* Make sure the panel is off before trying to change the mode. But also * ensure that we have vdd while we switch off the panel. */ intel_edp_panel_vdd_on(intel_dp); intel_edp_backlight_off(old_conn_state); intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); intel_edp_panel_off(intel_dp); +} + +static void g4x_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + intel_disable_dp(encoder, old_crtc_state, old_conn_state); /* disable the port before the pipe on g4x */ - if (INTEL_GEN(dev_priv) < 5) - intel_dp_link_down(intel_dp); + intel_dp_link_down(intel_dp); +} + +static void ilk_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + intel_disable_dp(encoder, old_crtc_state, old_conn_state); +} + +static void vlv_disable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + intel_psr_disable(intel_dp, old_crtc_state); + + intel_disable_dp(encoder, old_crtc_state, old_conn_state); } static void ilk_post_disable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; @@ -2727,8 +2750,8 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder, } static void vlv_post_disable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); @@ -2736,8 +2759,8 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder, } static void chv_post_disable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_device *dev = encoder->base.dev; @@ -2842,7 +2865,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, } static void intel_dp_enable_port(struct intel_dp *intel_dp, - struct intel_crtc_state *old_crtc_state) + const struct intel_crtc_state *old_crtc_state) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = to_i915(dev); @@ -2866,8 +2889,8 @@ static void intel_dp_enable_port(struct 
intel_dp *intel_dp, } static void intel_enable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_device *dev = encoder->base.dev; @@ -2914,26 +2937,26 @@ static void intel_enable_dp(struct intel_encoder *encoder, } static void g4x_enable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { intel_enable_dp(encoder, pipe_config, conn_state); intel_edp_backlight_on(pipe_config, conn_state); } static void vlv_enable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); intel_edp_backlight_on(pipe_config, conn_state); - intel_psr_enable(intel_dp); + intel_psr_enable(intel_dp, pipe_config); } static void g4x_pre_enable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; @@ -3040,7 +3063,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) intel_dp->active_pipe = crtc->pipe; - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; /* now it's all ours */ @@ -3055,8 +3078,8 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp) } static void vlv_pre_enable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { vlv_phy_pre_encoder_enable(encoder); @@ -3064,8 +3087,8 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder, } static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { intel_dp_prepare(encoder, pipe_config); @@ -3073,8 +3096,8 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder, } static void chv_pre_enable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { chv_phy_pre_encoder_enable(encoder); @@ -3085,8 +3108,8 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder, } static void chv_dp_pre_pll_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { intel_dp_prepare(encoder, pipe_config); @@ -3094,8 +3117,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder, } static void chv_dp_post_pll_disable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct 
drm_connector_state *conn_state) { chv_phy_post_pll_disable(encoder); } @@ -3506,13 +3529,11 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp) uint32_t signal_levels, mask = 0; uint8_t train_set = intel_dp->train_set[0]; - if (HAS_DDI(dev_priv)) { + if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) { + signal_levels = bxt_signal_levels(intel_dp); + } else if (HAS_DDI(dev_priv)) { signal_levels = ddi_signal_levels(intel_dp); - - if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) - signal_levels = 0; - else - mask = DDI_BUF_EMP_MASK; + mask = DDI_BUF_EMP_MASK; } else if (IS_CHERRYVIEW(dev_priv)) { signal_levels = chv_signal_levels(intel_dp); } else if (IS_VALLEYVIEW(dev_priv)) { @@ -3784,7 +3805,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) return false; /* Don't clobber cached eDP rates. */ - if (!is_edp(intel_dp)) { + if (!intel_dp_is_edp(intel_dp)) { intel_dp_set_sink_rates(intel_dp); intel_dp_set_common_rates(intel_dp); } @@ -3806,7 +3827,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) * downstream port information. So, an early return here saves * time from performing other operations which are not required. */ - if (!is_edp(intel_dp) && !intel_dp->sink_count) + if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count) return false; if (!drm_dp_is_branch(intel_dp->dpcd)) @@ -3828,7 +3849,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) { u8 mstm_cap; - if (!i915.enable_dp_mst) + if (!i915_modparams.enable_dp_mst) return false; if (!intel_dp->can_mst) @@ -3846,7 +3867,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp) static void intel_dp_configure_mst(struct intel_dp *intel_dp) { - if (!i915.enable_dp_mst) + if (!i915_modparams.enable_dp_mst) return; if (!intel_dp->can_mst) @@ -3993,15 +4014,9 @@ intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) static bool intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) { - int ret; - - ret = drm_dp_dpcd_read(&intel_dp->aux, - DP_SINK_COUNT_ESI, - sink_irq_vector, 14); - if (ret != 14) - return false; - - return true; + return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, + sink_irq_vector, DP_DPRX_ESI_LEN) == + DP_DPRX_ESI_LEN; } static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp) @@ -4201,7 +4216,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) bool bret; if (intel_dp->is_mst) { - u8 esi[16] = { 0 }; + u8 esi[DP_DPRX_ESI_LEN] = { 0 }; int ret = 0; int retry; bool handled; @@ -4396,7 +4411,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) if (!intel_dp_get_dpcd(intel_dp)) return connector_status_disconnected; - if (is_edp(intel_dp)) + if (intel_dp_is_edp(intel_dp)) return connector_status_connected; /* if there's no downstream port, we're done */ @@ -4712,7 +4727,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain); /* Can't disconnect eDP, but you can close the lid... 
*/ - if (is_edp(intel_dp)) + if (intel_dp_is_edp(intel_dp)) status = edp_detect(intel_dp); else if (intel_digital_port_connected(to_i915(dev), dp_to_dig_port(intel_dp))) @@ -4738,10 +4753,6 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) if (intel_encoder->type != INTEL_OUTPUT_EDP) intel_encoder->type = INTEL_OUTPUT_DP; - DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", - yesno(intel_dp_source_supports_hbr2(intel_dp)), - yesno(drm_dp_tps3_supported(intel_dp->dpcd))); - if (intel_dp->reset_link_params) { /* Initial max link lane count */ intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); @@ -4792,7 +4803,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) intel_dp->aux.i2c_defer_count = 0; intel_dp_set_edid(intel_dp); - if (is_edp(intel_dp) || intel_connector->detect_edid) + if (intel_dp_is_edp(intel_dp) || intel_connector->detect_edid) status = connector_status_connected; intel_dp->detect_done = true; @@ -4876,7 +4887,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) } /* if eDP has no EDID, fall back to fixed mode */ - if (is_edp(intel_attached_dp(connector)) && + if (intel_dp_is_edp(intel_attached_dp(connector)) && intel_connector->panel.fixed_mode) { struct drm_display_mode *mode; @@ -4927,8 +4938,10 @@ intel_dp_connector_destroy(struct drm_connector *connector) if (!IS_ERR_OR_NULL(intel_connector->edid)) kfree(intel_connector->edid); - /* Can't call is_edp() since the encoder may have been destroyed - * already. */ + /* + * Can't call intel_dp_is_edp() since the encoder may have been + * destroyed already. + */ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) intel_panel_fini(&intel_connector->panel); @@ -4942,7 +4955,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) struct intel_dp *intel_dp = &intel_dig_port->dp; intel_dp_mst_encoder_cleanup(intel_dig_port); - if (is_edp(intel_dp)) { + if (intel_dp_is_edp(intel_dp)) { cancel_delayed_work_sync(&intel_dp->panel_vdd_work); /* * vdd might still be enabled due to the delayed vdd off. @@ -4968,7 +4981,7 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return; /* @@ -5036,7 +5049,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder) if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) intel_dp->active_pipe = vlv_active_pipe(intel_dp); - if (is_edp(intel_dp)) { + if (intel_dp_is_edp(intel_dp)) { /* Reinit the power sequencer, in case BIOS did something with it. */ intel_dp_pps_init(encoder->dev, intel_dp); intel_edp_panel_vdd_sanitize(intel_dp); @@ -5137,7 +5150,7 @@ put_power: } /* check the VBT to see whether the eDP is on another port */ -bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port) +bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) { /* * eDP not supported on g4x, so bail out early just @@ -5160,7 +5173,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); - if (is_edp(intel_dp)) { + if (intel_dp_is_edp(intel_dp)) { u32 allowed_scalers; allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); @@ -5448,7 +5461,7 @@ static void intel_dp_pps_init(struct drm_device *dev, * The caller of this function needs to take a lock on dev_priv->drrs.
*/ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, - struct intel_crtc_state *crtc_state, + const struct intel_crtc_state *crtc_state, int refresh_rate) { struct intel_encoder *encoder; @@ -5467,11 +5480,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, return; } - /* - * FIXME: This needs proper synchronization with psr state for some - * platforms that cannot have PSR and DRRS enabled at the same time. - */ - dig_port = dp_to_dig_port(intel_dp); encoder = &dig_port->base; intel_crtc = to_intel_crtc(encoder->base.crtc); @@ -5545,7 +5553,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv, * Initializes frontbuffer_bits and drrs.dp */ void intel_edp_drrs_enable(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state) + const struct intel_crtc_state *crtc_state) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = to_i915(dev); @@ -5555,6 +5563,11 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp, return; } + if (dev_priv->psr.enabled) { + DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n"); + return; + } + mutex_lock(&dev_priv->drrs.mutex); if (WARN_ON(dev_priv->drrs.dp)) { DRM_ERROR("DRRS already enabled\n"); @@ -5576,7 +5589,7 @@ unlock: * */ void intel_edp_drrs_disable(struct intel_dp *intel_dp, - struct intel_crtc_state *old_crtc_state) + const struct intel_crtc_state *old_crtc_state) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = to_i915(dev); @@ -5826,7 +5839,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct edid *edid; enum pipe pipe = INVALID_PIPE; - if (!is_edp(intel_dp)) + if (!intel_dp_is_edp(intel_dp)) return true; /* @@ -6042,7 +6055,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp->DP = I915_READ(intel_dp->output_reg); intel_dp->attached_connector = intel_connector; - if (intel_dp_is_edp(dev_priv, port)) + if (intel_dp_is_port_edp(dev_priv, port)) type = DRM_MODE_CONNECTOR_eDP; else type = DRM_MODE_CONNECTOR_DisplayPort; @@ -6060,7 +6073,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, /* eDP only on port B and/or C on vlv/chv */ if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - is_edp(intel_dp) && port != PORT_B && port != PORT_C)) + intel_dp_is_edp(intel_dp) && + port != PORT_B && port != PORT_C)) return false; DRM_DEBUG_KMS("Adding %s connector on port %c\n", @@ -6088,7 +6102,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->get_hw_state = intel_connector_get_hw_state; /* init MST on ports that can support it */ - if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) && + if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) && (port == PORT_B || port == PORT_C || port == PORT_D)) intel_dp_mst_encoder_init(intel_dig_port, intel_connector->base.base.id); @@ -6144,7 +6158,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, goto err_encoder_init; intel_encoder->compute_config = intel_dp_compute_config; - intel_encoder->disable = intel_disable_dp; intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; intel_encoder->suspend = intel_dp_encoder_suspend; @@ -6152,18 +6165,24 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable; intel_encoder->pre_enable = chv_pre_enable_dp; intel_encoder->enable = vlv_enable_dp; + intel_encoder->disable = vlv_disable_dp; 
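Taken together with the g4x_/ilk_/vlv_disable_dp() split earlier in this file, the encoder hook assignments above and below resolve to one disable path per platform class; summarizing the assignments in this hunk:

/* Summary of the ->disable dispatch set up here:
 *   CHV / VLV:  vlv_disable_dp()  (tears down PSR before disabling)
 *   GEN5+:      ilk_disable_dp()  (plain disable, plus ilk_post_disable_dp())
 *   G4X:        g4x_disable_dp()  (disables the port before the pipe)
 */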
intel_encoder->post_disable = chv_post_disable_dp; intel_encoder->post_pll_disable = chv_dp_post_pll_disable; } else if (IS_VALLEYVIEW(dev_priv)) { intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; intel_encoder->pre_enable = vlv_pre_enable_dp; intel_encoder->enable = vlv_enable_dp; + intel_encoder->disable = vlv_disable_dp; intel_encoder->post_disable = vlv_post_disable_dp; + } else if (INTEL_GEN(dev_priv) >= 5) { + intel_encoder->pre_enable = g4x_pre_enable_dp; + intel_encoder->enable = g4x_enable_dp; + intel_encoder->disable = ilk_disable_dp; + intel_encoder->post_disable = ilk_post_disable_dp; } else { intel_encoder->pre_enable = g4x_pre_enable_dp; intel_encoder->enable = g4x_enable_dp; - if (INTEL_GEN(dev_priv) >= 5) - intel_encoder->post_disable = ilk_post_disable_dp; + intel_encoder->disable = g4x_disable_dp; } intel_dig_port->port = port; @@ -6186,6 +6205,9 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; dev_priv->hotplug.irq_port[port] = intel_dig_port; + if (port != PORT_A) + intel_infoframe_init(intel_dig_port); + if (!intel_dp_init_connector(intel_dig_port, intel_connector)) goto err_init_connector; diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c index d2830ba3162e..2bb2ceb9d463 100644 --- a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c @@ -264,7 +264,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; - if (!i915.enable_dpcd_backlight) + if (!i915_modparams.enable_dpcd_backlight) return -ENODEV; if (!intel_dp_aux_display_control_capable(intel_connector)) diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 93fc8ab9bb31..9a396f483f8b 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -123,8 +123,8 @@ static int intel_dp_mst_atomic_check(struct drm_connector *connector, } static void intel_mst_disable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; @@ -133,7 +133,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, to_intel_connector(old_conn_state->connector); int ret; - DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); @@ -146,8 +146,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, } static void intel_mst_post_disable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; @@ -155,8 +155,6 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, struct intel_connector *connector = to_intel_connector(old_conn_state->connector); - DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); - /* this can fail */ drm_dp_check_act_status(&intel_dp->mst_mgr); /* and this 
can also fail */ @@ -173,11 +171,12 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder, intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); } + DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); } static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; @@ -195,7 +194,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, connector->encoder = encoder; intel_mst->connector = connector; - DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); if (intel_dp->active_mst_links == 0) intel_dig_port->base.pre_enable(&intel_dig_port->base, @@ -219,8 +218,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, } static void intel_mst_enable_dp(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; @@ -229,7 +228,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, enum port port = intel_dig_port->port; int ret; - DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); + DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port), @@ -494,6 +493,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_i915_private *dev_priv = to_i915(connector->dev); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); drm_connector_unregister(connector); if (dev_priv->fbdev) @@ -505,7 +505,6 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, drm_modeset_unlock(&connector->dev->mode_config.connection_mutex); drm_connector_unreference(connector); - DRM_DEBUG_KMS("\n"); } static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index fa47285918f4..0cab667fff57 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -220,23 +220,23 @@ struct intel_encoder { struct intel_crtc_state *, struct drm_connector_state *); void (*pre_pll_enable)(struct intel_encoder *, - struct intel_crtc_state *, - struct drm_connector_state *); + const struct intel_crtc_state *, + const struct drm_connector_state *); void (*pre_enable)(struct intel_encoder *, - struct intel_crtc_state *, - struct drm_connector_state *); + const struct intel_crtc_state *, + const struct drm_connector_state *); void (*enable)(struct intel_encoder *, - struct intel_crtc_state *, - struct drm_connector_state *); + const struct intel_crtc_state *, + const struct drm_connector_state *); void (*disable)(struct intel_encoder *, - struct intel_crtc_state *, - struct drm_connector_state *); + const struct intel_crtc_state *, + const struct drm_connector_state *); void (*post_disable)(struct intel_encoder *, - struct intel_crtc_state *, - struct drm_connector_state *); + const struct 
intel_crtc_state *, + const struct drm_connector_state *); void (*post_pll_disable)(struct intel_encoder *, - struct intel_crtc_state *, - struct drm_connector_state *); + const struct intel_crtc_state *, + const struct drm_connector_state *); /* Read out the current hw state of this connector, returning true if * the encoder is active. If the encoder is enabled it also sets the pipe * it is connected to in the pipe parameter. */ @@ -384,7 +384,8 @@ struct intel_atomic_state { unsigned int active_pipe_changes; unsigned int active_crtcs; - unsigned int min_pixclk[I915_MAX_PIPES]; + /* minimum acceptable cdclk for each pipe */ + int min_cdclk[I915_MAX_PIPES]; struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS]; @@ -493,6 +494,8 @@ struct intel_crtc_scaler_state { /* drm_mode->private_flags */ #define I915_MODE_FLAG_INHERITED 1 +/* Flag to get scanline using frame time stamps */ +#define I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP (1<<1) struct intel_pipe_wm { struct intel_wm_level wm[5]; @@ -753,6 +756,7 @@ struct intel_crtc_state { struct intel_link_m_n fdi_m_n; bool ips_enabled; + bool ips_force_disable; bool enable_fbc; @@ -909,16 +913,6 @@ struct intel_hdmi { bool has_audio; bool rgb_quant_range_selectable; struct intel_connector *attached_connector; - void (*write_infoframe)(struct drm_encoder *encoder, - const struct intel_crtc_state *crtc_state, - enum hdmi_infoframe_type type, - const void *frame, ssize_t len); - void (*set_infoframes)(struct drm_encoder *encoder, - bool enable, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); - bool (*infoframe_enabled)(struct drm_encoder *encoder, - const struct intel_crtc_state *pipe_config); }; struct intel_dp_mst_encoder; @@ -1069,6 +1063,17 @@ struct intel_digital_port { bool release_cl2_override; uint8_t max_lanes; enum intel_display_power_domain ddi_io_power_domain; + + void (*write_infoframe)(struct drm_encoder *encoder, + const struct intel_crtc_state *crtc_state, + enum hdmi_infoframe_type type, + const void *frame, ssize_t len); + void (*set_infoframes)(struct drm_encoder *encoder, + bool enable, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); + bool (*infoframe_enabled)(struct drm_encoder *encoder, + const struct intel_crtc_state *pipe_config); }; struct intel_dp_mst_encoder { @@ -1189,6 +1194,30 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi) return container_of(intel_hdmi, struct intel_digital_port, hdmi); } +static inline struct intel_plane_state * +intel_atomic_get_new_plane_state(struct intel_atomic_state *state, + struct intel_plane *plane) +{ + return to_intel_plane_state(drm_atomic_get_new_plane_state(&state->base, + &plane->base)); +} + +static inline struct intel_crtc_state * +intel_atomic_get_old_crtc_state(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + return to_intel_crtc_state(drm_atomic_get_old_crtc_state(&state->base, + &crtc->base)); +} + +static inline struct intel_crtc_state * +intel_atomic_get_new_crtc_state(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + return to_intel_crtc_state(drm_atomic_get_new_crtc_state(&state->base, + &crtc->base)); +} + /* intel_fifo_underrun.c */ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable); @@ -1205,11 +1234,8 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv); /* i915_irq.c */ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); void
gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask); void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask); -void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); -void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv); void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv); @@ -1246,8 +1272,8 @@ void intel_crt_reset(struct drm_encoder *encoder); /* intel_ddi.c */ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state); + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state); void hsw_fdi_link_train(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state); void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port); @@ -1272,6 +1298,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, bool state); +u32 bxt_signal_levels(struct intel_dp *intel_dp); uint32_t ddi_signal_levels(struct intel_dp *intel_dp); u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder); @@ -1290,6 +1317,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv); void intel_audio_deinit(struct drm_i915_private *dev_priv); /* intel_cdclk.c */ +int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state); void skl_init_cdclk(struct drm_i915_private *dev_priv); void skl_uninit_cdclk(struct drm_i915_private *dev_priv); void cnl_init_cdclk(struct drm_i915_private *dev_priv); @@ -1377,7 +1405,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport, unsigned int expected_mask); int intel_get_load_detect_pipe(struct drm_connector *connector, - struct drm_display_mode *mode, + const struct drm_display_mode *mode, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); void intel_release_load_detect_pipe(struct drm_connector *connector, @@ -1401,7 +1429,9 @@ int intel_plane_atomic_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_property *property, uint64_t val); -int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, +int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, + struct drm_crtc_state *crtc_state, + const struct intel_plane_state *old_plane_state, struct drm_plane_state *plane_state); void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, @@ -1499,7 +1529,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state); -bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port); +bool intel_dp_is_edp(struct intel_dp *intel_dp); +bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port); enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd); void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, @@ -1518,9 +1549,9 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); 
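/*
 * A short usage sketch for the two eDP queries declared above (illustrative
 * only): intel_dp_is_edp() answers for an initialized encoder, while
 * intel_dp_is_port_edp() consults the VBT before any encoder exists, e.g.
 * when picking a connector type during output setup:
 *
 *	if (intel_dp_is_port_edp(dev_priv, port))
 *		type = DRM_MODE_CONNECTOR_eDP;
 *	else
 *		type = DRM_MODE_CONNECTOR_DisplayPort;
 */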
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); void intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state); + const struct intel_crtc_state *crtc_state); void intel_edp_drrs_disable(struct intel_dp *intel_dp, - struct intel_crtc_state *crtc_state); + const struct intel_crtc_state *crtc_state); void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits); void intel_edp_drrs_flush(struct drm_i915_private *dev_priv, @@ -1648,6 +1679,7 @@ void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder, bool high_tmds_clock_ratio, bool scrambling); void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); +void intel_infoframe_init(struct intel_digital_port *intel_dig_port); /* intel_lvds.c */ @@ -1719,8 +1751,10 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con /* intel_psr.c */ -void intel_psr_enable(struct intel_dp *intel_dp); -void intel_psr_disable(struct intel_dp *intel_dp); +void intel_psr_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); +void intel_psr_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state); void intel_psr_invalidate(struct drm_i915_private *dev_priv, unsigned frontbuffer_bits); void intel_psr_flush(struct drm_i915_private *dev_priv, @@ -1844,7 +1878,6 @@ void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_boost(struct drm_i915_gem_request *rq, struct intel_rps_client *rps); -void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req); void g4x_wm_get_hw_state(struct drm_device *dev); void vlv_wm_get_hw_state(struct drm_device *dev); void ilk_wm_get_hw_state(struct drm_device *dev); @@ -1867,9 +1900,11 @@ bool ilk_disable_lp_wm(struct drm_device *dev); int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6); int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, struct intel_crtc_state *cstate); +void intel_init_ipc(struct drm_i915_private *dev_priv); +void intel_enable_ipc(struct drm_i915_private *dev_priv); static inline int intel_enable_rc6(void) { - return i915.enable_rc6; + return i915_modparams.enable_rc6; } /* intel_sdvo.c */ @@ -1884,8 +1919,8 @@ struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int plane); int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); -void intel_pipe_update_start(struct intel_crtc *crtc); -void intel_pipe_update_end(struct intel_crtc *crtc); +void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); +void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); /* intel_tv.c */ void intel_tv_init(struct drm_i915_private *dev_priv); @@ -1957,7 +1992,9 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; -int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state, +int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *crtc_state, + const struct intel_plane_state *old_plane_state, struct intel_plane_state *intel_state); /* intel_color.c */ diff --git a/drivers/gpu/drm/i915/intel_dsi.c 
b/drivers/gpu/drm/i915/intel_dsi.c index 7442891762be..20a7b004ffd7 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -263,7 +263,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, /* XXX: old code skips write if control unchanged */ if (cmd == I915_READ(MIPI_DPI_CONTROL(port))) - DRM_ERROR("Same special packet %02x twice in a row.\n", cmd); + DRM_DEBUG_KMS("Same special packet %02x twice in a row.\n", cmd); I915_WRITE(MIPI_DPI_CONTROL(port), cmd); @@ -330,6 +330,10 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, adjusted_mode->flags = 0; if (IS_GEN9_LP(dev_priv)) { + /* Enable Frame time stamp based scanline reporting */ + adjusted_mode->private_flags |= + I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; + /* Dual link goes to DSI transcoder A. */ if (intel_dsi->ports == BIT(PORT_C)) pipe_config->cpu_transcoder = TRANSCODER_DSI_C; @@ -731,7 +735,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder) } static void intel_dsi_prepare(struct intel_encoder *intel_encoder, - struct intel_crtc_state *pipe_config); + const struct intel_crtc_state *pipe_config); static void intel_dsi_unprepare(struct intel_encoder *encoder); static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) @@ -783,8 +787,8 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec) */ static void intel_dsi_pre_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -878,8 +882,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, * the pre_enable hook. */ static void intel_dsi_enable_nop(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { DRM_DEBUG_KMS("\n"); } @@ -889,8 +893,8 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder, * the post_disable hook. 
*/ static void intel_dsi_disable(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); enum port port; @@ -925,8 +929,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) } static void intel_dsi_post_disable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); @@ -1066,7 +1070,7 @@ out_put_power: } static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) + struct intel_crtc_state *pipe_config) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1102,6 +1106,10 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, pixel_format_from_register_bits(fmt)); bpp = pipe_config->pipe_bpp; + /* Enable Frame time stamp based scanline reporting */ + adjusted_mode->private_flags |= + I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; + /* In terms of pixels */ adjusted_mode->crtc_hdisplay = I915_READ(BXT_MIPI_TRANS_HACTIVE(port)); @@ -1370,7 +1378,7 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt) } static void intel_dsi_prepare(struct intel_encoder *intel_encoder, - struct intel_crtc_state *pipe_config) + const struct intel_crtc_state *pipe_config) { struct drm_encoder *encoder = &intel_encoder->base; struct drm_device *dev = encoder->dev; diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index c0a027274c06..5c562e1f56ae 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -175,8 +175,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder, } static void intel_disable_dvo(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); @@ -189,8 +189,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder, } static void intel_enable_dvo(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dvo *intel_dvo = enc_to_dvo(encoder); @@ -258,8 +258,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder, } static void intel_dvo_pre_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 9ab596941372..a28e2a864cf1 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++
b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -39,6 +39,7 @@ #define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) #define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE) +#define GEN10_LR_CONTEXT_RENDER_SIZE (19 * PAGE_SIZE) #define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE) @@ -150,10 +151,11 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class) default: MISSING_CASE(INTEL_GEN(dev_priv)); case 10: + return GEN10_LR_CONTEXT_RENDER_SIZE; case 9: return GEN9_LR_CONTEXT_RENDER_SIZE; case 8: - return i915.enable_execlists ? + return i915_modparams.enable_execlists ? GEN8_LR_CONTEXT_RENDER_SIZE : GEN8_CXT_TOTAL_SIZE; case 7: @@ -301,7 +303,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv) &intel_engine_classes[engine->class]; int (*init)(struct intel_engine_cs *engine); - if (i915.enable_execlists) + if (i915_modparams.enable_execlists) init = class_info->init_execlists; else init = class_info->init_legacy; @@ -380,6 +382,37 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine) engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id]; } +static bool csb_force_mmio(struct drm_i915_private *i915) +{ + /* GVT emulation depends upon intercepting CSB mmio */ + if (intel_vgpu_active(i915)) + return true; + + /* + * IOMMU adds unpredictable latency causing the CSB write (from the + * GPU into the HWSP) to only be visible some time after the interrupt + * (missed breadcrumb syndrome). + */ + if (intel_vtd_active()) + return true; + + return false; +} + +static void intel_engine_init_execlist(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + execlists->csb_use_mmio = csb_force_mmio(engine->i915); + + execlists->port_mask = 1; + BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists)); + GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS); + + execlists->queue = RB_ROOT; + execlists->first = NULL; +} + /** * intel_engines_setup_common - setup engine state not requiring hw access * @engine: Engine to setup. 
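 *
 * This now covers the execlists bookkeeping as well: a single ELSP port to
 * begin with (port_mask = 1), an empty request queue (RB_ROOT, NULL first)
 * and the csb_use_mmio decision taken in csb_force_mmio() above.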
@@ -391,8 +424,7 @@ static void intel_engine_init_timeline(struct intel_engine_cs *engine) */ void intel_engine_setup_common(struct intel_engine_cs *engine) { - engine->execlist_queue = RB_ROOT; - engine->execlist_first = NULL; + intel_engine_init_execlist(engine); intel_engine_init_timeline(engine); intel_engine_init_hangcheck(engine); @@ -442,6 +474,116 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine) i915_vma_unpin_and_release(&engine->scratch); } +static void cleanup_phys_status_page(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + if (!dev_priv->status_page_dmah) + return; + + drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); + engine->status_page.page_addr = NULL; +} + +static void cleanup_status_page(struct intel_engine_cs *engine) +{ + struct i915_vma *vma; + struct drm_i915_gem_object *obj; + + vma = fetch_and_zero(&engine->status_page.vma); + if (!vma) + return; + + obj = vma->obj; + + i915_vma_unpin(vma); + i915_vma_close(vma); + + i915_gem_object_unpin_map(obj); + __i915_gem_object_release_unless_active(obj); +} + +static int init_status_page(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + unsigned int flags; + void *vaddr; + int ret; + + obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); + if (IS_ERR(obj)) { + DRM_ERROR("Failed to allocate status page\n"); + return PTR_ERR(obj); + } + + ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); + if (ret) + goto err; + + vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err; + } + + flags = PIN_GLOBAL; + if (!HAS_LLC(engine->i915)) + /* On g33, we cannot place HWS above 256MiB, so + * restrict its pinning to the low mappable arena. + * Though this restriction is not documented for + * gen4, gen5, or byt, they also behave similarly + * and hang if the HWS is placed at the top of the + * GTT. To generalise, it appears that all !llc + * platforms have issues with us placing the HWS + * above the mappable region (even though we never + * actually map it). + */ + flags |= PIN_MAPPABLE; + else + flags |= PIN_HIGH; + ret = i915_vma_pin(vma, 0, 4096, flags); + if (ret) + goto err; + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + goto err_unpin; + } + + engine->status_page.vma = vma; + engine->status_page.ggtt_offset = i915_ggtt_offset(vma); + engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE); + + DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", + engine->name, i915_ggtt_offset(vma)); + return 0; + +err_unpin: + i915_vma_unpin(vma); +err: + i915_gem_object_put(obj); + return ret; +} + +static int init_phys_status_page(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + + GEM_BUG_ON(engine->id != RCS); + + dev_priv->status_page_dmah = + drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); + if (!dev_priv->status_page_dmah) + return -ENOMEM; + + engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(engine->status_page.page_addr, 0, PAGE_SIZE); + + return 0; +} + /** * intel_engines_init_common - initialize engine state which might require hw access * @engine: Engine to initialize.
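 *
 * Initialization now finishes by allocating the hardware status page:
 * a physically addressed page via init_phys_status_page() where
 * HWS_NEEDS_PHYSICAL() holds, otherwise a GGTT-pinned object via
 * init_status_page(); the matching cleanup_*_status_page() calls sit in
 * intel_engine_cleanup_common().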
@@ -477,10 +619,21 @@ int intel_engine_init_common(struct intel_engine_cs *engine) ret = i915_gem_render_state_init(engine); if (ret) - goto err_unpin; + goto err_breadcrumbs; + + if (HWS_NEEDS_PHYSICAL(engine->i915)) + ret = init_phys_status_page(engine); + else + ret = init_status_page(engine); + if (ret) + goto err_rs_fini; return 0; +err_rs_fini: + i915_gem_render_state_fini(engine); +err_breadcrumbs: + intel_engine_fini_breadcrumbs(engine); err_unpin: engine->context_unpin(engine, engine->i915->kernel_context); return ret; @@ -497,6 +650,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) { intel_engine_cleanup_scratch(engine); + if (HWS_NEEDS_PHYSICAL(engine->i915)) + cleanup_phys_status_page(engine); + else + cleanup_status_page(engine); + i915_gem_render_state_fini(engine); intel_engine_fini_breadcrumbs(engine); intel_engine_cleanup_cmd_parser(engine); @@ -812,6 +970,19 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | ECOCHK_DIS_TLB); + if (HAS_LLC(dev_priv)) { + /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl + * + * Must match Display Engine. See + * WaCompressedResourceDisplayNewHashMode. + */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN9_PBE_COMPRESSED_HASH_SELECTION); + WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, + GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR); + WA_SET_BIT(MMCD_MISC_CTRL, MMCD_PCLA | MMCD_HOTSPOT_EN); + } + /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, @@ -981,12 +1152,14 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) GEN9_GAPS_TSV_CREDIT_DISABLE)); /* WaDisableGafsUnitClkGating:skl */ - WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) | + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE)); /* WaInPlaceDecompressionHang:skl */ if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); /* WaDisableLSQCROPERFforOCL:skl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); @@ -1022,8 +1195,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaDisablePooledEuLoadBalancingFix:bxt */ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { - WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2, - GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); + I915_WRITE(FF_SLICE_CS_CHICKEN2, + _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE)); } /* WaDisableSbeCacheDispatchPortSharing:bxt */ @@ -1059,8 +1232,56 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) /* WaInPlaceDecompressionHang:bxt */ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); + + return 0; +} + +static int cnl_init_workarounds(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + int ret; + + /* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */ + if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) + I915_WRITE(GAMT_CHKN_BIT_REG, + (I915_READ(GAMT_CHKN_BIT_REG) | + GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT)); + + /* WaForceContextSaveRestoreNonCoherent:cnl */ + 
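	/*
	 * (Illustrative aside) HDC_CHICKEN0 takes masked writes, hence the
	 * WA_SET_BIT_MASKED() below; the non-masked registers touched in
	 * these workaround lists are instead updated read-modify-write,
	 * along the lines of:
	 *
	 *	I915_WRITE(reg, I915_READ(reg) | bit);
	 */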
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT); + + /* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */ + if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0)) + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5); + + /* WaDisableReplayBufferBankArbitrationOptimization:cnl */ + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + + /* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */ + if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0)) + WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, + GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE); + + /* WaInPlaceDecompressionHang:cnl */ + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); + + /* WaPushConstantDereferenceHoldDisable:cnl */ + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE); + + /* FtrEnableFastAnisoL1BankingFix: cnl */ + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX); + + /* WaEnablePreemptionGranularityControlByUMD:cnl */ + ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); + if (ret) + return ret; return 0; } @@ -1080,8 +1301,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) /* WaDisableDynamicCreditSharing:kbl */ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) - WA_SET_BIT(GAMT_CHKN_BIT_REG, - GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); + I915_WRITE(GAMT_CHKN_BIT_REG, + (I915_READ(GAMT_CHKN_BIT_REG) | + GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING)); /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0)) @@ -1094,7 +1316,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaDisableGafsUnitClkGating:kbl */ - WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) | + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE)); /* WaDisableSbeCacheDispatchPortSharing:kbl */ WA_SET_BIT_MASKED( @@ -1102,8 +1325,9 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); /* WaInPlaceDecompressionHang:kbl */ - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); /* WaDisableLSQCROPERFforOCL:kbl */ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); @@ -1147,7 +1371,8 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine) GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaDisableGafsUnitClkGating:cfl */ - WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(GEN7_UCGCTL4, (I915_READ(GEN7_UCGCTL4) | + GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE)); /* WaDisableSbeCacheDispatchPortSharing:cfl */ WA_SET_BIT_MASKED( @@ -1155,8 +1380,9 @@ static int cfl_init_workarounds(struct intel_engine_cs *engine) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); /* WaInPlaceDecompressionHang:cfl */ - WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, - GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); + I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, + (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) | + GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS)); return 0; } @@ -1185,6 +1411,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine) err = glk_init_workarounds(engine); else if (IS_COFFEELAKE(dev_priv)) err = cfl_init_workarounds(engine); + else if (IS_CANNONLAKE(dev_priv)) + err = cnl_init_workarounds(engine); else err = 0; if (err) @@ -1277,11 +1505,11 @@ bool
intel_engine_is_idle(struct intel_engine_cs *engine) return false; /* Both ports drained, no more ELSP submission? */ - if (port_request(&engine->execlist_port[0])) + if (port_request(&engine->execlists.port[0])) return false; /* ELSP is empty, but there are ready requests? */ - if (READ_ONCE(engine->execlist_first)) + if (READ_ONCE(engine->execlists.first)) return false; /* Ring stopped? */ @@ -1330,8 +1558,23 @@ void intel_engines_mark_idle(struct drm_i915_private *i915) for_each_engine(engine, i915, id) { intel_engine_disarm_breadcrumbs(engine); i915_gem_batch_pool_fini(&engine->batch_pool); - tasklet_kill(&engine->irq_tasklet); - engine->no_priolist = false; + tasklet_kill(&engine->execlists.irq_tasklet); + engine->execlists.no_priolist = false; + } +} + +bool intel_engine_can_store_dword(struct intel_engine_cs *engine) +{ + switch (INTEL_GEN(engine->i915)) { + case 2: + return false; /* uses physical not virtual addresses */ + case 3: + /* maybe only uses physical not virtual addresses */ + return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); + case 6: + return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ + default: + return true; } } diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 8c8ead2276e0..8e3a05505f49 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -291,6 +291,19 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) u32 dpfc_ctl; int threshold = dev_priv->fbc.threshold; + /* Display WA #0529: skl, kbl, bxt. */ + if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) { + u32 val = I915_READ(CHICKEN_MISC_4); + + val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK); + + if (i915_gem_object_get_tiling(params->vma->obj) != + I915_TILING_X) + val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride; + + I915_WRITE(CHICKEN_MISC_4, val); + } + dpfc_ctl = 0; if (IS_IVYBRIDGE(dev_priv)) dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane); @@ -846,7 +859,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) return false; } - if (!i915.enable_fbc) { + if (!i915_modparams.enable_fbc) { fbc->no_fbc_reason = "disabled per module param or by default"; return false; } @@ -881,6 +894,10 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, params->fb.stride = cache->fb.stride; params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); + + if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) + params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w, + 32 * fbc->threshold) * 8; } static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, @@ -1293,8 +1310,8 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) */ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) { - if (i915.enable_fbc >= 0) - return !!i915.enable_fbc; + if (i915_modparams.enable_fbc >= 0) + return !!i915_modparams.enable_fbc; if (!HAS_FBC(dev_priv)) return 0; @@ -1338,8 +1355,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) if (need_fbc_vtd_wa(dev_priv)) mkwrite_device_info(dev_priv)->has_fbc = false; - i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); - DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); + i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv); + DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", + i915_modparams.enable_fbc); if (!HAS_FBC(dev_priv)) { fbc->no_fbc_reason = "unsupported by this chipset"; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c 
index 262e75c00dd2..f2bb8116227c 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -206,6 +206,7 @@ static int intelfb_create(struct drm_fb_helper *helper, } mutex_lock(&dev->struct_mutex); + intel_runtime_pm_get(dev_priv); /* Pin the GGTT vma for our access via info->screen_base. * This also validates that any existing fb inherited from the @@ -269,6 +270,7 @@ static int intelfb_create(struct drm_fb_helper *helper, fb->width, fb->height, i915_ggtt_offset(vma)); ifbdev->vma = vma; + intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(pdev, info); return 0; @@ -276,6 +278,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_unpin: intel_unpin_fb_vma(vma); out_unlock: + intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 04689600e337..77c123cc8817 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -88,14 +88,15 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); i915_reg_t reg = PIPESTAT(crtc->pipe); - u32 pipestat = I915_READ(reg) & 0xffff0000; + u32 enable_mask; lockdep_assert_held(&dev_priv->irq_lock); - if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) + if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0) return; - I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); + enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe); + I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS); POSTING_READ(reg); trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe); @@ -108,15 +109,16 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, { struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = PIPESTAT(pipe); - u32 pipestat = I915_READ(reg) & 0xffff0000; lockdep_assert_held(&dev_priv->irq_lock); if (enable) { - I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); + u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); + + I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS); POSTING_READ(reg); } else { - if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS) + if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); } } diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 5fa286074811..7eb6b4fa1d6f 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -56,10 +56,6 @@ #define WQ_LEN_SHIFT 16 #define WQ_NO_WCFLUSH_WAIT (1 << 27) #define WQ_PRESENT_WORKLOAD (1 << 28) -#define WQ_WORKLOAD_SHIFT 29 -#define WQ_WORKLOAD_GENERAL (0 << WQ_WORKLOAD_SHIFT) -#define WQ_WORKLOAD_GPGPU (1 << WQ_WORKLOAD_SHIFT) -#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT) #define WQ_RING_TAIL_SHIFT 20 #define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */ @@ -388,7 +384,11 @@ struct guc_ct_buffer_desc { /* Preempt to idle on quantum expiry */ #define POLICY_PREEMPT_TO_IDLE (1<<1) -#define POLICY_MAX_NUM_WI 15 +#define POLICY_MAX_NUM_WI 15 +#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000 +#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000 +#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000 +#define POLICY_DEFAULT_FAULT_TIME_US 250000 struct guc_policy { /* Time for one workload to execute. 
(in micro seconds) */ diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 8b0ae7fce7f2..c9e25be4db40 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -131,14 +131,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv) params[GUC_CTL_LOG_PARAMS] = guc->log.flags; - if (i915.guc_log_level >= 0) { + if (i915_modparams.guc_log_level >= 0) { params[GUC_CTL_DEBUG] = - i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; + i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT; } else params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED; /* If GuC submission is enabled, set up additional parameters here */ - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT; u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool); u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16; @@ -368,7 +368,8 @@ int intel_guc_init_hw(struct intel_guc *guc) guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS; DRM_INFO("GuC %s (firmware %s [version %u.%u])\n", - i915.enable_guc_submission ? "submission enabled" : "loaded", + i915_modparams.enable_guc_submission ? "submission enabled" : + "loaded", guc->fw.path, guc->fw.major_ver_found, guc->fw.minor_ver_found); @@ -390,8 +391,8 @@ int intel_guc_select_fw(struct intel_guc *guc) guc->fw.load_status = INTEL_UC_FIRMWARE_NONE; guc->fw.type = INTEL_UC_FW_TYPE_GUC; - if (i915.guc_firmware_path) { - guc->fw.path = i915.guc_firmware_path; + if (i915_modparams.guc_firmware_path) { + guc->fw.path = i915_modparams.guc_firmware_path; guc->fw.major_ver_wanted = 0; guc->fw.minor_ver_wanted = 0; } else if (IS_SKYLAKE(dev_priv)) { diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c index 16d3b8719cab..6571d96704ad 100644 --- a/drivers/gpu/drm/i915/intel_guc_log.c +++ b/drivers/gpu/drm/i915/intel_guc_log.c @@ -144,7 +144,7 @@ static int guc_log_relay_file_create(struct intel_guc *guc) struct dentry *log_dir; int ret; - if (i915.guc_log_level < 0) + if (i915_modparams.guc_log_level < 0) return 0; /* For now create the log file in /sys/kernel/debug/dri/0 dir */ @@ -480,7 +480,7 @@ err_runtime: guc_log_runtime_destroy(guc); err: /* logging will remain off */ - i915.guc_log_level = -1; + i915_modparams.guc_log_level = -1; return ret; } @@ -502,7 +502,8 @@ static void guc_flush_logs(struct intel_guc *guc) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - if (!i915.enable_guc_submission || (i915.guc_log_level < 0)) + if (!i915_modparams.enable_guc_submission || + (i915_modparams.guc_log_level < 0)) return; /* First disable the interrupts, will be re-enabled afterwards */ @@ -529,8 +530,8 @@ int intel_guc_log_create(struct intel_guc *guc) GEM_BUG_ON(guc->log.vma); - if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX) - i915.guc_log_level = GUC_LOG_VERBOSITY_MAX; + if (i915_modparams.guc_log_level > GUC_LOG_VERBOSITY_MAX) + i915_modparams.guc_log_level = GUC_LOG_VERBOSITY_MAX; /* The first page is to save log buffer state.
Allocate one * extra page for others in case of overlap */ @@ -555,7 +556,7 @@ int intel_guc_log_create(struct intel_guc *guc) guc->log.vma = vma; - if (i915.guc_log_level >= 0) { + if (i915_modparams.guc_log_level >= 0) { ret = guc_log_runtime_create(guc); if (ret < 0) goto err_vma; @@ -576,7 +577,7 @@ err_vma: i915_vma_unpin_and_release(&guc->log.vma); err: /* logging will be off */ - i915.guc_log_level = -1; + i915_modparams.guc_log_level = -1; return ret; } @@ -600,7 +601,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) return -EINVAL; /* This combination doesn't make sense & won't have any effect */ - if (!log_param.logging_enabled && (i915.guc_log_level < 0)) + if (!log_param.logging_enabled && (i915_modparams.guc_log_level < 0)) return 0; ret = guc_log_control(guc, log_param.value); @@ -610,7 +611,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) } if (log_param.logging_enabled) { - i915.guc_log_level = log_param.verbosity; + i915_modparams.guc_log_level = log_param.verbosity; /* If log_level was set as -1 at boot time, then the relay channel file * wouldn't have been created by now and interrupts also would not have @@ -633,7 +634,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) guc_flush_logs(guc); /* As logging is disabled, update log level to reflect that */ - i915.guc_log_level = -1; + i915_modparams.guc_log_level = -1; return ret; @@ -641,7 +642,8 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val) void i915_guc_log_register(struct drm_i915_private *dev_priv) { - if (!i915.enable_guc_submission || i915.guc_log_level < 0) + if (!i915_modparams.enable_guc_submission || + (i915_modparams.guc_log_level < 0)) return; mutex_lock(&dev_priv->drm.struct_mutex); @@ -651,7 +653,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv) void i915_guc_log_unregister(struct drm_i915_private *dev_priv) { - if (!i915.enable_guc_submission) + if (!i915_modparams.enable_guc_submission) return; mutex_lock(&dev_priv->drm.struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index c17ed0e62b67..b4a7f31f0214 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -58,7 +58,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv) */ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) { - if (!i915.enable_gvt) + if (!i915_modparams.enable_gvt) return; if (intel_vgpu_active(dev_priv)) { @@ -73,7 +73,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv) return; bail: - i915.enable_gvt = 0; + i915_modparams.enable_gvt = 0; } /** @@ -90,17 +90,17 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) { int ret; - if (!i915.enable_gvt) { + if (!i915_modparams.enable_gvt) { DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n"); return 0; } - if (!i915.enable_execlists) { + if (!i915_modparams.enable_execlists) { DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n"); return -EIO; } - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n"); return -EIO; } @@ -123,7 +123,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) return 0; bail: - i915.enable_gvt = 0; + i915_modparams.enable_gvt = 0; return 0; } diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c
b/drivers/gpu/drm/i915/intel_hangcheck.c index d9d87d96fb69..12ac270a5f93 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -428,7 +428,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) unsigned int hung = 0, stuck = 0; int busy_count = 0; - if (!i915.enable_hangcheck) + if (!i915_modparams.enable_hangcheck) return; if (!READ_ONCE(dev_priv->gt.awake)) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index e8abea7594ec..e6f8f30ce7bd 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -434,7 +434,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder, const struct intel_crtc_state *crtc_state, union hdmi_infoframe *frame) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); uint8_t buffer[VIDEO_DIP_DATA_SIZE]; ssize_t len; @@ -450,7 +450,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder, buffer[3] = 0; len++; - intel_hdmi->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len); + intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len); } static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, @@ -945,6 +945,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp, flags = 0; @@ -965,7 +966,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, if (tmp & HDMI_MODE_SELECT_HDMI) pipe_config->has_hdmi_sink = true; - if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) + if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config)) pipe_config->has_infoframe = true; if (tmp & SDVO_AUDIO_ENABLE) @@ -991,8 +992,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder, } static void intel_enable_hdmi_audio(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); @@ -1003,8 +1004,8 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder, } static void g4x_enable_hdmi(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1025,8 +1026,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder, } static void ibx_enable_hdmi(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1075,8 +1076,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder, } static void cpt_enable_hdmi(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const 
struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1130,18 +1131,20 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder, } static void vlv_enable_hdmi(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { } static void intel_disable_hdmi(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_digital_port *intel_dig_port = + hdmi_to_dig_port(intel_hdmi); struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); u32 temp; @@ -1184,14 +1187,15 @@ static void intel_disable_hdmi(struct intel_encoder *encoder, intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); } - intel_hdmi->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state); + intel_dig_port->set_infoframes(&encoder->base, false, + old_crtc_state, old_conn_state); intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); } static void g4x_disable_hdmi(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { if (old_crtc_state->has_audio) intel_audio_codec_disable(encoder); @@ -1200,16 +1204,16 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder, } static void pch_disable_hdmi(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { if (old_crtc_state->has_audio) intel_audio_codec_disable(encoder); } static void pch_post_disable_hdmi(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { intel_disable_hdmi(encoder, old_crtc_state, old_conn_state); } @@ -1314,7 +1318,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector, return status; } -static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) +static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); @@ -1642,24 +1646,24 @@ static int intel_hdmi_get_modes(struct drm_connector *connector) } static void intel_hdmi_pre_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + struct intel_digital_port *intel_dig_port = + enc_to_dig_port(&encoder->base); intel_hdmi_prepare(encoder, pipe_config); - intel_hdmi->set_infoframes(&encoder->base, - pipe_config->has_hdmi_sink, - pipe_config, conn_state); + intel_dig_port->set_infoframes(&encoder->base, + pipe_config->has_infoframe, + pipe_config, conn_state); } static void vlv_hdmi_pre_enable(struct 
intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1669,9 +1673,9 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a, 0x2b247878); - intel_hdmi->set_infoframes(&encoder->base, - pipe_config->has_hdmi_sink, - pipe_config, conn_state); + dport->set_infoframes(&encoder->base, + pipe_config->has_infoframe, + pipe_config, conn_state); g4x_enable_hdmi(encoder, pipe_config, conn_state); @@ -1679,8 +1683,8 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder, } static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { intel_hdmi_prepare(encoder, pipe_config); @@ -1688,8 +1692,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder, } static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { intel_hdmi_prepare(encoder, pipe_config); @@ -1697,23 +1701,23 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder, } static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { chv_phy_post_pll_disable(encoder); } static void vlv_hdmi_post_disable(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { /* Reset lanes to avoid HDMI flicker (VLV w/a) */ vlv_phy_reset_lanes(encoder); } static void chv_hdmi_post_disable(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1727,11 +1731,10 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder, } static void chv_hdmi_pre_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); - struct intel_hdmi *intel_hdmi = &dport->hdmi; struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1741,9 +1744,9 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder, /* Use 800mV-0dB */ chv_set_phy_signal_level(encoder, 128, 102, false); - intel_hdmi->set_infoframes(&encoder->base, - pipe_config->has_hdmi_sink, - pipe_config, conn_state); + dport->set_infoframes(&encoder->base, + pipe_config->has_infoframe, + pipe_config, 
conn_state); g4x_enable_hdmi(encoder, pipe_config, conn_state); @@ -1958,6 +1961,34 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, return ddc_pin; } +void intel_infoframe_init(struct intel_digital_port *intel_dig_port) +{ + struct drm_i915_private *dev_priv = + to_i915(intel_dig_port->base.base.dev); + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + intel_dig_port->write_infoframe = vlv_write_infoframe; + intel_dig_port->set_infoframes = vlv_set_infoframes; + intel_dig_port->infoframe_enabled = vlv_infoframe_enabled; + } else if (IS_G4X(dev_priv)) { + intel_dig_port->write_infoframe = g4x_write_infoframe; + intel_dig_port->set_infoframes = g4x_set_infoframes; + intel_dig_port->infoframe_enabled = g4x_infoframe_enabled; + } else if (HAS_DDI(dev_priv)) { + intel_dig_port->write_infoframe = hsw_write_infoframe; + intel_dig_port->set_infoframes = hsw_set_infoframes; + intel_dig_port->infoframe_enabled = hsw_infoframe_enabled; + } else if (HAS_PCH_IBX(dev_priv)) { + intel_dig_port->write_infoframe = ibx_write_infoframe; + intel_dig_port->set_infoframes = ibx_set_infoframes; + intel_dig_port->infoframe_enabled = ibx_infoframe_enabled; + } else { + intel_dig_port->write_infoframe = cpt_write_infoframe; + intel_dig_port->set_infoframes = cpt_set_infoframes; + intel_dig_port->infoframe_enabled = cpt_infoframe_enabled; + } +} + void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector) { @@ -1993,28 +2024,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, return; intel_encoder->hpd_pin = intel_hpd_pin(port); - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - intel_hdmi->write_infoframe = vlv_write_infoframe; - intel_hdmi->set_infoframes = vlv_set_infoframes; - intel_hdmi->infoframe_enabled = vlv_infoframe_enabled; - } else if (IS_G4X(dev_priv)) { - intel_hdmi->write_infoframe = g4x_write_infoframe; - intel_hdmi->set_infoframes = g4x_set_infoframes; - intel_hdmi->infoframe_enabled = g4x_infoframe_enabled; - } else if (HAS_DDI(dev_priv)) { - intel_hdmi->write_infoframe = hsw_write_infoframe; - intel_hdmi->set_infoframes = hsw_set_infoframes; - intel_hdmi->infoframe_enabled = hsw_infoframe_enabled; - } else if (HAS_PCH_IBX(dev_priv)) { - intel_hdmi->write_infoframe = ibx_write_infoframe; - intel_hdmi->set_infoframes = ibx_set_infoframes; - intel_hdmi->infoframe_enabled = ibx_infoframe_enabled; - } else { - intel_hdmi->write_infoframe = cpt_write_infoframe; - intel_hdmi->set_infoframes = cpt_set_infoframes; - intel_hdmi->infoframe_enabled = cpt_infoframe_enabled; - } - if (HAS_DDI(dev_priv)) intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; else @@ -2113,5 +2122,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, intel_dig_port->dp.output_reg = INVALID_MMIO_REG; intel_dig_port->max_lanes = 4; + intel_infoframe_init(intel_dig_port); + intel_hdmi_init_connector(intel_dig_port, intel_connector); } diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 6145fa0d6773..8b4b53525422 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -155,8 +155,8 @@ void intel_huc_select_fw(struct intel_huc *huc) huc->fw.load_status = INTEL_UC_FIRMWARE_NONE; huc->fw.type = INTEL_UC_FW_TYPE_HUC; - if (i915.huc_firmware_path) { - huc->fw.path = i915.huc_firmware_path; + if (i915_modparams.huc_firmware_path) { + huc->fw.path = i915_modparams.huc_firmware_path; huc->fw.major_ver_wanted = 0; 
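/*
 * Editor's aside: when the user overrides the firmware path via the
 * module parameter, the wanted-version fields are zeroed so the loader
 * accepts whatever blob was supplied. A minimal standalone sketch of
 * that selection pattern, with hypothetical names (struct uc_fw_sketch
 * and the path/version values are illustrative, not the driver's):
 */
struct uc_fw_sketch {
	const char *path;
	unsigned int major_ver_wanted;
	unsigned int minor_ver_wanted;
};

static void uc_fw_select_sketch(struct uc_fw_sketch *fw,
				const char *user_path)
{
	if (user_path && *user_path) {
		/* Trust the user-supplied blob: 0 means "any version". */
		fw->path = user_path;
		fw->major_ver_wanted = 0;
		fw->minor_ver_wanted = 0;
	} else {
		/* Fall back to a per-platform default with a hard
		 * version requirement (values illustrative only). */
		fw->path = "i915/huc_skl.bin";
		fw->major_ver_wanted = 1;
		fw->minor_ver_wanted = 7;
	}
}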
huc->fw.minor_ver_wanted = 0; } else if (IS_SKYLAKE(dev_priv)) { @@ -225,19 +225,22 @@ void intel_huc_init_hw(struct intel_huc *huc) } /** - * intel_guc_auth_huc() - authenticate ucode - * @dev_priv: the drm_i915_device + * intel_huc_auth() - Authenticate HuC uCode + * @huc: intel_huc structure + * + * Called after HuC and GuC firmware loading during intel_uc_init_hw(). * - * Triggers a HuC fw authentication request to the GuC via intel_guc_action_ - * authenticate_huc interface. + * This function pins HuC firmware image object into GGTT. + * Then it invokes GuC action to authenticate passing the offset to RSA + * signature through intel_guc_auth_huc(). It then waits for 50ms for + * firmware verification ACK and unpins the object. */ -void intel_guc_auth_huc(struct drm_i915_private *dev_priv) +void intel_huc_auth(struct intel_huc *huc) { - struct intel_guc *guc = &dev_priv->guc; - struct intel_huc *huc = &dev_priv->huc; + struct drm_i915_private *i915 = huc_to_i915(huc); + struct intel_guc *guc = &i915->guc; struct i915_vma *vma; int ret; - u32 data[2]; if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS) return; @@ -250,23 +253,19 @@ void intel_guc_auth_huc(struct drm_i915_private *dev_priv) return; } - /* Specify auth action and where public signature is. */ - data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC; - data[1] = guc_ggtt_offset(vma) + huc->fw.rsa_offset; - - ret = intel_guc_send(guc, data, ARRAY_SIZE(data)); + ret = intel_guc_auth_huc(guc, + guc_ggtt_offset(vma) + huc->fw.rsa_offset); if (ret) { DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret); goto out; } /* Check authentication status, it should be done by now */ - ret = intel_wait_for_register(dev_priv, - HUC_STATUS2, - HUC_FW_VERIFIED, - HUC_FW_VERIFIED, - 50); - + ret = intel_wait_for_register(i915, + HUC_STATUS2, + HUC_FW_VERIFIED, + HUC_FW_VERIFIED, + 50); if (ret) { DRM_ERROR("HuC: Authentication failed %d\n", ret); goto out; @@ -275,4 +274,3 @@ void intel_guc_auth_huc(struct drm_i915_private *dev_priv) out: i915_vma_unpin(vma); } - diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 6f972e6ec663..61cac26a8b05 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -244,7 +244,7 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && USES_PPGTT(dev_priv) && - i915.use_mmio_flip >= 0) + i915_modparams.use_mmio_flip >= 0) return 1; return 0; @@ -279,17 +279,73 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx, BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH)); desc = ctx->desc_template; /* bits 0-11 */ - desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE; + desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE; /* bits 12-31 */ desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ ce->lrc_desc = desc; } -uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static struct i915_priolist * +lookup_priolist(struct intel_engine_cs *engine, + struct i915_priotree *pt, + int prio) { - return ctx->engine[engine->id].lrc_desc; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_priolist *p; + struct rb_node **parent, *rb; + bool first = true; + + if (unlikely(execlists->no_priolist)) + prio = I915_PRIORITY_NORMAL; + +find_priolist: + /* most positive priority is scheduled first, equal priorities fifo */ + rb = NULL; + parent = &execlists->queue.rb_node; 
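/*
 * Editor's aside: the loop that follows is the classic rbtree
 * find-or-insert, keyed on priority (most positive first, FIFO within a
 * level); the driver additionally packs the "first node" flag into the
 * low bit of the returned pointer via ptr_pack_bits(). A minimal
 * standalone sketch of the same pattern, without the no_priolist
 * fallback (struct prio_node_sketch is hypothetical; the rbtree calls
 * are the real <linux/rbtree.h> API):
 */
#include <linux/rbtree.h>

struct prio_node_sketch {
	struct rb_node node;
	int prio;	/* key: higher values are dequeued first */
};

static struct prio_node_sketch *
prio_find_or_insert_sketch(struct rb_root *root,
			   struct prio_node_sketch *new)
{
	struct rb_node **parent = &root->rb_node, *rb = NULL;

	while (*parent) {
		struct prio_node_sketch *p;

		rb = *parent;
		p = rb_entry(rb, struct prio_node_sketch, node);
		if (new->prio > p->prio)
			parent = &rb->rb_left;	/* higher prio sorts left */
		else if (new->prio < p->prio)
			parent = &rb->rb_right;
		else
			return p;	/* equal prio: reuse node, FIFO list */
	}

	rb_link_node(&new->node, rb, parent);
	rb_insert_color(&new->node, root);
	return new;
}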
+ while (*parent) { + rb = *parent; + p = rb_entry(rb, typeof(*p), node); + if (prio > p->priority) { + parent = &rb->rb_left; + } else if (prio < p->priority) { + parent = &rb->rb_right; + first = false; + } else { + return p; + } + } + + if (prio == I915_PRIORITY_NORMAL) { + p = &execlists->default_priolist; + } else { + p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); + /* Convert an allocation failure to a priority bump */ + if (unlikely(!p)) { + prio = I915_PRIORITY_NORMAL; /* recurses just once */ + + /* To maintain ordering with all rendering, after an + * allocation failure we have to disable all scheduling. + * Requests will then be executed in fifo, and schedule + * will ensure that dependencies are emitted in fifo. + * There will be still some reordering with existing + * requests, so if userspace lied about their + * dependencies that reordering may be visible. + */ + execlists->no_priolist = true; + goto find_priolist; + } + } + + p->priority = prio; + INIT_LIST_HEAD(&p->requests); + rb_link_node(&p->node, rb, parent); + rb_insert_color(&p->node, &execlists->queue); + + if (first) + execlists->first = &p->node; + + return ptr_pack_bits(p, first, 1); } static inline void @@ -338,12 +394,12 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq) static void execlists_submit_ports(struct intel_engine_cs *engine) { - struct execlist_port *port = engine->execlist_port; + struct execlist_port *port = engine->execlists.port; u32 __iomem *elsp = engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); unsigned int n; - for (n = ARRAY_SIZE(engine->execlist_port); n--; ) { + for (n = execlists_num_ports(&engine->execlists); n--; ) { struct drm_i915_gem_request *rq; unsigned int count; u64 desc; @@ -398,7 +454,10 @@ static void port_assign(struct execlist_port *port, static void execlists_dequeue(struct intel_engine_cs *engine) { struct drm_i915_gem_request *last; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; + const struct execlist_port * const last_port = + &execlists->port[execlists->port_mask]; struct rb_node *rb; bool submit = false; @@ -412,8 +471,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) */ last->tail = last->wa_tail; - GEM_BUG_ON(port_isset(&port[1])); - /* Hardware submission is through 2 ports. Conceptually each port * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is * static for a context, and unique to each, so we only execute @@ -436,8 +493,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) */ spin_lock_irq(&engine->timeline->lock); - rb = engine->execlist_first; - GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb); + rb = execlists->first; + GEM_BUG_ON(rb_first(&execlists->queue) != rb); while (rb) { struct i915_priolist *p = rb_entry(rb, typeof(*p), node); struct drm_i915_gem_request *rq, *rn; @@ -460,7 +517,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * combine this request with the last, then we * are done. 
*/ - if (port != engine->execlist_port) { + if (port == last_port) { __list_del_many(&p->requests, &rq->priotree.link); goto done; @@ -485,25 +542,27 @@ static void execlists_dequeue(struct intel_engine_cs *engine) if (submit) port_assign(port, last); port++; + + GEM_BUG_ON(port_isset(port)); } INIT_LIST_HEAD(&rq->priotree.link); rq->priotree.priority = INT_MAX; __i915_gem_request_submit(rq); - trace_i915_gem_request_in(rq, port_index(port, engine)); + trace_i915_gem_request_in(rq, port_index(port, execlists)); last = rq; submit = true; } rb = rb_next(rb); - rb_erase(&p->node, &engine->execlist_queue); + rb_erase(&p->node, &execlists->queue); INIT_LIST_HEAD(&p->requests); if (p->priority != I915_PRIORITY_NORMAL) kmem_cache_free(engine->i915->priorities, p); } done: - engine->execlist_first = rb; + execlists->first = rb; if (submit) port_assign(port, last); spin_unlock_irq(&engine->timeline->lock); @@ -512,9 +571,83 @@ done: execlists_submit_ports(engine); } +static void +execlist_cancel_port_requests(struct intel_engine_execlists *execlists) +{ + struct execlist_port *port = execlists->port; + unsigned int num_ports = ARRAY_SIZE(execlists->port); + + while (num_ports-- && port_isset(port)) { + struct drm_i915_gem_request *rq = port_request(port); + + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); + i915_gem_request_put(rq); + + memset(port, 0, sizeof(*port)); + port++; + } +} + +static void execlists_cancel_requests(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_gem_request *rq, *rn; + struct rb_node *rb; + unsigned long flags; + + spin_lock_irqsave(&engine->timeline->lock, flags); + + /* Cancel the requests on the HW and clear the ELSP tracker. */ + execlist_cancel_port_requests(execlists); + + /* Mark all executing requests as skipped. */ + list_for_each_entry(rq, &engine->timeline->requests, link) { + GEM_BUG_ON(!rq->global_seqno); + if (!i915_gem_request_completed(rq)) + dma_fence_set_error(&rq->fence, -EIO); + } + + /* Flush the queued requests to the timeline list (for retiring). */ + rb = execlists->first; + while (rb) { + struct i915_priolist *p = rb_entry(rb, typeof(*p), node); + + list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) { + INIT_LIST_HEAD(&rq->priotree.link); + rq->priotree.priority = INT_MAX; + + dma_fence_set_error(&rq->fence, -EIO); + __i915_gem_request_submit(rq); + } + + rb = rb_next(rb); + rb_erase(&p->node, &execlists->queue); + INIT_LIST_HEAD(&p->requests); + if (p->priority != I915_PRIORITY_NORMAL) + kmem_cache_free(engine->i915->priorities, p); + } + + /* Remaining _unready_ requests will be nop'ed when submitted */ + + + execlists->queue = RB_ROOT; + execlists->first = NULL; + GEM_BUG_ON(port_isset(execlists->port)); + + /* + * The port is checked prior to scheduling a tasklet, but + * just in case we have suspended the tasklet to do the + * wedging make sure that when it wakes, it decides there + * is no work to do by clearing the irq_posted bit. 
+ */ + clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + + spin_unlock_irqrestore(&engine->timeline->lock, flags); +} + static bool execlists_elsp_ready(const struct intel_engine_cs *engine) { - const struct execlist_port *port = engine->execlist_port; + const struct execlist_port *port = engine->execlists.port; return port_count(&port[0]) + port_count(&port[1]) < 2; } @@ -525,8 +658,9 @@ static bool execlists_elsp_ready(const struct intel_engine_cs *engine) */ static void intel_lrc_irq_handler(unsigned long data) { - struct intel_engine_cs *engine = (struct intel_engine_cs *)data; - struct execlist_port *port = engine->execlist_port; + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct execlist_port *port = execlists->port; struct drm_i915_private *dev_priv = engine->i915; /* We can skip acquiring intel_runtime_pm_get() here as it was taken @@ -538,19 +672,25 @@ static void intel_lrc_irq_handler(unsigned long data) */ GEM_BUG_ON(!dev_priv->gt.awake); - intel_uncore_forcewake_get(dev_priv, engine->fw_domains); + intel_uncore_forcewake_get(dev_priv, execlists->fw_domains); /* Prefer doing test_and_clear_bit() as a two stage operation to avoid * imposing the cost of a locked atomic transaction when submitting a * new request (outside of the context-switch interrupt). */ while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) { - u32 __iomem *csb_mmio = - dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)); - u32 __iomem *buf = - dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)); + /* The HWSP contains a (cacheable) mirror of the CSB */ + const u32 *buf = + &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX]; unsigned int head, tail; + /* However GVT emulation depends upon intercepting CSB mmio */ + if (unlikely(execlists->csb_use_mmio)) { + buf = (u32 * __force) + (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0))); + execlists->csb_head = -1; /* force mmio read of CSB ptrs */ + } + /* The write will be ordered by the uncached read (itself * a memory barrier), so we do not need another in the form * of a locked instruction. The race between the interrupt @@ -562,9 +702,20 @@ static void intel_lrc_irq_handler(unsigned long data) * is set and we do a new loop. */ __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); - head = readl(csb_mmio); - tail = GEN8_CSB_WRITE_PTR(head); - head = GEN8_CSB_READ_PTR(head); + if (unlikely(execlists->csb_head == -1)) { /* following a reset */ + head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); + tail = GEN8_CSB_WRITE_PTR(head); + head = GEN8_CSB_READ_PTR(head); + execlists->csb_head = head; + } else { + const int write_idx = + intel_hws_csb_write_index(dev_priv) - + I915_HWS_CSB_BUF0_INDEX; + + head = execlists->csb_head; + tail = READ_ONCE(buf[write_idx]); + } + while (head != tail) { struct drm_i915_gem_request *rq; unsigned int status; @@ -590,13 +741,12 @@ static void intel_lrc_irq_handler(unsigned long data) * status notifier. */ - status = readl(buf + 2 * head); + status = READ_ONCE(buf[2 * head]); /* maybe mmio! 
*/ if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK)) continue; /* Check the context/desc id for this event matches */ - GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) != - port->context_id); + GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id); rq = port_unpack(port, &count); GEM_BUG_ON(count == 0); @@ -608,8 +758,7 @@ static void intel_lrc_irq_handler(unsigned long data) trace_i915_gem_request_out(rq); i915_gem_request_put(rq); - port[0] = port[1]; - memset(&port[1], 0, sizeof(port[1])); + execlists_port_complete(execlists, port); } else { port_set(port, port_pack(rq, count)); } @@ -619,78 +768,28 @@ static void intel_lrc_irq_handler(unsigned long data) !(status & GEN8_CTX_STATUS_ACTIVE_IDLE)); } - writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), - csb_mmio); + if (head != execlists->csb_head) { + execlists->csb_head = head; + writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8), + dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine))); + } } if (execlists_elsp_ready(engine)) execlists_dequeue(engine); - intel_uncore_forcewake_put(dev_priv, engine->fw_domains); + intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); } -static bool -insert_request(struct intel_engine_cs *engine, - struct i915_priotree *pt, - int prio) +static void insert_request(struct intel_engine_cs *engine, + struct i915_priotree *pt, + int prio) { - struct i915_priolist *p; - struct rb_node **parent, *rb; - bool first = true; - - if (unlikely(engine->no_priolist)) - prio = I915_PRIORITY_NORMAL; + struct i915_priolist *p = lookup_priolist(engine, pt, prio); -find_priolist: - /* most positive priority is scheduled first, equal priorities fifo */ - rb = NULL; - parent = &engine->execlist_queue.rb_node; - while (*parent) { - rb = *parent; - p = rb_entry(rb, typeof(*p), node); - if (prio > p->priority) { - parent = &rb->rb_left; - } else if (prio < p->priority) { - parent = &rb->rb_right; - first = false; - } else { - list_add_tail(&pt->link, &p->requests); - return false; - } - } - - if (prio == I915_PRIORITY_NORMAL) { - p = &engine->default_priolist; - } else { - p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC); - /* Convert an allocation failure to a priority bump */ - if (unlikely(!p)) { - prio = I915_PRIORITY_NORMAL; /* recurses just once */ - - /* To maintain ordering with all rendering, after an - * allocation failure we have to disable all scheduling. - * Requests will then be executed in fifo, and schedule - * will ensure that dependencies are emitted in fifo. - * There will be still some reordering with existing - * requests, so if userspace lied about their - * dependencies that reordering may be visible. - */ - engine->no_priolist = true; - goto find_priolist; - } - } - - p->priority = prio; - rb_link_node(&p->node, rb, parent); - rb_insert_color(&p->node, &engine->execlist_queue); - - INIT_LIST_HEAD(&p->requests); - list_add_tail(&pt->link, &p->requests); - - if (first) - engine->execlist_first = &p->node; - - return first; + list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests); + if (ptr_unmask_bits(p, 1) && execlists_elsp_ready(engine)) + tasklet_hi_schedule(&engine->execlists.irq_tasklet); } static void execlists_submit_request(struct drm_i915_gem_request *request) @@ -701,14 +800,9 @@ static void execlists_submit_request(struct drm_i915_gem_request *request) /* Will be called from irq-context when using foreign fences. 
*/ spin_lock_irqsave(&engine->timeline->lock, flags); - if (insert_request(engine, - &request->priotree, - request->priotree.priority)) { - if (execlists_elsp_ready(engine)) - tasklet_hi_schedule(&engine->irq_tasklet); - } + insert_request(engine, &request->priotree, request->priotree.priority); - GEM_BUG_ON(!engine->execlist_first); + GEM_BUG_ON(!engine->execlists.first); GEM_BUG_ON(list_empty(&request->priotree.link)); spin_unlock_irqrestore(&engine->timeline->lock, flags); @@ -914,27 +1008,14 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request) */ request->reserved_space += EXECLISTS_REQUEST_SIZE; - if (i915.enable_guc_submission) { - /* - * Check that the GuC has space for the request before - * going any further, as the i915_add_request() call - * later on mustn't fail ... - */ - ret = i915_guc_wq_reserve(request); - if (ret) - goto err; - } - cs = intel_ring_begin(request, 0); - if (IS_ERR(cs)) { - ret = PTR_ERR(cs); - goto err_unreserve; - } + if (IS_ERR(cs)) + return PTR_ERR(cs); if (!ce->initialised) { ret = engine->init_context(request); if (ret) - goto err_unreserve; + return ret; ce->initialised = true; } @@ -948,12 +1029,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request) request->reserved_space -= EXECLISTS_REQUEST_SIZE; return 0; - -err_unreserve: - if (i915.enable_guc_submission) - i915_guc_wq_unreserve(request); -err: - return ret; } /* @@ -1116,13 +1191,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) return batch; } -static u32 *gen9_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch) -{ - *batch++ = MI_BATCH_BUFFER_END; - - return batch; -} - #define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE) static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) @@ -1175,9 +1243,11 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) return -EINVAL; switch (INTEL_GEN(engine->i915)) { + case 10: + return 0; case 9: wa_bb_fn[0] = gen9_init_indirectctx_bb; - wa_bb_fn[1] = gen9_init_perctx_bb; + wa_bb_fn[1] = NULL; break; case 8: wa_bb_fn[0] = gen8_init_indirectctx_bb; @@ -1208,7 +1278,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) ret = -EINVAL; break; } - batch_ptr = wa_bb_fn[i](engine, batch_ptr); + if (wa_bb_fn[i]) + batch_ptr = wa_bb_fn[i](engine, batch_ptr); wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset); } @@ -1232,9 +1303,7 @@ static u8 gtiir[] = { static int gen8_init_common_ring(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv = engine->i915; - struct execlist_port *port = engine->execlist_port; - unsigned int n; - bool submit; + struct intel_engine_execlists * const execlists = &engine->execlists; int ret; ret = intel_mocs_init_engine(engine); @@ -1267,24 +1336,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine) I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]), GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift); clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); + execlists->csb_head = -1; /* After a GPU reset, we may have requests to replay */ - submit = false; - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) { - if (!port_isset(&port[n])) - break; - - DRM_DEBUG_DRIVER("Restarting %s:%d from 0x%x\n", - engine->name, n, - port_request(&port[n])->global_seqno); - - /* Discard the current inflight count */ - port_set(&port[n], port_request(&port[n])); - submit = true; - } - - if (submit && !i915.enable_guc_submission) - execlists_submit_ports(engine); + if (!i915_modparams.enable_guc_submission && 
execlists->first) + tasklet_schedule(&execlists->irq_tasklet); return 0; } @@ -1325,9 +1381,12 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine) static void reset_common_ring(struct intel_engine_cs *engine, struct drm_i915_gem_request *request) { - struct execlist_port *port = engine->execlist_port; + struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_gem_request *rq, *rn; struct intel_context *ce; - unsigned int n; + unsigned long flags; + + spin_lock_irqsave(&engine->timeline->lock, flags); /* * Catch up with any missed context-switch interrupts. @@ -1338,20 +1397,26 @@ static void reset_common_ring(struct intel_engine_cs *engine, * guessing the missed context-switch events by looking at what * requests were completed. */ - if (!request) { - for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) - i915_gem_request_put(port_request(&port[n])); - memset(engine->execlist_port, 0, sizeof(engine->execlist_port)); - return; - } + execlist_cancel_port_requests(execlists); - if (request->ctx != port_request(port)->ctx) { - i915_gem_request_put(port_request(port)); - port[0] = port[1]; - memset(&port[1], 0, sizeof(port[1])); + /* Push back any incomplete requests for replay after the reset. */ + list_for_each_entry_safe_reverse(rq, rn, + &engine->timeline->requests, link) { + struct i915_priolist *p; + + if (i915_gem_request_completed(rq)) + break; + + __i915_gem_request_unsubmit(rq); + + p = lookup_priolist(engine, + &rq->priotree, + rq->priotree.priority); + list_add(&rq->priotree.link, + &ptr_mask_bits(p, 1)->requests); } - GEM_BUG_ON(request->ctx != port_request(port)->ctx); + spin_unlock_irqrestore(&engine->timeline->lock, flags); /* If the request was innocent, we leave the request in the ELSP * and will try to replay it on restarting. The context image may @@ -1363,7 +1428,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, * and have to at least restore the RING register in the context * image back to the expected values to skip over the guilty request. */ - if (request->fence.error != -EIO) + if (!request || request->fence.error != -EIO) return; /* We want a simple context + ring to execute the breadcrumb update. @@ -1666,8 +1731,8 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) * Tasklet cannot be active at this point due intel_mark_active/idle * so this is just for documentation. 
*/ - if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) - tasklet_kill(&engine->irq_tasklet); + if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->execlists.irq_tasklet.state))) + tasklet_kill(&engine->execlists.irq_tasklet); dev_priv = engine->i915; @@ -1678,11 +1743,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (engine->status_page.vma) { - i915_gem_object_unpin_map(engine->status_page.vma->obj); - engine->status_page.vma = NULL; - } - intel_engine_cleanup_common(engine); lrc_destroy_wa_ctx(engine); @@ -1694,8 +1754,9 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine) static void execlists_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = execlists_submit_request; + engine->cancel_requests = execlists_cancel_requests; engine->schedule = execlists_schedule; - engine->irq_tasklet.func = intel_lrc_irq_handler; + engine->execlists.irq_tasklet.func = intel_lrc_irq_handler; } static void @@ -1729,24 +1790,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; } -static int -lrc_setup_hws(struct intel_engine_cs *engine, struct i915_vma *vma) -{ - const int hws_offset = LRC_PPHWSP_PN * PAGE_SIZE; - void *hws; - - /* The HWSP is part of the default context object in LRC mode. */ - hws = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(hws)) - return PTR_ERR(hws); - - engine->status_page.page_addr = hws + hws_offset; - engine->status_page.ggtt_offset = i915_ggtt_offset(vma) + hws_offset; - engine->status_page.vma = vma; - - return 0; -} - static void logical_ring_setup(struct intel_engine_cs *engine) { @@ -1770,32 +1813,23 @@ logical_ring_setup(struct intel_engine_cs *engine) RING_CONTEXT_STATUS_BUF_BASE(engine), FW_REG_READ); - engine->fw_domains = fw_domains; + engine->execlists.fw_domains = fw_domains; - tasklet_init(&engine->irq_tasklet, + tasklet_init(&engine->execlists.irq_tasklet, intel_lrc_irq_handler, (unsigned long)engine); logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); } -static int -logical_ring_init(struct intel_engine_cs *engine) +static int logical_ring_init(struct intel_engine_cs *engine) { - struct i915_gem_context *dctx = engine->i915->kernel_context; int ret; ret = intel_engine_init_common(engine); if (ret) goto error; - /* And setup the hardware status page. 
*/ - ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); - if (ret) { - DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret); - goto error; - } - return 0; error: @@ -1953,13 +1987,12 @@ static void execlists_init_reg_state(u32 *regs, CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0); CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0); if (rcs) { - CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0); + struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; + CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0); CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(base), 0); - - if (engine->wa_ctx.vma) { - struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; + if (wa_ctx->indirect_ctx.size) { u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); regs[CTX_RCS_INDIRECT_CTX + 1] = @@ -1968,6 +2001,11 @@ static void execlists_init_reg_state(u32 *regs, regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] = intel_lr_indirect_ctx_offset(engine) << 6; + } + + CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0); + if (wa_ctx->per_ctx.size) { + u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); regs[CTX_BB_PER_CTX_PTR + 1] = (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; @@ -2052,8 +2090,11 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); - /* One extra page as the sharing data between driver and GuC */ - context_size += PAGE_SIZE * LRC_PPHWSP_PN; + /* + * Before the actual start of the context image, we insert a few pages + * for our own use and for sharing with the GuC. + */ + context_size += LRC_HEADER_PAGES * PAGE_SIZE; ctx_obj = i915_gem_object_create(ctx->i915, context_size); if (IS_ERR(ctx_obj)) { diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 57ef5833c427..314adee7127a 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -25,6 +25,7 @@ #define _INTEL_LRC_H_ #include "intel_ringbuffer.h" +#include "i915_gem_context.h" #define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT @@ -69,17 +70,42 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine); /* Logical Ring Contexts */ -/* One extra page is added before LRC for GuC as shared data */ +/* + * We allocate a header at the start of the context image for our own + * use, therefore the actual location of the logical state is offset + * from the start of the VMA. The layout is + * + * | [guc] | [hwsp] [logical state] | + * |<- our header ->|<- context image ->| + * + */ +/* The first page is used for sharing data with the GuC */ #define LRC_GUCSHR_PN (0) -#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) -#define LRC_STATE_PN (LRC_PPHWSP_PN + 1) +#define LRC_GUCSHR_SZ (1) +/* At the start of the context image is its per-process HWS page */ +#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ) +#define LRC_PPHWSP_SZ (1) +/* Finally we have the logical state for the context */ +#define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ) + +/* + * Currently we include the PPHWSP in __intel_engine_context_size() so + * the size of the header is synonymous with the start of the PPHWSP. 
+ */ +#define LRC_HEADER_PAGES LRC_PPHWSP_PN struct drm_i915_private; struct i915_gem_context; void intel_lr_context_resume(struct drm_i915_private *dev_priv); -uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx, - struct intel_engine_cs *engine); + +static inline uint64_t +intel_lr_context_descriptor(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + return ctx->engine[engine->id].lrc_desc; +} + /* Execlists */ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 8e215777c7f4..a55954a89148 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -229,8 +229,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv, } static void intel_pre_enable_lvds(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -306,8 +306,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder, * Sets the power state for the panel. */ static void intel_enable_lvds(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); @@ -324,8 +324,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder, } static void intel_disable_lvds(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -339,8 +339,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder, } static void gmch_disable_lvds(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { intel_panel_disable_backlight(old_conn_state); @@ -349,15 +349,15 @@ static void gmch_disable_lvds(struct intel_encoder *encoder, } static void pch_disable_lvds(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { intel_panel_disable_backlight(old_conn_state); } static void pch_post_disable_lvds(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { intel_disable_lvds(encoder, old_crtc_state, old_conn_state); } @@ -880,8 +880,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) struct drm_i915_private *dev_priv = to_i915(dev); /* use the module option value if specified */ - if (i915.lvds_channel_mode > 0) - return i915.lvds_channel_mode == 2; + if (i915_modparams.lvds_channel_mode 
> 0) + return i915_modparams.lvds_channel_mode == 2; /* single channel LVDS is limited to 112 MHz */ if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 951e834dd274..28a778b785ac 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c @@ -30,6 +30,21 @@ #include "intel_drv.h" #include "i915_drv.h" +static void intel_connector_update_eld_conn_type(struct drm_connector *connector) +{ + u8 conn_type; + + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + conn_type = DRM_ELD_CONN_TYPE_DP; + } else { + conn_type = DRM_ELD_CONN_TYPE_HDMI; + } + + connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK; + connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type; +} + /** * intel_connector_update_modes - update connector from edid * @connector: DRM connector device to use @@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector, ret = drm_add_edid_modes(connector, edid); drm_edid_to_eld(connector, edid); + intel_connector_update_eld_conn_type(connector); + return ret; } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 98154efcb2f4..1d946240e55f 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -921,7 +921,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv) { struct intel_opregion *opregion = &dev_priv->opregion; const struct firmware *fw = NULL; - const char *name = i915.vbt_firmware; + const char *name = i915_modparams.vbt_firmware; int ret; if (!name || !*name) diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index aace22e7ccac..1b397b41cb4f 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -1134,7 +1134,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, if (!params) return -ENOMEM; - drmmode_crtc = drm_crtc_find(dev, put_image_rec->crtc_id); + drmmode_crtc = drm_crtc_find(dev, file_priv, put_image_rec->crtc_id); if (!drmmode_crtc) { ret = -ENOENT; goto out_free; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 3b1c5d783ee7..adc51e452e3e 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -379,13 +379,13 @@ enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv) { /* Assume that the BIOS does not lie through the OpRegion... */ - if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { + if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) { return *dev_priv->opregion.lid_state & 0x1 ? 
connector_status_connected : connector_status_disconnected; } - switch (i915.panel_ignore_lid) { + switch (i915_modparams.panel_ignore_lid) { case -2: return connector_status_connected; case -1: @@ -465,10 +465,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector, WARN_ON(panel->backlight.max == 0); - if (i915.invert_brightness < 0) + if (i915_modparams.invert_brightness < 0) return val; - if (i915.invert_brightness > 0 || + if (i915_modparams.invert_brightness > 0 || dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) { return panel->backlight.max - val + panel->backlight.min; } diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c index 8fbd2bd0877f..96043a51c1bf 100644 --- a/drivers/gpu/drm/i915/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/intel_pipe_crc.c @@ -506,8 +506,8 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, return 0; } -static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv, - bool enable) +static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv, + bool enable) { struct drm_device *dev = &dev_priv->drm; struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A); @@ -533,10 +533,24 @@ retry: goto put_state; } - pipe_config->pch_pfit.force_thru = enable; - if (pipe_config->cpu_transcoder == TRANSCODER_EDP && - pipe_config->pch_pfit.enabled != enable) - pipe_config->base.connectors_changed = true; + if (HAS_IPS(dev_priv)) { + /* + * When IPS gets enabled, the pipe CRC changes. Since IPS gets + * enabled and disabled dynamically based on package C states, + * user space can't make reliable use of the CRCs, so let's just + * completely disable it. + */ + pipe_config->ips_force_disable = enable; + if (pipe_config->ips_enabled == enable) + pipe_config->base.connectors_changed = true; + } + + if (IS_HASWELL(dev_priv)) { + pipe_config->pch_pfit.force_thru = enable; + if (pipe_config->cpu_transcoder == TRANSCODER_EDP && + pipe_config->pch_pfit.enabled != enable) + pipe_config->base.connectors_changed = true; + } ret = drm_atomic_commit(state); @@ -570,8 +584,9 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv, *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB; break; case INTEL_PIPE_CRC_SOURCE_PF: - if (IS_HASWELL(dev_priv) && pipe == PIPE_A) - hsw_trans_edp_pipe_A_crc_wa(dev_priv, true); + if ((IS_HASWELL(dev_priv) || + IS_BROADWELL(dev_priv)) && pipe == PIPE_A) + hsw_pipe_A_crc_wa(dev_priv, true); *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB; break; @@ -606,7 +621,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv, enum intel_pipe_crc_source source) { struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; - struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); enum intel_display_power_domain power_domain; u32 val = 0; /* shut up gcc */ int ret; @@ -643,14 +657,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv, goto out; } - /* - * When IPS gets enabled, the pipe CRC changes. Since IPS gets - * enabled and disabled dynamically based on package C states, - * user space can't make reliable use of the CRCs, so let's just - * completely disable it. 
- */ - hsw_disable_ips(crtc); - spin_lock_irq(&pipe_crc->lock); kfree(pipe_crc->entries); pipe_crc->entries = entries; @@ -691,10 +697,9 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv, g4x_undo_pipe_scramble_reset(dev_priv, pipe); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_undo_pipe_scramble_reset(dev_priv, pipe); - else if (IS_HASWELL(dev_priv) && pipe == PIPE_A) - hsw_trans_edp_pipe_A_crc_wa(dev_priv, false); - - hsw_enable_ips(crtc); + else if ((IS_HASWELL(dev_priv) || + IS_BROADWELL(dev_priv)) && pipe == PIPE_A) + hsw_pipe_A_crc_wa(dev_priv, false); } ret = 0; @@ -914,7 +919,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, { struct drm_i915_private *dev_priv = crtc->dev->dev_private; struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum intel_display_power_domain power_domain; enum intel_pipe_crc_source source; u32 val = 0; /* shut up gcc */ @@ -935,16 +939,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, if (ret != 0) goto out; - if (source) { - /* - * When IPS gets enabled, the pipe CRC changes. Since IPS gets - * enabled and disabled dynamically based on package C states, - * user space can't make reliable use of the CRCs, so let's just - * completely disable it. - */ - hsw_disable_ips(intel_crtc); - } - I915_WRITE(PIPE_CRC_CTL(crtc->index), val); POSTING_READ(PIPE_CRC_CTL(crtc->index)); @@ -953,10 +947,9 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name, g4x_undo_pipe_scramble_reset(dev_priv, crtc->index); else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_undo_pipe_scramble_reset(dev_priv, crtc->index); - else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A) - hsw_trans_edp_pipe_A_crc_wa(dev_priv, false); - - hsw_enable_ips(intel_crtc); + else if ((IS_HASWELL(dev_priv) || + IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A) + hsw_pipe_A_crc_wa(dev_priv, false); } pipe_crc->skipped = 0; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ed662937ec3c..c66af09e27a7 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -58,24 +58,23 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { + if (HAS_LLC(dev_priv)) { + /* + * WaCompressedResourceDisplayNewHashMode:skl,kbl + * Display WA#0390: skl,kbl + * + * Must match Sampler, Pixel Back End, and Media. See + * WaCompressedResourceSamplerPbeMediaNewHashMode. + */ + I915_WRITE(CHICKEN_PAR1_1, + I915_READ(CHICKEN_PAR1_1) | + SKL_DE_COMPRESSED_HASH_MODE); + } + /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ I915_WRITE(CHICKEN_PAR1_1, I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); - /* - * Display WA#0390: skl,bxt,kbl,glk - * - * Must match Sampler, Pixel Back End, and Media - * (0xE194 bit 8, 0x7014 bit 13, 0x4DDC bits 27 and 31). - * - * Including bits outside the page in the hash would - * require 2 (or 4?) MiB alignment of resources. Just - * assume the defaul hashing mode which only uses bits - * within the page. 
- */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) & ~SKL_RC_HASH_OUTSIDE); - I915_WRITE(GEN8_CONFIG0, I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES); @@ -125,6 +124,7 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) static void glk_init_clock_gating(struct drm_i915_private *dev_priv) { + u32 val; gen9_init_clock_gating(dev_priv); /* @@ -144,6 +144,11 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(CHICKEN_MISC_2, val); } + /* Display WA #1133: WaFbcSkipSegments:glk */ + val = I915_READ(ILK_DPFC_CHICKEN); + val &= ~GLK_SKIP_SEG_COUNT_MASK; + val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1); + I915_WRITE(ILK_DPFC_CHICKEN, val); } static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv) @@ -1322,21 +1327,21 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) int num_active_planes = hweight32(crtc_state->active_planes & ~BIT(PLANE_CURSOR)); const struct g4x_pipe_wm *raw; - struct intel_plane_state *plane_state; + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; struct intel_plane *plane; enum plane_id plane_id; int i, level; unsigned int dirty = 0; - for_each_intel_plane_in_state(state, plane, plane_state, i) { - const struct intel_plane_state *old_plane_state = - to_intel_plane_state(plane->base.state); - - if (plane_state->base.crtc != &crtc->base && + for_each_oldnew_intel_plane_in_state(state, plane, + old_plane_state, + new_plane_state, i) { + if (new_plane_state->base.crtc != &crtc->base && old_plane_state->base.crtc != &crtc->base) continue; - if (g4x_raw_plane_wm_compute(crtc_state, plane_state)) + if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state)) dirty |= BIT(plane->id); } @@ -1831,21 +1836,21 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) int num_active_planes = hweight32(crtc_state->active_planes & ~BIT(PLANE_CURSOR)); bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base); - struct intel_plane_state *plane_state; + const struct intel_plane_state *old_plane_state; + const struct intel_plane_state *new_plane_state; struct intel_plane *plane; enum plane_id plane_id; int level, ret, i; unsigned int dirty = 0; - for_each_intel_plane_in_state(state, plane, plane_state, i) { - const struct intel_plane_state *old_plane_state = - to_intel_plane_state(plane->base.state); - - if (plane_state->base.crtc != &crtc->base && + for_each_oldnew_intel_plane_in_state(state, plane, + old_plane_state, + new_plane_state, i) { + if (new_plane_state->base.crtc != &crtc->base && old_plane_state->base.crtc != &crtc->base) continue; - if (vlv_raw_plane_wm_compute(crtc_state, plane_state)) + if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state)) dirty |= BIT(plane->id); } @@ -1864,7 +1869,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) /* cursor changes don't warrant a FIFO recompute */ if (dirty & ~BIT(PLANE_CURSOR)) { const struct intel_crtc_state *old_crtc_state = - to_intel_crtc_state(crtc->base.state); + intel_atomic_get_old_crtc_state(state, crtc); const struct vlv_fifo_state *old_fifo_state = &old_crtc_state->wm.vlv.fifo_state; @@ -4370,134 +4375,147 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate, downscale_amount); } -static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, - struct intel_crtc_state *cstate, - const struct intel_plane_state *intel_pstate, - uint16_t ddb_allocation, - int level, - uint16_t *out_blocks, /* out */ - 
uint8_t *out_lines, /* out */ - bool *enabled /* out */) +static int +skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, + struct intel_crtc_state *cstate, + const struct intel_plane_state *intel_pstate, + struct skl_wm_params *wp) { struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); const struct drm_plane_state *pstate = &intel_pstate->base; const struct drm_framebuffer *fb = pstate->fb; - uint32_t latency = dev_priv->wm.skl_latency[level]; - uint_fixed_16_16_t method1, method2; - uint_fixed_16_16_t plane_blocks_per_line; - uint_fixed_16_16_t selected_result; uint32_t interm_pbpl; - uint32_t plane_bytes_per_line; - uint32_t res_blocks, res_lines; - uint8_t cpp; - uint32_t width = 0; - uint32_t plane_pixel_rate; - uint_fixed_16_16_t y_tile_minimum; - uint32_t y_min_scanlines; struct intel_atomic_state *state = to_intel_atomic_state(cstate->base.state); bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); - bool y_tiled, x_tiled; - if (latency == 0 || - !intel_wm_plane_visible(cstate, intel_pstate)) { - *enabled = false; + if (!intel_wm_plane_visible(cstate, intel_pstate)) return 0; - } - - y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED || - fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; - - /* Display WA #1141: kbl,cfl */ - if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) && - dev_priv->ipc_enabled) - latency += 4; - if (apply_memory_bw_wa && x_tiled) - latency += 15; + wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || + fb->modifier == I915_FORMAT_MOD_Yf_TILED || + fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || + fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; + wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED; + wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || + fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS; if (plane->id == PLANE_CURSOR) { - width = intel_pstate->base.crtc_w; + wp->width = intel_pstate->base.crtc_w; } else { /* * Src coordinates are already rotated by 270 degrees for * the 90/270 degree plane rotation cases (to match the * GTT mapping), hence no need to account for rotation here. */ - width = drm_rect_width(&intel_pstate->base.src) >> 16; + wp->width = drm_rect_width(&intel_pstate->base.src) >> 16; } - cpp = (fb->format->format == DRM_FORMAT_NV12) ? fb->format->cpp[1] : - fb->format->cpp[0]; - plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); + wp->cpp = (fb->format->format == DRM_FORMAT_NV12) ? 
fb->format->cpp[1] : + fb->format->cpp[0]; + wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, + intel_pstate); if (drm_rotation_90_or_270(pstate->rotation)) { - switch (cpp) { + switch (wp->cpp) { case 1: - y_min_scanlines = 16; + wp->y_min_scanlines = 16; break; case 2: - y_min_scanlines = 8; + wp->y_min_scanlines = 8; break; case 4: - y_min_scanlines = 4; + wp->y_min_scanlines = 4; break; default: - MISSING_CASE(cpp); + MISSING_CASE(wp->cpp); return -EINVAL; } } else { - y_min_scanlines = 4; + wp->y_min_scanlines = 4; } if (apply_memory_bw_wa) - y_min_scanlines *= 2; + wp->y_min_scanlines *= 2; - plane_bytes_per_line = width * cpp; - if (y_tiled) { - interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line * - y_min_scanlines, 512); + wp->plane_bytes_per_line = wp->width * wp->cpp; + if (wp->y_tiled) { + interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line * + wp->y_min_scanlines, 512); if (INTEL_GEN(dev_priv) >= 10) interm_pbpl++; - plane_blocks_per_line = div_fixed16(interm_pbpl, - y_min_scanlines); - } else if (x_tiled && INTEL_GEN(dev_priv) == 9) { - interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512); - plane_blocks_per_line = u32_to_fixed16(interm_pbpl); + wp->plane_blocks_per_line = div_fixed16(interm_pbpl, + wp->y_min_scanlines); + } else if (wp->x_tiled && IS_GEN9(dev_priv)) { + interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512); + wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); } else { - interm_pbpl = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; - plane_blocks_per_line = u32_to_fixed16(interm_pbpl); + interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line, 512) + 1; + wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl); } - method1 = skl_wm_method1(dev_priv, plane_pixel_rate, cpp, latency); - method2 = skl_wm_method2(plane_pixel_rate, + wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines, + wp->plane_blocks_per_line); + wp->linetime_us = fixed16_to_u32_round_up( + intel_get_linetime_us(cstate)); + + return 0; +} + +static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, + struct intel_crtc_state *cstate, + const struct intel_plane_state *intel_pstate, + uint16_t ddb_allocation, + int level, + const struct skl_wm_params *wp, + uint16_t *out_blocks, /* out */ + uint8_t *out_lines, /* out */ + bool *enabled /* out */) +{ + const struct drm_plane_state *pstate = &intel_pstate->base; + uint32_t latency = dev_priv->wm.skl_latency[level]; + uint_fixed_16_16_t method1, method2; + uint_fixed_16_16_t selected_result; + uint32_t res_blocks, res_lines; + struct intel_atomic_state *state = + to_intel_atomic_state(cstate->base.state); + bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state); + + if (latency == 0 || + !intel_wm_plane_visible(cstate, intel_pstate)) { + *enabled = false; + return 0; + } + + /* Display WA #1141: kbl,cfl */ + if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || + IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) && + dev_priv->ipc_enabled) + latency += 4; + + if (apply_memory_bw_wa && wp->x_tiled) + latency += 15; + + method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate, + wp->cpp, latency); + method2 = skl_wm_method2(wp->plane_pixel_rate, cstate->base.adjusted_mode.crtc_htotal, latency, - plane_blocks_per_line); - - y_tile_minimum = mul_u32_fixed16(y_min_scanlines, - plane_blocks_per_line); + wp->plane_blocks_per_line); - if (y_tiled) { - selected_result = max_fixed16(method2, y_tile_minimum); + if (wp->y_tiled) { + selected_result = max_fixed16(method2, wp->y_tile_minimum); } else { - uint32_t linetime_us; - 
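/*
 * Editor's aside: the method1/method2 selection and the res_blocks /
 * res_lines rounding around here are all done in 16.16 fixed point.
 * A minimal standalone sketch of that representation (the *_sketch
 * names are hypothetical; they mirror, but are not copied from, the
 * driver's uint_fixed_16_16_t helpers):
 */
#include <stdint.h>

typedef struct { uint32_t val; } fixed16_sketch;	/* 16.16 format */

static fixed16_sketch u32_to_fixed16_sketch(uint32_t v)
{
	return (fixed16_sketch){ .val = v << 16 };
}

static fixed16_sketch div_fixed16_sketch(uint32_t num, uint32_t den)
{
	/* Widen to 64 bits so the pre-shift cannot overflow. */
	return (fixed16_sketch){ .val = (uint32_t)(((uint64_t)num << 16) / den) };
}

static uint32_t fixed16_to_u32_round_up_sketch(fixed16_sketch f)
{
	return (f.val + 0xffff) >> 16;	/* ceil: partial blocks still count */
}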
- linetime_us = fixed16_to_u32_round_up( - intel_get_linetime_us(cstate)); - if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && - (plane_bytes_per_line / 512 < 1)) + if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal / + 512 < 1) && (wp->plane_bytes_per_line / 512 < 1)) selected_result = method2; else if (ddb_allocation >= - fixed16_to_u32_round_up(plane_blocks_per_line)) + fixed16_to_u32_round_up(wp->plane_blocks_per_line)) selected_result = min_fixed16(method1, method2); - else if (latency >= linetime_us) + else if (latency >= wp->linetime_us) selected_result = min_fixed16(method1, method2); else selected_result = method1; @@ -4505,19 +4523,18 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, res_blocks = fixed16_to_u32_round_up(selected_result) + 1; res_lines = div_round_up_fixed16(selected_result, - plane_blocks_per_line); + wp->plane_blocks_per_line); /* Display WA #1125: skl,bxt,kbl,glk */ - if (level == 0 && - (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) - res_blocks += fixed16_to_u32_round_up(y_tile_minimum); + if (level == 0 && wp->rc_surface) + res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); /* Display WA #1126: skl,bxt,kbl,glk */ if (level >= 1 && level <= 7) { - if (y_tiled) { - res_blocks += fixed16_to_u32_round_up(y_tile_minimum); - res_lines += y_min_scanlines; + if (wp->y_tiled) { + res_blocks += fixed16_to_u32_round_up( + wp->y_tile_minimum); + res_lines += wp->y_min_scanlines; } else { res_blocks++; } @@ -4555,6 +4572,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb, struct intel_crtc_state *cstate, const struct intel_plane_state *intel_pstate, + const struct skl_wm_params *wm_params, struct skl_plane_wm *wm) { struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); @@ -4578,6 +4596,7 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, intel_pstate, ddb_blocks, level, + wm_params, &result->plane_res_b, &result->plane_res_l, &result->plane_en); @@ -4603,20 +4622,65 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate) linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us)); - /* Display WA #1135: bxt. 
*/ - if (IS_BROXTON(dev_priv) && dev_priv->ipc_enabled) - linetime_wm = DIV_ROUND_UP(linetime_wm, 2); + /* Display WA #1135: bxt:ALL GLK:ALL */ + if ((IS_BROXTON(dev_priv) || IS_GEMINILAKE(dev_priv)) && + dev_priv->ipc_enabled) + linetime_wm /= 2; return linetime_wm; } static void skl_compute_transition_wm(struct intel_crtc_state *cstate, + struct skl_wm_params *wp, + struct skl_wm_level *wm_l0, + uint16_t ddb_allocation, struct skl_wm_level *trans_wm /* out */) { + struct drm_device *dev = cstate->base.crtc->dev; + const struct drm_i915_private *dev_priv = to_i915(dev); + uint16_t trans_min, trans_y_tile_min; + const uint16_t trans_amount = 10; /* This is configurable amount */ + uint16_t trans_offset_b, res_blocks; + if (!cstate->base.active) + goto exit; + + /* Transition WM are not recommended by HW team for GEN9 */ + if (INTEL_GEN(dev_priv) <= 9) + goto exit; + + /* Transition WM don't make any sense if ipc is disabled */ + if (!dev_priv->ipc_enabled) + goto exit; + + if (INTEL_GEN(dev_priv) >= 10) + trans_min = 4; + + trans_offset_b = trans_min + trans_amount; + + if (wp->y_tiled) { + trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2, + wp->y_tile_minimum); + res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) + + trans_offset_b; + } else { + res_blocks = wm_l0->plane_res_b + trans_offset_b; + + /* WA BUG:1938466 add one block for non y-tile planes */ + if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0)) + res_blocks += 1; + + } + + res_blocks += 1; + + if (res_blocks < ddb_allocation) { + trans_wm->plane_res_b = res_blocks; + trans_wm->plane_en = true; return; + } - /* Until we know more, just disable transition WMs */ +exit: trans_wm->plane_en = false; } @@ -4642,14 +4706,25 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, const struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); enum plane_id plane_id = to_intel_plane(plane)->id; + struct skl_wm_params wm_params; + enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe; + uint16_t ddb_blocks; wm = &pipe_wm->planes[plane_id]; + ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); + memset(&wm_params, 0, sizeof(struct skl_wm_params)); + + ret = skl_compute_plane_wm_params(dev_priv, cstate, + intel_pstate, &wm_params); + if (ret) + return ret; ret = skl_compute_wm_levels(dev_priv, ddb, cstate, - intel_pstate, wm); + intel_pstate, &wm_params, wm); if (ret) return ret; - skl_compute_transition_wm(cstate, &wm->trans_wm); + skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0], + ddb_blocks, &wm->trans_wm); } pipe_wm->linetime = skl_compute_linetime_wm(cstate); @@ -5748,6 +5823,30 @@ void intel_update_watermarks(struct intel_crtc *crtc) dev_priv->display.update_wm(crtc); } +void intel_enable_ipc(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = I915_READ(DISP_ARB_CTL2); + + if (dev_priv->ipc_enabled) + val |= DISP_IPC_ENABLE; + else + val &= ~DISP_IPC_ENABLE; + + I915_WRITE(DISP_ARB_CTL2, val); +} + +void intel_init_ipc(struct drm_i915_private *dev_priv) +{ + dev_priv->ipc_enabled = false; + if (!HAS_IPC(dev_priv)) + return; + + dev_priv->ipc_enabled = true; + intel_enable_ipc(dev_priv); +} + /* * Lock protecting IPS related data structures */ @@ -6169,6 +6268,7 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq, struct intel_rps_client *rps) { struct drm_i915_private *i915 = rq->i915; + unsigned long flags; bool boost; /* This is intentionally racy! 
We peek at the state here, then @@ -6178,13 +6278,13 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq, return; boost = false; - spin_lock_irq(&rq->lock); + spin_lock_irqsave(&rq->lock, flags); if (!rq->waitboost && !i915_gem_request_completed(rq)) { atomic_inc(&i915->rps.num_waiters); rq->waitboost = true; boost = true; } - spin_unlock_irq(&rq->lock); + spin_unlock_irqrestore(&rq->lock, flags); if (!boost) return; @@ -7725,7 +7825,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) * RPM depends on RC6 to save restore the GT HW context, so make RC6 a * requirement. */ - if (!i915.enable_rc6) { + if (!i915_modparams.enable_rc6) { DRM_INFO("RC6 disabled, disabling runtime PM support\n"); intel_runtime_pm_get(dev_priv); } @@ -7782,7 +7882,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev_priv)) valleyview_cleanup_gt_powersave(dev_priv); - if (!i915.enable_rc6) + if (!i915_modparams.enable_rc6) intel_runtime_pm_put(dev_priv); } @@ -7904,7 +8004,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work) if (IS_ERR(req)) goto unlock; - if (!i915.enable_execlists && i915_switch_context(req) == 0) + if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0) rcs->init_context(req); /* Mark the device busy, calling intel_enable_gt_powersave() */ @@ -7980,7 +8080,7 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) */ } -static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv) +static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; @@ -8263,7 +8363,57 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, I915_WRITE(GEN7_MISCCPCTL, misccpctl); } -static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv) +static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) +{ + if (!HAS_PCH_CNP(dev_priv)) + return; + + /* Wa #1181 */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) | + CNP_PWM_CGE_GATING_DISABLE); +} + +static void cnl_init_clock_gating(struct drm_i915_private *dev_priv) +{ + u32 val; + cnp_init_clock_gating(dev_priv); + + /* This is not an Wa. 
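The gen6_rps_boost() change from spin_lock_irq() to spin_lock_irqsave() above is the usual adjustment for a lock that may now be taken with interrupts already disabled: the saved flags restore the caller's interrupt state on unlock instead of unconditionally re-enabling interrupts. A generic sketch of the pattern:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static unsigned int example_count;

    /* Callable from process, softirq or hardirq context alike. */
    static void example_bump(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&example_lock, flags);
            example_count++;
            spin_unlock_irqrestore(&example_lock, flags);
    }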
Enable for better image quality */ + I915_WRITE(_3D_CHICKEN3, + _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); + + /* WaEnableChickenDCPR:cnl */ + I915_WRITE(GEN8_CHICKEN_DCPR_1, + I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); + + /* WaFbcWakeMemOn:cnl */ + I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + DISP_FBC_MEMORY_WAKE); + + /* WaSarbUnitClockGatingDisable:cnl (pre-prod) */ + if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0)) + I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, + I915_READ(SLICE_UNIT_LEVEL_CLKGATE) | + SARBUNIT_CLKGATE_DIS); + + /* Display WA #1133: WaFbcSkipSegments:cnl */ + val = I915_READ(ILK_DPFC_CHICKEN); + val &= ~GLK_SKIP_SEG_COUNT_MASK; + val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1); + I915_WRITE(ILK_DPFC_CHICKEN, val); +} + +static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) +{ + cnp_init_clock_gating(dev_priv); + gen9_init_clock_gating(dev_priv); + + /* WaFbcNukeOnHostModify:cfl */ + I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + ILK_DPFC_NUKE_ON_ANY_MODIFICATION); +} + +static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) { gen9_init_clock_gating(dev_priv); @@ -8277,12 +8427,12 @@ static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); - /* WaFbcNukeOnHostModify:kbl,cfl */ + /* WaFbcNukeOnHostModify:kbl */ I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); } -static void skylake_init_clock_gating(struct drm_i915_private *dev_priv) +static void skl_init_clock_gating(struct drm_i915_private *dev_priv) { gen9_init_clock_gating(dev_priv); @@ -8295,7 +8445,7 @@ static void skylake_init_clock_gating(struct drm_i915_private *dev_priv) ILK_DPFC_NUKE_ON_ANY_MODIFICATION); } -static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) +static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -8353,7 +8503,7 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv) I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); } -static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) +static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) { ilk_init_lp_watermarks(dev_priv); @@ -8407,7 +8557,7 @@ static void haswell_init_clock_gating(struct drm_i915_private *dev_priv) lpt_init_clock_gating(dev_priv); } -static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv) +static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) { uint32_t snpcr; @@ -8504,7 +8654,7 @@ static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv) gen6_check_mch_setup(dev_priv); } -static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv) +static void vlv_init_clock_gating(struct drm_i915_private *dev_priv) { /* WaDisableEarlyCull:vlv */ I915_WRITE(_3D_CHICKEN3, @@ -8584,7 +8734,7 @@ static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); } -static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv) +static void chv_init_clock_gating(struct drm_i915_private *dev_priv) { /* WaVSRefCountFullforceMissDisable:chv */ /* WaDSRefCountFullforceMissDisable:chv */ @@ -8644,7 +8794,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) g4x_disable_trickle_feed(dev_priv); } -static void crestline_init_clock_gating(struct drm_i915_private *dev_priv) 
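The Display WA #1133 write in cnl_init_clock_gating() above follows the usual mask-then-set sequence for a multi-bit register field. A generic sketch of that read-modify-write shape (the field layout here is illustrative, not the real ILK_DPFC_CHICKEN encoding):

    #include <linux/types.h>

    #define EXAMPLE_SEG_EN          (1 << 8)
    #define EXAMPLE_SEG_COUNT_MASK  (0xf << 4)
    #define EXAMPLE_SEG_COUNT(x)    (((x) & 0xf) << 4)

    static u32 example_set_skip_segments(u32 old, u32 count)
    {
            u32 val = old;                  /* value from I915_READ() */

            val &= ~EXAMPLE_SEG_COUNT_MASK; /* clear the whole field first */
            val |= EXAMPLE_SEG_EN | EXAMPLE_SEG_COUNT(count);
            return val;                     /* written back via I915_WRITE() */
    }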
+static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv) { I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); I915_WRITE(RENCLK_GATE_D2, 0); @@ -8658,7 +8808,7 @@ static void crestline_init_clock_gating(struct drm_i915_private *dev_priv) I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); } -static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv) +static void i965g_init_clock_gating(struct drm_i915_private *dev_priv) { I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | I965_RCC_CLOCK_GATE_DISABLE | @@ -8743,34 +8893,38 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv) */ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { - if (IS_SKYLAKE(dev_priv)) - dev_priv->display.init_clock_gating = skylake_init_clock_gating; - else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) - dev_priv->display.init_clock_gating = kabylake_init_clock_gating; + if (IS_CANNONLAKE(dev_priv)) + dev_priv->display.init_clock_gating = cnl_init_clock_gating; + else if (IS_COFFEELAKE(dev_priv)) + dev_priv->display.init_clock_gating = cfl_init_clock_gating; + else if (IS_SKYLAKE(dev_priv)) + dev_priv->display.init_clock_gating = skl_init_clock_gating; + else if (IS_KABYLAKE(dev_priv)) + dev_priv->display.init_clock_gating = kbl_init_clock_gating; else if (IS_BROXTON(dev_priv)) dev_priv->display.init_clock_gating = bxt_init_clock_gating; else if (IS_GEMINILAKE(dev_priv)) dev_priv->display.init_clock_gating = glk_init_clock_gating; else if (IS_BROADWELL(dev_priv)) - dev_priv->display.init_clock_gating = broadwell_init_clock_gating; + dev_priv->display.init_clock_gating = bdw_init_clock_gating; else if (IS_CHERRYVIEW(dev_priv)) - dev_priv->display.init_clock_gating = cherryview_init_clock_gating; + dev_priv->display.init_clock_gating = chv_init_clock_gating; else if (IS_HASWELL(dev_priv)) - dev_priv->display.init_clock_gating = haswell_init_clock_gating; + dev_priv->display.init_clock_gating = hsw_init_clock_gating; else if (IS_IVYBRIDGE(dev_priv)) - dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; + dev_priv->display.init_clock_gating = ivb_init_clock_gating; else if (IS_VALLEYVIEW(dev_priv)) - dev_priv->display.init_clock_gating = valleyview_init_clock_gating; + dev_priv->display.init_clock_gating = vlv_init_clock_gating; else if (IS_GEN6(dev_priv)) dev_priv->display.init_clock_gating = gen6_init_clock_gating; else if (IS_GEN5(dev_priv)) - dev_priv->display.init_clock_gating = ironlake_init_clock_gating; + dev_priv->display.init_clock_gating = ilk_init_clock_gating; else if (IS_G4X(dev_priv)) dev_priv->display.init_clock_gating = g4x_init_clock_gating; else if (IS_I965GM(dev_priv)) - dev_priv->display.init_clock_gating = crestline_init_clock_gating; + dev_priv->display.init_clock_gating = i965gm_init_clock_gating; else if (IS_I965G(dev_priv)) - dev_priv->display.init_clock_gating = broadwater_init_clock_gating; + dev_priv->display.init_clock_gating = i965g_init_clock_gating; else if (IS_GEN3(dev_priv)) dev_priv->display.init_clock_gating = gen3_init_clock_gating; else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) @@ -9132,43 +9286,6 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); } -struct request_boost { - struct work_struct work; - struct drm_i915_gem_request *req; -}; - -static void __intel_rps_boost_work(struct work_struct *work) -{ - struct request_boost *boost = container_of(work, struct request_boost, work); - 
struct drm_i915_gem_request *req = boost->req; - - if (!i915_gem_request_completed(req)) - gen6_rps_boost(req, NULL); - - i915_gem_request_put(req); - kfree(boost); -} - -void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req) -{ - struct request_boost *boost; - - if (req == NULL || INTEL_GEN(req->i915) < 6) - return; - - if (i915_gem_request_completed(req)) - return; - - boost = kmalloc(sizeof(*boost), GFP_ATOMIC); - if (boost == NULL) - return; - - boost->req = i915_gem_request_get(req); - - INIT_WORK(&boost->work, __intel_rps_boost_work); - queue_work(req->i915->wq, &boost->work); -} - void intel_pm_setup(struct drm_i915_private *dev_priv) { mutex_init(&dev_priv->rps.hw_lock); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 1b31ab002dae..5419cda83ba8 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -103,61 +103,55 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp, POSTING_READ(ctl_reg); } -static void vlv_psr_setup_vsc(struct intel_dp *intel_dp) +static void vlv_psr_setup_vsc(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { - struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_crtc *crtc = intel_dig_port->base.base.crtc; - enum pipe pipe = to_intel_crtc(crtc)->pipe; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); uint32_t val; /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */ - val = I915_READ(VLV_VSCSDP(pipe)); + val = I915_READ(VLV_VSCSDP(crtc->pipe)); val &= ~VLV_EDP_PSR_SDP_FREQ_MASK; val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME; - I915_WRITE(VLV_VSCSDP(pipe), val); + I915_WRITE(VLV_VSCSDP(crtc->pipe), val); } -static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp) +static void hsw_psr_setup_vsc(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { - struct edp_vsc_psr psr_vsc; struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + struct edp_vsc_psr psr_vsc; - /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */ - memset(&psr_vsc, 0, sizeof(psr_vsc)); - psr_vsc.sdp_header.HB0 = 0; - psr_vsc.sdp_header.HB1 = 0x7; - if (dev_priv->psr.colorimetry_support && - dev_priv->psr.y_cord_support) { - psr_vsc.sdp_header.HB2 = 0x5; - psr_vsc.sdp_header.HB3 = 0x13; - } else if (dev_priv->psr.y_cord_support) { - psr_vsc.sdp_header.HB2 = 0x4; - psr_vsc.sdp_header.HB3 = 0xe; + if (dev_priv->psr.psr2_support) { + /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */ + memset(&psr_vsc, 0, sizeof(psr_vsc)); + psr_vsc.sdp_header.HB0 = 0; + psr_vsc.sdp_header.HB1 = 0x7; + if (dev_priv->psr.colorimetry_support && + dev_priv->psr.y_cord_support) { + psr_vsc.sdp_header.HB2 = 0x5; + psr_vsc.sdp_header.HB3 = 0x13; + } else if (dev_priv->psr.y_cord_support) { + psr_vsc.sdp_header.HB2 = 0x4; + psr_vsc.sdp_header.HB3 = 0xe; + } else { + psr_vsc.sdp_header.HB2 = 0x3; + psr_vsc.sdp_header.HB3 = 0xc; + } } else { - psr_vsc.sdp_header.HB2 = 0x3; - psr_vsc.sdp_header.HB3 = 0xc; + /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ + memset(&psr_vsc, 0, sizeof(psr_vsc)); + psr_vsc.sdp_header.HB0 = 0; + 
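The HB2/HB3 values filled in by hsw_psr_setup_vsc() below come from the eDP spec tables the comments cite, selected by a simple three-way priority. A condensed sketch of that mapping, using the same values as the hunk (struct and function names are local to the example):

    #include <linux/types.h>

    struct example_vsc_header { u8 hb2, hb3; };

    static struct example_vsc_header psr2_vsc_header(bool colorimetry,
                                                     bool y_coord)
    {
            if (colorimetry && y_coord)
                    return (struct example_vsc_header){ .hb2 = 0x5, .hb3 = 0x13 };
            if (y_coord)
                    return (struct example_vsc_header){ .hb2 = 0x4, .hb3 = 0xe };
            return (struct example_vsc_header){ .hb2 = 0x3, .hb3 = 0xc };
    }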
psr_vsc.sdp_header.HB1 = 0x7; + psr_vsc.sdp_header.HB2 = 0x2; + psr_vsc.sdp_header.HB3 = 0x8; } intel_psr_write_vsc(intel_dp, &psr_vsc); } -static void hsw_psr_setup_vsc(struct intel_dp *intel_dp) -{ - struct edp_vsc_psr psr_vsc; - - /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */ - memset(&psr_vsc, 0, sizeof(psr_vsc)); - psr_vsc.sdp_header.HB0 = 0; - psr_vsc.sdp_header.HB1 = 0x7; - psr_vsc.sdp_header.HB2 = 0x2; - psr_vsc.sdp_header.HB3 = 0x8; - intel_psr_write_vsc(intel_dp, &psr_vsc); -} - static void vlv_psr_enable_sink(struct intel_dp *intel_dp) { drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, @@ -233,16 +227,15 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) I915_WRITE(aux_ctl_reg, aux_ctl); } -static void vlv_psr_enable_source(struct intel_dp *intel_dp) +static void vlv_psr_enable_source(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_crtc *crtc = dig_port->base.base.crtc; - enum pipe pipe = to_intel_crtc(crtc)->pipe; + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */ - I915_WRITE(VLV_PSRCTL(pipe), + /* Transition from PSR_state 0 (disabled) to PSR_state 1 (inactive) */ + I915_WRITE(VLV_PSRCTL(crtc->pipe), VLV_EDP_PSR_MODE_SW_TIMER | VLV_EDP_PSR_SRC_TRANSMITTER_STATE | VLV_EDP_PSR_ENABLE); @@ -256,16 +249,17 @@ static void vlv_psr_activate(struct intel_dp *intel_dp) struct drm_crtc *crtc = dig_port->base.base.crtc; enum pipe pipe = to_intel_crtc(crtc)->pipe; - /* Let's do the transition from PSR_state 1 to PSR_state 2 - * that is PSR transition to active - static frame transmission. - * Then Hardware is responsible for the transition to PSR_state 3 - * that is PSR active - no Remote Frame Buffer (RFB) update. + /* + * Let's do the transition from PSR_state 1 (inactive) to + * PSR_state 2 (transition to active - static frame transmission). + * Then Hardware is responsible for the transition to + * PSR_state 3 (active - no Remote Frame Buffer (RFB) update). */ I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) | VLV_EDP_PSR_ACTIVE_ENTRY); } -static void intel_enable_source_psr1(struct intel_dp *intel_dp) +static void hsw_activate_psr1(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; @@ -319,7 +313,7 @@ static void intel_enable_source_psr1(struct intel_dp *intel_dp) I915_WRITE(EDP_PSR_CTL, val); } -static void intel_enable_source_psr2(struct intel_dp *intel_dp) +static void hsw_activate_psr2(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; @@ -333,6 +327,7 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp) */ uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); uint32_t val; + uint8_t sink_latency; val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; @@ -340,8 +335,16 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp) * mesh at all with our frontbuffer tracking. And the hw alone isn't * good enough. 
*/ val |= EDP_PSR2_ENABLE | - EDP_SU_TRACK_ENABLE | - EDP_FRAMES_BEFORE_SU_ENTRY; + EDP_SU_TRACK_ENABLE; + + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_SYNCHRONIZATION_LATENCY_IN_SINK, + &sink_latency) == 1) { + sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK; + } else { + sink_latency = 0; + } + val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1); if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) val |= EDP_PSR2_TP2_TIME_2500; @@ -355,17 +358,22 @@ I915_WRITE(EDP_PSR2_CTL, val); } -static void hsw_psr_enable_source(struct intel_dp *intel_dp) +static void hsw_psr_activate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + /* On HSW+ after we enable PSR on the source it will be activated + * as soon as it matches the configured idle_frame count. So + * we actually just enable it here at activation time. + */ + /* PSR1 and PSR2 are mutually exclusive. */ if (dev_priv->psr.psr2_support) - intel_enable_source_psr2(intel_dp); + hsw_activate_psr2(intel_dp); else - intel_enable_source_psr1(intel_dp); + hsw_activate_psr1(intel_dp); } static bool intel_psr_match_conditions(struct intel_dp *intel_dp) @@ -397,7 +405,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) return false; } - if (!i915.enable_psr) { + if (!i915_modparams.enable_psr) { DRM_DEBUG_KMS("PSR disable by flag\n"); return false; } @@ -469,44 +477,69 @@ static void intel_psr_activate(struct intel_dp *intel_dp) WARN_ON(dev_priv->psr.active); lockdep_assert_held(&dev_priv->psr.lock); - /* Enable/Re-enable PSR on the host */ - if (HAS_DDI(dev_priv)) - /* On HSW+ after we enable PSR on source it will activate it - * as soon as it match configure idle_frame count. So - * we just actually enable it here on activation time. - */ - hsw_psr_enable_source(intel_dp); - else - vlv_psr_activate(intel_dp); - + dev_priv->psr.activate(intel_dp); dev_priv->psr.active = true; } +static void hsw_psr_enable_source(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 chicken; + + if (dev_priv->psr.psr2_support) { + chicken = PSR2_VSC_ENABLE_PROG_HEADER; + if (dev_priv->psr.y_cord_support) + chicken |= PSR2_ADD_VERTICAL_LINE_COUNT; + I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken); + + I915_WRITE(EDP_PSR_DEBUG_CTL, + EDP_PSR_DEBUG_MASK_MEMUP | + EDP_PSR_DEBUG_MASK_HPD | + EDP_PSR_DEBUG_MASK_LPSP | + EDP_PSR_DEBUG_MASK_MAX_SLEEP | + EDP_PSR_DEBUG_MASK_DISP_REG_WRITE); + } else { + /* + * Per Spec: Avoid continuous PSR exit by masking MEMUP + * and HPD. also mask LPSP to avoid dependency on other + * drivers that might block runtime_pm besides + * preventing other hw tracking issues now we can rely + * on frontbuffer tracking. + */ + I915_WRITE(EDP_PSR_DEBUG_CTL, + EDP_PSR_DEBUG_MASK_MEMUP | + EDP_PSR_DEBUG_MASK_HPD | + EDP_PSR_DEBUG_MASK_LPSP); + } +} + /** * intel_psr_enable - Enable PSR * @intel_dp: Intel DP + * @crtc_state: new CRTC state * * This function can only be called after the pipe is fully trained and enabled.
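The sink-latency read near the top of this hunk sizes EDP_PSR2_FRAME_BEFORE_SU: the sink reports how many frames it needs to resynchronize, the source programs one more than that, and a failed DPCD read falls back to zero latency. A sketch of just that derivation (the mask name here is local; the driver uses DP_MAX_RESYNC_FRAME_COUNT_MASK):

    #include <linux/types.h>

    #define EXAMPLE_RESYNC_FRAME_CNT_MASK 0xf /* low bits of the DPCD field */

    static u8 example_frames_before_su(bool read_ok, u8 dpcd_val)
    {
            u8 sink_latency = read_ok ?
                    (dpcd_val & EXAMPLE_RESYNC_FRAME_CNT_MASK) : 0;

            return sink_latency + 1; /* one frame more than the sink needs */
    }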
*/ -void intel_psr_enable(struct intel_dp *intel_dp) +void intel_psr_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); - enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; - u32 chicken; - if (!HAS_PSR(dev_priv)) { - DRM_DEBUG_KMS("PSR not supported on this platform\n"); + if (!HAS_PSR(dev_priv)) return; - } if (!is_edp_psr(intel_dp)) { DRM_DEBUG_KMS("PSR not supported by this panel\n"); return; } + WARN_ON(dev_priv->drrs.dp); mutex_lock(&dev_priv->psr.lock); if (dev_priv->psr.enabled) { DRM_DEBUG_KMS("PSR already in use\n"); @@ -518,104 +551,64 @@ void intel_psr_enable(struct intel_dp *intel_dp) dev_priv->psr.busy_frontbuffer_bits = 0; - if (HAS_DDI(dev_priv)) { - if (dev_priv->psr.psr2_support) { - skl_psr_setup_su_vsc(intel_dp); - chicken = PSR2_VSC_ENABLE_PROG_HEADER; - if (dev_priv->psr.y_cord_support) - chicken |= PSR2_ADD_VERTICAL_LINE_COUNT; - I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken); - I915_WRITE(EDP_PSR_DEBUG_CTL, - EDP_PSR_DEBUG_MASK_MEMUP | - EDP_PSR_DEBUG_MASK_HPD | - EDP_PSR_DEBUG_MASK_LPSP | - EDP_PSR_DEBUG_MASK_MAX_SLEEP | - EDP_PSR_DEBUG_MASK_DISP_REG_WRITE); - } else { - /* set up vsc header for psr1 */ - hsw_psr_setup_vsc(intel_dp); - /* - * Per Spec: Avoid continuous PSR exit by masking MEMUP - * and HPD. also mask LPSP to avoid dependency on other - * drivers that might block runtime_pm besides - * preventing other hw tracking issues now we can rely - * on frontbuffer tracking. - */ - I915_WRITE(EDP_PSR_DEBUG_CTL, - EDP_PSR_DEBUG_MASK_MEMUP | - EDP_PSR_DEBUG_MASK_HPD | - EDP_PSR_DEBUG_MASK_LPSP); - } - - /* Enable PSR on the panel */ - hsw_psr_enable_sink(intel_dp); + dev_priv->psr.setup_vsc(intel_dp, crtc_state); + dev_priv->psr.enable_sink(intel_dp); + dev_priv->psr.enable_source(intel_dp, crtc_state); + dev_priv->psr.enabled = intel_dp; - if (INTEL_GEN(dev_priv) >= 9) - intel_psr_activate(intel_dp); + if (INTEL_GEN(dev_priv) >= 9) { + intel_psr_activate(intel_dp); } else { - vlv_psr_setup_vsc(intel_dp); - - /* Enable PSR on the panel */ - vlv_psr_enable_sink(intel_dp); - - /* On HSW+ enable_source also means go to PSR entry/active - * state as soon as idle_frame achieved and here would be - * to soon. However on VLV enable_source just enable PSR - * but let it on inactive state. So we might do this prior - * to active transition, i.e. here. + /* + * FIXME: Activation should happen immediately since this + * function is just called after pipe is fully trained and + * enabled. + * However on some platforms we face issues when first + * activation follows a modeset so quickly. + * - On VLV/CHV we get blank screen on first activation + * - On HSW/BDW we get a recoverable frozen screen until + * next exit-activate sequence.
- */ - if (INTEL_GEN(dev_priv) < 9) schedule_delayed_work(&dev_priv->psr.work, msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); + } - dev_priv->psr.enabled = intel_dp; unlock: mutex_unlock(&dev_priv->psr.lock); } -static void vlv_psr_disable(struct intel_dp *intel_dp) +static void vlv_psr_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = - to_intel_crtc(intel_dig_port->base.base.crtc); + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); uint32_t val; if (dev_priv->psr.active) { - /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */ + /* Put VLV PSR back to PSR_state 0 (disabled). */ if (intel_wait_for_register(dev_priv, - VLV_PSRSTAT(intel_crtc->pipe), + VLV_PSRSTAT(crtc->pipe), VLV_EDP_PSR_IN_TRANS, 0, 1)) WARN(1, "PSR transition took longer than expected\n"); - val = I915_READ(VLV_PSRCTL(intel_crtc->pipe)); + val = I915_READ(VLV_PSRCTL(crtc->pipe)); val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; val &= ~VLV_EDP_PSR_ENABLE; val &= ~VLV_EDP_PSR_MODE_MASK; - I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val); + I915_WRITE(VLV_PSRCTL(crtc->pipe), val); dev_priv->psr.active = false; } else { - WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe)); + WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe)); } } -static void hsw_psr_disable(struct intel_dp *intel_dp) +static void hsw_psr_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; @@ -664,26 +657,27 @@ /** * intel_psr_disable - Disable PSR * @intel_dp: Intel DP + * @old_crtc_state: old CRTC state * * This function needs to be called before disabling pipe. */ -void intel_psr_disable(struct intel_dp *intel_dp) +void intel_psr_disable(struct intel_dp *intel_dp, + const struct intel_crtc_state *old_crtc_state) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + if (!HAS_PSR(dev_priv)) + return; + mutex_lock(&dev_priv->psr.lock); if (!dev_priv->psr.enabled) { mutex_unlock(&dev_priv->psr.lock); return; } - /* Disable PSR on Source */ - if (HAS_DDI(dev_priv)) - hsw_psr_disable(intel_dp); - else - vlv_psr_disable(intel_dp); + dev_priv->psr.disable_source(intel_dp, old_crtc_state); /* Disable PSR on Sink */ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0); @@ -783,17 +777,20 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv) } else { val = I915_READ(VLV_PSRCTL(pipe)); - /* Here we do the transition from PSR_state 3 to PSR_state 5 - * directly once PSR State 4 that is active with single frame - * update can be skipped. PSR_state 5 that is PSR exit then - * Hardware is responsible to transition back to PSR_state 1 - * that is PSR inactive. Same state after - * vlv_edp_psr_enable_source. + /* + * Here we do the transition directly from + * PSR_state 3 (active - no Remote Frame Buffer (RFB) update) to + * PSR_state 5 (exit). + * PSR State 4 (active with single frame update) can be skipped. + * On PSR_state 5 (exit) the Hardware is responsible for transitioning + * back to PSR_state 1 (inactive).
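The comments above spell out VLV's PSR state numbering; collected in one place, the software-visible states look like this (names paraphrased from the comments, not taken from any header):

    enum example_vlv_psr_state {
            EXAMPLE_PSR_DISABLED,     /* state 0 */
            EXAMPLE_PSR_INACTIVE,     /* state 1: enabled, not yet active */
            EXAMPLE_PSR_TRANSITION,   /* state 2: static frame transmission */
            EXAMPLE_PSR_ACTIVE,       /* state 3: no RFB updates */
            EXAMPLE_PSR_SINGLE_FRAME, /* state 4: skipped by the driver */
            EXAMPLE_PSR_EXIT,         /* state 5: hw returns to state 1 */
    };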
+ * Now we are at Same state after vlv_psr_enable_source. */ val &= ~VLV_EDP_PSR_ACTIVE_ENTRY; I915_WRITE(VLV_PSRCTL(pipe), val); - /* Send AUX wake up - Spec says after transitioning to PSR + /* + * Send AUX wake up - Spec says after transitioning to PSR * active we have to send AUX wake up by writing 01h in DPCD * 600h of sink device. * XXX: This might slow down the transition, but without this @@ -824,6 +821,9 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv, enum pipe pipe; u32 val; + if (!HAS_PSR(dev_priv)) + return; + /* * Single frame update is already supported on BDW+ but it requires * many W/A and it isn't really needed. @@ -870,6 +870,9 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, struct drm_crtc *crtc; enum pipe pipe; + if (!HAS_PSR(dev_priv)) + return; + mutex_lock(&dev_priv->psr.lock); if (!dev_priv->psr.enabled) { mutex_unlock(&dev_priv->psr.lock); @@ -907,6 +910,9 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, struct drm_crtc *crtc; enum pipe pipe; + if (!HAS_PSR(dev_priv)) + return; + mutex_lock(&dev_priv->psr.lock); if (!dev_priv->psr.enabled) { mutex_unlock(&dev_priv->psr.lock); @@ -939,12 +945,15 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, */ void intel_psr_init(struct drm_i915_private *dev_priv) { + if (!HAS_PSR(dev_priv)) + return; + dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; /* Per platform default: all disabled. */ - if (i915.enable_psr == -1) - i915.enable_psr = 0; + if (i915_modparams.enable_psr == -1) + i915_modparams.enable_psr = 0; /* Set link_standby x link_off defaults */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) @@ -958,15 +967,29 @@ void intel_psr_init(struct drm_i915_private *dev_priv) dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; /* Override link_standby x link_off defaults */ - if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) { + if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) { DRM_DEBUG_KMS("PSR: Forcing link standby\n"); dev_priv->psr.link_standby = true; } - if (i915.enable_psr == 3 && dev_priv->psr.link_standby) { + if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) { DRM_DEBUG_KMS("PSR: Forcing main link off\n"); dev_priv->psr.link_standby = false; } INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); mutex_init(&dev_priv->psr.lock); + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + dev_priv->psr.enable_source = vlv_psr_enable_source; + dev_priv->psr.disable_source = vlv_psr_disable; + dev_priv->psr.enable_sink = vlv_psr_enable_sink; + dev_priv->psr.activate = vlv_psr_activate; + dev_priv->psr.setup_vsc = vlv_psr_setup_vsc; + } else { + dev_priv->psr.enable_source = hsw_psr_enable_source; + dev_priv->psr.disable_source = hsw_psr_disable; + dev_priv->psr.enable_sink = hsw_psr_enable_sink; + dev_priv->psr.activate = hsw_psr_activate; + dev_priv->psr.setup_vsc = hsw_psr_setup_vsc; + } } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cdf084ef5aae..05c08b0bc172 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -402,17 +402,18 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) */ if (IS_GEN7(dev_priv)) { switch (engine->id) { + /* + * No more rings exist on Gen7. Default case is only to shut up + * gcc switch check warning. 
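The hook assignments at the end of intel_psr_init() above replace per-call HAS_DDI() branching with a one-time choice of implementations. The shape of that dispatch, reduced to a sketch (types and names local to the example):

    struct example_psr_ops {
            void (*activate)(void);
            void (*setup_vsc)(void);
    };

    static void example_vlv_activate(void) { /* VLV/CHV flavour */ }
    static void example_hsw_activate(void) { /* HSW+ flavour */ }

    static void example_psr_init(struct example_psr_ops *ops, bool is_vlv)
    {
            ops->activate = is_vlv ? example_vlv_activate : example_hsw_activate;
            /* the remaining hooks are chosen the same way, once, at init */
    }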
+ */ + default: + GEM_BUG_ON(engine->id); case RCS: mmio = RENDER_HWS_PGA_GEN7; break; case BCS: mmio = BLT_HWS_PGA_GEN7; break; - /* - * VCS2 actually doesn't exist on Gen7. Only shut up - * gcc switch check warning - */ - case VCS2: case VCS: mmio = BSD_HWS_PGA_GEN7; break; @@ -427,6 +428,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine) mmio = RING_HWS_PGA(engine->mmio_base); } + if (INTEL_GEN(dev_priv) >= 6) + I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); + I915_WRITE(mmio, engine->status_page.ggtt_offset); POSTING_READ(mmio); @@ -778,6 +782,24 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs) return cs; } +static void cancel_requests(struct intel_engine_cs *engine) +{ + struct drm_i915_gem_request *request; + unsigned long flags; + + spin_lock_irqsave(&engine->timeline->lock, flags); + + /* Mark all submitted requests as skipped. */ + list_for_each_entry(request, &engine->timeline->requests, link) { + GEM_BUG_ON(!request->global_seqno); + if (!i915_gem_request_completed(request)) + dma_fence_set_error(&request->fence, -EIO); + } + /* Remaining _unready_ requests will be nop'ed when submitted */ + + spin_unlock_irqrestore(&engine->timeline->lock, flags); +} + static void i9xx_submit_request(struct drm_i915_gem_request *request) { struct drm_i915_private *dev_priv = request->i915; @@ -1174,113 +1196,7 @@ i915_emit_bb_start(struct drm_i915_gem_request *req, return 0; } -static void cleanup_phys_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - if (!dev_priv->status_page_dmah) - return; - - drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah); - engine->status_page.page_addr = NULL; -} - -static void cleanup_status_page(struct intel_engine_cs *engine) -{ - struct i915_vma *vma; - struct drm_i915_gem_object *obj; - - vma = fetch_and_zero(&engine->status_page.vma); - if (!vma) - return; - - obj = vma->obj; - - i915_vma_unpin(vma); - i915_vma_close(vma); - - i915_gem_object_unpin_map(obj); - __i915_gem_object_release_unless_active(obj); -} - -static int init_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - unsigned int flags; - void *vaddr; - int ret; - - obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); - if (IS_ERR(obj)) { - DRM_ERROR("Failed to allocate status page\n"); - return PTR_ERR(obj); - } - ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); - if (ret) - goto err; - - vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err; - } - - flags = PIN_GLOBAL; - if (!HAS_LLC(engine->i915)) - /* On g33, we cannot place HWS above 256MiB, so - * restrict its pinning to the low mappable arena. - * Though this restriction is not documented for - * gen4, gen5, or byt, they also behave similarly - * and hang if the HWS is placed at the top of the - * GTT. To generalise, it appears that all !llc - * platforms have issues with us placing the HWS - * above the mappable region (even though we never - * actualy map it). 
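cancel_requests() above walks the engine timeline under its lock and poisons every incomplete fence with -EIO; nothing is unlinked, the requests simply retire with an error. The core of that idea, reduced to a sketch:

    #include <linux/list.h>
    #include <linux/errno.h>

    struct example_request {
            struct list_head link;
            bool completed;
            int error;
    };

    static void example_cancel_all(struct list_head *timeline)
    {
            struct example_request *rq;

            /* caller holds the timeline lock, as in the hunk above */
            list_for_each_entry(rq, timeline, link)
                    if (!rq->completed)
                            rq->error = -EIO;
    }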
- */ - flags |= PIN_MAPPABLE; - ret = i915_vma_pin(vma, 0, 4096, flags); - if (ret) - goto err; - - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - ret = PTR_ERR(vaddr); - goto err_unpin; - } - - engine->status_page.vma = vma; - engine->status_page.ggtt_offset = i915_ggtt_offset(vma); - engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE); - - DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", - engine->name, i915_ggtt_offset(vma)); - return 0; - -err_unpin: - i915_vma_unpin(vma); -err: - i915_gem_object_put(obj); - return ret; -} - -static int init_phys_status_page(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - - GEM_BUG_ON(engine->id != RCS); - - dev_priv->status_page_dmah = - drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE); - if (!dev_priv->status_page_dmah) - return -ENOMEM; - - engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - memset(engine->status_page.page_addr, 0, PAGE_SIZE); - - return 0; -} int intel_ring_pin(struct intel_ring *ring, struct drm_i915_private *i915, @@ -1567,17 +1483,10 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) if (err) goto err; - if (HWS_NEEDS_PHYSICAL(engine->i915)) - err = init_phys_status_page(engine); - else - err = init_status_page(engine); - if (err) - goto err; - ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE); if (IS_ERR(ring)) { err = PTR_ERR(ring); - goto err_hws; + goto err; } /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ @@ -1592,11 +1501,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine) err_ring: intel_ring_free(ring); -err_hws: - if (HWS_NEEDS_PHYSICAL(engine->i915)) - cleanup_phys_status_page(engine); - else - cleanup_status_page(engine); err: intel_engine_cleanup_common(engine); return err; @@ -1615,11 +1519,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine) if (engine->cleanup) engine->cleanup(engine); - if (HWS_NEEDS_PHYSICAL(dev_priv)) - cleanup_phys_status_page(engine); - else - cleanup_status_page(engine); - intel_engine_cleanup_common(engine); dev_priv->engine[engine->id] = NULL; @@ -1983,7 +1882,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *obj; int ret, i; - if (!i915.semaphores) + if (!i915_modparams.semaphores) return; if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) { @@ -2083,7 +1982,7 @@ err_obj: i915_gem_object_put(obj); err: DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n"); - i915.semaphores = 0; + i915_modparams.semaphores = 0; } static void intel_ring_init_irq(struct drm_i915_private *dev_priv, @@ -2115,11 +2014,13 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv, static void i9xx_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = i9xx_submit_request; + engine->cancel_requests = cancel_requests; } static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) { engine->submit_request = gen6_bsd_submit_request; + engine->cancel_requests = cancel_requests; } static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, @@ -2138,7 +2039,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv, engine->emit_breadcrumb = i9xx_emit_breadcrumb; engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz; - if (i915.semaphores) { + if (i915_modparams.semaphores) { int num_rings; engine->emit_breadcrumb = gen6_sema_emit_breadcrumb; @@ -2182,7 +2083,7 @@ int 
intel_init_render_ring_buffer(struct intel_engine_cs *engine) engine->emit_breadcrumb = gen8_render_emit_breadcrumb; engine->emit_breadcrumb_sz = gen8_render_emit_breadcrumb_sz; engine->emit_flush = gen8_render_ring_flush; - if (i915.semaphores) { + if (i915_modparams.semaphores) { int num_rings; engine->semaphore.signal = gen8_rcs_signal; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 02d8974bf9ab..56d7ae9f298b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -184,6 +184,91 @@ struct i915_priolist { int priority; }; +/** + * struct intel_engine_execlists - execlist submission queue and port state + * + * The struct intel_engine_execlists represents the combined logical state of + * driver and the hardware state for execlist mode of submission. + */ +struct intel_engine_execlists { + /** + * @irq_tasklet: softirq tasklet for bottom handler + */ + struct tasklet_struct irq_tasklet; + + /** + * @default_priolist: priority list for I915_PRIORITY_NORMAL + */ + struct i915_priolist default_priolist; + + /** + * @no_priolist: priority lists disabled + */ + bool no_priolist; + + /** + * @port: execlist port states + * + * For each hardware ELSP (ExecList Submission Port) we keep + * track of the last request and the number of times we submitted + * that port to hw. We then count the number of times the hw reports + * a context completion or preemption. As only one context can + * be active on hw, we limit resubmission of context to port[0]. This + * is called Lite Restore, of the context. + */ + struct execlist_port { + /** + * @request_count: combined request and submission count + */ + struct drm_i915_gem_request *request_count; +#define EXECLIST_COUNT_BITS 2 +#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) +#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) +#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) +#define port_set(p, packed) ((p)->request_count = (packed)) +#define port_isset(p) ((p)->request_count) +#define port_index(p, execlists) ((p) - (execlists)->port) + + /** + * @context_id: context ID for port + */ + GEM_DEBUG_DECL(u32 context_id); + +#define EXECLIST_MAX_PORTS 2 + } port[EXECLIST_MAX_PORTS]; + + /** + * @port_mask: number of execlist ports - 1 + */ + unsigned int port_mask; + + /** + * @queue: queue of requests, in priority lists + */ + struct rb_root queue; + + /** + * @first: leftmost level in priority @queue + */ + struct rb_node *first; + + /** + * @fw_domains: forcewake domains for irq tasklet + */ + unsigned int fw_domains; + + /** + * @csb_head: context status buffer head + */ + unsigned int csb_head; + + /** + * @csb_use_mmio: access csb through mmio, instead of hwsp + */ + bool csb_use_mmio; +}; + #define INTEL_ENGINE_CS_MAX_NAME 8 struct intel_engine_cs { @@ -306,6 +391,14 @@ struct intel_engine_cs { void (*schedule)(struct drm_i915_gem_request *request, int priority); + /* + * Cancel all requests on the hardware, or queued for execution. + * This should only cancel the ready requests that have been + * submitted to the engine (via the engine->submit_request callback). + * This is called when marking the device as wedged. 
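The port_pack()/port_unpack() macros in the new intel_engine_execlists struct keep a small submission count in the low, alignment-guaranteed bits of the request pointer, so one word tracks both. The same trick in plain C (assuming at least 4-byte pointer alignment, which the 2 count bits require):

    #define EXAMPLE_COUNT_BITS 2
    #define EXAMPLE_COUNT_MASK ((1ul << EXAMPLE_COUNT_BITS) - 1)

    static inline void *example_pack(void *ptr, unsigned long count)
    {
            return (void *)((unsigned long)ptr | (count & EXAMPLE_COUNT_MASK));
    }

    static inline void *example_unpack(void *packed, unsigned long *count)
    {
            *count = (unsigned long)packed & EXAMPLE_COUNT_MASK;
            return (void *)((unsigned long)packed & ~EXAMPLE_COUNT_MASK);
    }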
+ */ + void (*cancel_requests)(struct intel_engine_cs *engine); + /* Some chipsets are not quite as coherent as advertised and need * an expensive kick to force a true read of the up-to-date seqno. * However, the up-to-date seqno is not always required and the last @@ -372,25 +465,7 @@ struct intel_engine_cs { u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs); } semaphore; - /* Execlists */ - struct tasklet_struct irq_tasklet; - struct i915_priolist default_priolist; - bool no_priolist; - struct execlist_port { - struct drm_i915_gem_request *request_count; -#define EXECLIST_COUNT_BITS 2 -#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS) -#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS) -#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS) -#define port_set(p, packed) ((p)->request_count = (packed)) -#define port_isset(p) ((p)->request_count) -#define port_index(p, e) ((p) - (e)->execlist_port) - GEM_DEBUG_DECL(u32 context_id); - } execlist_port[2]; - struct rb_root execlist_queue; - struct rb_node *execlist_first; - unsigned int fw_domains; + struct intel_engine_execlists execlists; /* Contexts are pinned whilst they are active on the GPU. The last * context executed remains active whilst the GPU is idle - the @@ -444,6 +519,24 @@ struct intel_engine_cs { }; static inline unsigned int +execlists_num_ports(const struct intel_engine_execlists * const execlists) +{ + return execlists->port_mask + 1; +} + +static inline void +execlists_port_complete(struct intel_engine_execlists * const execlists, + struct execlist_port * const port) +{ + const unsigned int m = execlists->port_mask; + + GEM_BUG_ON(port_index(port, execlists) != 0); + + memmove(port, port + 1, m * sizeof(struct execlist_port)); + memset(port + m, 0, sizeof(struct execlist_port)); +} + +static inline unsigned int intel_engine_flag(const struct intel_engine_cs *engine) { return BIT(engine->id); @@ -496,6 +589,10 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_GEM_HWS_SCRATCH_INDEX 0x40 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_HWS_CSB_BUF0_INDEX 0x10 +#define I915_HWS_CSB_WRITE_INDEX 0x1f +#define CNL_HWS_CSB_WRITE_INDEX 0x2f + struct intel_ring * intel_engine_create_ring(struct intel_engine_cs *engine, int size); int intel_ring_pin(struct intel_ring *ring, @@ -735,16 +832,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv); void intel_engines_mark_idle(struct drm_i915_private *i915); void intel_engines_reset_default_submission(struct drm_i915_private *i915); -static inline bool -__intel_engine_can_store_dword(unsigned int gen, unsigned int class) -{ - if (gen <= 2) - return false; /* uses physical not virtual addresses */ - - if (gen == 6 && class == VIDEO_DECODE_CLASS) - return false; /* b0rked */ - - return true; -} +bool intel_engine_can_store_dword(struct intel_engine_cs *engine); #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b66d8e136aa3..7933d1bc6a1c 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -2413,7 +2413,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, mask = 0; } - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) 
max_dc = 0; if (enable_dc >= 0 && enable_dc <= max_dc) { @@ -2471,10 +2471,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, - i915.disable_power_well); - dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv, - i915.enable_dc); + i915_modparams.disable_power_well = + sanitize_disable_power_well_option(dev_priv, + i915_modparams.disable_power_well); + dev_priv->csr.allowed_dc_mask = + get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); @@ -2535,7 +2536,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv) intel_display_set_init_power(dev_priv, true); /* Remove the refcount we took to keep power well support disabled. */ - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); /* @@ -2707,30 +2708,67 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv) usleep_range(10, 30); /* 10 us delay per Bspec */ } -#define CNL_PROCMON_IDX(val) \ - (((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT) -#define NUM_CNL_PROCMON \ - (CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1) +enum { + PROCMON_0_85V_DOT_0, + PROCMON_0_95V_DOT_0, + PROCMON_0_95V_DOT_1, + PROCMON_1_05V_DOT_0, + PROCMON_1_05V_DOT_1, +}; static const struct cnl_procmon { u32 dw1, dw9, dw10; -} cnl_procmon_values[NUM_CNL_PROCMON] = { - [CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] = - { .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, }, - [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] = - { .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, }, - [CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] = - { .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, }, - [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] = - { .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, }, - [CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] = - { .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, +} cnl_procmon_values[] = { + [PROCMON_0_85V_DOT_0] = + { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, }, + [PROCMON_0_95V_DOT_0] = + { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, }, + [PROCMON_0_95V_DOT_1] = + { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, }, + [PROCMON_1_05V_DOT_0] = + { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, }, + [PROCMON_1_05V_DOT_1] = + { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, }; +static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv) +{ + const struct cnl_procmon *procmon; + u32 val; + + val = I915_READ(CNL_PORT_COMP_DW3); + switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { + default: + MISSING_CASE(val); + case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0: + procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0]; + break; + case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0: + procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0]; + break; + case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1: + procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1]; + break; + case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0: + procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0]; + break; + case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1: + procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1]; + break; + } + + val = I915_READ(CNL_PORT_COMP_DW1); + val &= ~((0xff << 
16) | 0xff); + val |= procmon->dw1; + I915_WRITE(CNL_PORT_COMP_DW1, val); + + I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9); + I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10); +} + static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - const struct cnl_procmon *procmon; struct i915_power_well *well; u32 val; @@ -2746,18 +2784,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume val &= ~CNL_COMP_PWR_DOWN; I915_WRITE(CHICKEN_MISC_2, val); - val = I915_READ(CNL_PORT_COMP_DW3); - procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)]; - - WARN_ON(procmon->dw10 == 0); - - val = I915_READ(CNL_PORT_COMP_DW1); - val &= ~((0xff << 16) | 0xff); - val |= procmon->dw1; - I915_WRITE(CNL_PORT_COMP_DW1, val); - - I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9); - I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10); + cnl_set_procmon_ref_values(dev_priv); val = I915_READ(CNL_PORT_COMP_DW0); val |= COMP_INIT; @@ -2784,9 +2811,6 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume gen9_dbuf_enable(dev_priv); } -#undef CNL_PROCMON_IDX -#undef NUM_CNL_PROCMON - static void cnl_display_core_uninit(struct drm_i915_private *dev_priv) { struct i915_power_domains *power_domains = &dev_priv->power_domains; @@ -2972,7 +2996,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) /* For now, we need the power well to be always enabled. */ intel_display_set_init_power(dev_priv, true); /* Disable power support if the user asked so. */ - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); intel_power_domains_sync_hw(dev_priv); power_domains->initializing = false; @@ -2991,7 +3015,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) * Even if power well support was disabled we still want to disable * power wells while we are system suspended. 
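The procmon rework above trades a computed table index for an explicit switch, so an unexpected PROCESS_INFO/VOLTAGE_INFO combination hits MISSING_CASE() and falls back to a defined entry instead of indexing garbage. A reduced sketch of that lookup shape (the fuse values and table contents here are illustrative):

    #include <linux/types.h>

    struct example_procmon { u32 dw1, dw9, dw10; };

    static const struct example_procmon example_table[] = {
            { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96 },
            { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB },
    };

    static const struct example_procmon *example_lookup(u32 fuse)
    {
            switch (fuse) {
            default:        /* log the surprise, then use a safe entry */
            case 0x0:
                    return &example_table[0];
            case 0x1:
                    return &example_table[1];
            }
    }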
*/ - if (!i915.disable_power_well) + if (!i915_modparams.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); if (IS_CANNONLAKE(dev_priv)) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 29a3b0f5bec7..7437944b388f 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -201,11 +201,8 @@ to_intel_sdvo_connector(struct drm_connector *connector) return container_of(connector, struct intel_sdvo_connector, base.base); } -static struct intel_sdvo_connector_state * -to_intel_sdvo_connector_state(struct drm_connector_state *conn_state) -{ - return container_of(conn_state, struct intel_sdvo_connector_state, base.base); -} +#define to_intel_sdvo_connector_state(conn_state) \ + container_of((conn_state), struct intel_sdvo_connector_state, base.base) static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags); @@ -998,7 +995,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo, } static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, - struct intel_crtc_state *pipe_config) + const struct intel_crtc_state *pipe_config) { uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)]; union hdmi_infoframe frame; @@ -1032,7 +1029,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, } static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo, - struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state) { struct intel_sdvo_tv_format format; uint32_t format_map; @@ -1202,9 +1199,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder, } while (0) static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo, - struct intel_sdvo_connector_state *sdvo_state) + const struct intel_sdvo_connector_state *sdvo_state) { - struct drm_connector_state *conn_state = &sdvo_state->base.base; + const struct drm_connector_state *conn_state = &sdvo_state->base.base; struct intel_sdvo_connector *intel_sdvo_conn = to_intel_sdvo_connector(conn_state->connector); uint16_t val; @@ -1258,14 +1255,15 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo, } static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder, - struct intel_crtc_state *crtc_state, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; - struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state); - struct drm_display_mode *mode = &crtc_state->base.mode; + const struct intel_sdvo_connector_state *sdvo_state = + to_intel_sdvo_connector_state(conn_state); + const struct drm_display_mode *mode = &crtc_state->base.mode; struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; @@ -1507,8 +1505,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, } static void intel_disable_sdvo(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_sdvo *intel_sdvo = to_sdvo(encoder); @@ -1552,21 +1550,21 @@ static void 
intel_disable_sdvo(struct intel_encoder *encoder, } static void pch_disable_sdvo(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { } static void pch_post_disable_sdvo(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { intel_disable_sdvo(encoder, old_crtc_state, old_conn_state); } static void intel_enable_sdvo(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 524933b01483..28a1209d87e2 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -70,8 +70,7 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, /** * intel_pipe_update_start() - start update of a set of display registers - * @crtc: the crtc of which the registers are going to be updated - * @start_vbl_count: vblank counter return pointer used for error checking + * @new_crtc_state: the new crtc state * * Mark the start of an update to pipe registers that should be updated * atomically regarding vblank. If the next vblank will happens within @@ -79,18 +78,18 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, * * After a successful call to this function, interrupts will be disabled * until a subsequent call to intel_pipe_update_end(). That is done to - * avoid random delays. The value written to @start_vbl_count should be - * supplied to intel_pipe_update_end() for error checking. + * avoid random delays. */ -void intel_pipe_update_start(struct intel_crtc *crtc) +void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; + const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode; long timeout = msecs_to_jiffies_timeout(1); int scanline, min, max, vblank_start; wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI); + intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI); DEFINE_WAIT(wait); vblank_start = adjusted_mode->crtc_vblank_start; @@ -170,15 +169,15 @@ void intel_pipe_update_start(struct intel_crtc *crtc) /** * intel_pipe_update_end() - end update of a set of display registers - * @crtc: the crtc of which the registers were updated - * @start_vbl_count: start vblank counter (used for error checking) + * @new_crtc_state: the new crtc state * * Mark the end of an update started with intel_pipe_update_start(). This * re-enables interrupts and verifies the update was actually completed - * before a vblank using the value of @start_vbl_count. + * before a vblank. 
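intel_pipe_update_start(), documented above, works by computing an exclusion window just before vblank: if the current scanline falls inside it, the update waits for the vblank to pass first. The window arithmetic, simplified into a sketch (the driver derives the line budget from a roughly 100us evasion time via intel_usecs_to_scanlines()):

    static void example_evasion_window(int vblank_start, int usecs_per_line,
                                       int budget_us, int *min, int *max)
    {
            /* scanlines the budget covers, rounded up */
            int lines = (budget_us + usecs_per_line - 1) / usecs_per_line;

            *min = vblank_start - lines;    /* first "dangerous" scanline */
            *max = vblank_start - 1;        /* last line before vblank */
    }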
*/ -void intel_pipe_update_end(struct intel_crtc *crtc) +void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); enum pipe pipe = crtc->pipe; int scanline_end = intel_get_crtc_scanline(crtc); u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc); @@ -191,14 +190,14 @@ void intel_pipe_update_end(struct intel_crtc *crtc) * Would be slightly nice to just grab the vblank count and arm the * event outside of the critical section - the spinlock might spin for a * while ... */ - if (crtc->base.state->event) { + if (new_crtc_state->base.event) { WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0); spin_lock(&crtc->base.dev->event_lock); - drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event); + drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event); spin_unlock(&crtc->base.dev->event_lock); - crtc->base.state->event = NULL; + new_crtc_state->base.event = NULL; } local_irq_enable(); @@ -995,7 +994,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, set->flags & I915_SET_COLORKEY_DESTINATION) return -EINVAL; - plane = drm_plane_find(dev, set->plane_id); + plane = drm_plane_find(dev, file_priv, set->plane_id); if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) return -ENOENT; diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 906893c006d8..a79a7591b2cf 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -814,8 +814,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) static void intel_enable_tv(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -829,8 +829,8 @@ intel_enable_tv(struct intel_encoder *encoder, static void intel_disable_tv(struct intel_encoder *encoder, - struct intel_crtc_state *old_crtc_state, - struct drm_connector_state *old_conn_state) + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state) { struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -838,7 +838,7 @@ intel_disable_tv(struct intel_encoder *encoder, I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); } -static const struct tv_mode *intel_tv_mode_find(struct drm_connector_state *conn_state) +static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state) { int format = conn_state->tv.mode; @@ -976,8 +976,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv, } static void intel_tv_pre_enable(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config, - struct drm_connector_state *conn_state) + const struct intel_crtc_state *pipe_config, + const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); @@ -1385,7 +1385,7 @@ intel_tv_get_modes(struct drm_connector *connector) mode_ptr->vsync_end = mode_ptr->vsync_start + 1; mode_ptr->vtotal = vactive_s + 33; - tmp = (u64) tv_mode->refresh * mode_ptr->vtotal; + tmp = mul_u32_u32(tv_mode->refresh, mode_ptr->vtotal); tmp *= mode_ptr->htotal; tmp = div_u64(tmp, 1000000); mode_ptr->clock = (int) tmp; diff --git a/drivers/gpu/drm/i915/intel_uc.c 
b/drivers/gpu/drm/i915/intel_uc.c index 0178ba42a0e5..277477890240 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -63,35 +63,35 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv) void intel_uc_sanitize_options(struct drm_i915_private *dev_priv) { if (!HAS_GUC(dev_priv)) { - if (i915.enable_guc_loading > 0 || - i915.enable_guc_submission > 0) + if (i915_modparams.enable_guc_loading > 0 || + i915_modparams.enable_guc_submission > 0) DRM_INFO("Ignoring GuC options, no hardware\n"); - i915.enable_guc_loading = 0; - i915.enable_guc_submission = 0; + i915_modparams.enable_guc_loading = 0; + i915_modparams.enable_guc_submission = 0; return; } /* A negative value means "use platform default" */ - if (i915.enable_guc_loading < 0) - i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv); + if (i915_modparams.enable_guc_loading < 0) + i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv); /* Verify firmware version */ - if (i915.enable_guc_loading) { + if (i915_modparams.enable_guc_loading) { if (HAS_HUC_UCODE(dev_priv)) intel_huc_select_fw(&dev_priv->huc); if (intel_guc_select_fw(&dev_priv->guc)) - i915.enable_guc_loading = 0; + i915_modparams.enable_guc_loading = 0; } /* Can't enable guc submission without guc loaded */ - if (!i915.enable_guc_loading) - i915.enable_guc_submission = 0; + if (!i915_modparams.enable_guc_loading) + i915_modparams.enable_guc_submission = 0; /* A negative value means "use platform default" */ - if (i915.enable_guc_submission < 0) - i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv); + if (i915_modparams.enable_guc_submission < 0) + i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv); } static void gen8_guc_raise_irq(struct intel_guc *guc) @@ -290,7 +290,7 @@ static void guc_init_send_regs(struct intel_guc *guc) static void guc_capture_load_err_log(struct intel_guc *guc) { - if (!guc->log.vma || i915.guc_log_level < 0) + if (!guc->log.vma || i915_modparams.guc_log_level < 0) return; if (!guc->load_err_log) @@ -328,12 +328,33 @@ static void guc_disable_communication(struct intel_guc *guc) guc->send = intel_guc_send_nop; } +/** + * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode + * @guc: intel_guc structure + * @rsa_offset: rsa offset w.r.t ggtt base of huc vma + * + * Triggers a HuC firmware authentication request to the GuC via intel_guc_send + * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by + * intel_huc_auth(). 
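Stepping back to intel_uc_sanitize_options(): both parameters are tri-state (negative means platform default, 0 off, positive on) and are resolved in a fixed order against hardware capability. A hedged distillation of that order as standalone logic, with illustrative stand-in names:

static void resolve_guc_options(bool has_guc, bool has_guc_ucode,
				bool fw_select_ok, bool has_guc_sched,
				int *loading, int *submission)
{
	if (!has_guc) {			/* no GuC hardware: force both off */
		*loading = 0;
		*submission = 0;
		return;
	}

	if (*loading < 0)		/* platform default for loading */
		*loading = has_guc_ucode;

	if (*loading && !fw_select_ok)	/* no usable firmware blob */
		*loading = 0;

	if (!*loading)			/* submission requires loading */
		*submission = 0;
	else if (*submission < 0)	/* platform default for submission */
		*submission = has_guc_sched;
}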
+ * + * Return: non-zero code on error + */ +int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset) +{ + u32 action[] = { + INTEL_GUC_ACTION_AUTHENTICATE_HUC, + rsa_offset + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + int intel_uc_init_hw(struct drm_i915_private *dev_priv) { struct intel_guc *guc = &dev_priv->guc; int ret, attempts; - if (!i915.enable_guc_loading) + if (!i915_modparams.enable_guc_loading) return 0; guc_disable_communication(guc); @@ -342,7 +363,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) /* We need to notify the guc whenever we change the GGTT */ i915_ggtt_enable_guc(dev_priv); - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { /* * This is stuff we need to have available at fw load time * if we are planning to enable submission later @@ -390,9 +411,9 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv) if (ret) goto err_log_capture; - intel_guc_auth_huc(dev_priv); - if (i915.enable_guc_submission) { - if (i915.guc_log_level >= 0) + intel_huc_auth(&dev_priv->huc); + if (i915_modparams.enable_guc_submission) { + if (i915_modparams.guc_log_level >= 0) gen9_enable_guc_interrupts(dev_priv); ret = i915_guc_submission_enable(dev_priv); @@ -417,23 +438,24 @@ err_interrupts: err_log_capture: guc_capture_load_err_log(guc); err_submission: - if (i915.enable_guc_submission) + if (i915_modparams.enable_guc_submission) i915_guc_submission_fini(dev_priv); err_guc: i915_ggtt_disable_guc(dev_priv); DRM_ERROR("GuC init failed\n"); - if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1) + if (i915_modparams.enable_guc_loading > 1 || + i915_modparams.enable_guc_submission > 1) ret = -EIO; else ret = 0; - if (i915.enable_guc_submission) { - i915.enable_guc_submission = 0; + if (i915_modparams.enable_guc_submission) { + i915_modparams.enable_guc_submission = 0; DRM_NOTE("Falling back from GuC submission to execlist mode\n"); } - i915.enable_guc_loading = 0; + i915_modparams.enable_guc_loading = 0; DRM_NOTE("GuC firmware loading disabled\n"); return ret; @@ -443,15 +465,15 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv) { guc_free_load_err_log(&dev_priv->guc); - if (!i915.enable_guc_loading) + if (!i915_modparams.enable_guc_loading) return; - if (i915.enable_guc_submission) + if (i915_modparams.enable_guc_submission) i915_guc_submission_disable(dev_priv); guc_disable_communication(&dev_priv->guc); - if (i915.enable_guc_submission) { + if (i915_modparams.enable_guc_submission) { gen9_disable_guc_interrupts(dev_priv); i915_guc_submission_fini(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h index 22ae52b17b0f..6966349ed737 100644 --- a/drivers/gpu/drm/i915/intel_uc.h +++ b/drivers/gpu/drm/i915/intel_uc.h @@ -52,17 +52,6 @@ struct drm_i915_gem_request; * GuC). The subsequent pages of the client object constitute the work * queue (a circular array of work items), again described in the process * descriptor. Work queue pages are mapped momentarily as required. - * - * We also keep a few statistics on failures. Ideally, these should all - * be zero! - * no_wq_space: times that the submission pre-check found no space was - * available in the work queue (note, the queue is shared, - * not per-engine). It is OK for this to be nonzero, but - * it should not be huge! - * b_fail: failed to ring the doorbell. This should never happen, unless - * somehow the hardware misbehaves, or maybe if the GuC firmware - * crashes? 
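The action packet above carries just the opcode and the GGTT offset of the RSA signature. A hedged sketch of the calling side, roughly what the renamed intel_huc_auth() is described as doing (the vma and offset plumbing here is assumed for illustration):

static void my_huc_auth(struct drm_i915_private *dev_priv,
			struct i915_vma *huc_fw_vma, u32 rsa_offset_in_fw)
{
	struct intel_guc *guc = &dev_priv->guc;
	int ret;

	/* The GuC wants the GGTT address of the RSA blob inside the
	 * pinned HuC firmware image. */
	ret = intel_guc_auth_huc(guc,
				 guc_ggtt_offset(huc_fw_vma) +
				 rsa_offset_in_fw);
	if (ret)
		DRM_ERROR("HuC: GuC authentication failed: %d\n", ret);
}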
We probably need to reset the GPU to recover. - * retcode: errno from last guc_submit() */ struct i915_guc_client { struct i915_vma *vma; @@ -77,15 +66,8 @@ struct i915_guc_client { u16 doorbell_id; unsigned long doorbell_offset; - u32 doorbell_cookie; spinlock_t wq_lock; - uint32_t wq_offset; - uint32_t wq_size; - uint32_t wq_tail; - uint32_t wq_rsvd; - uint32_t no_wq_space; - /* Per-engine counts of GuC submissions */ uint64_t submissions[I915_NUM_ENGINES]; }; @@ -229,6 +211,7 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv); int intel_guc_sample_forcewake(struct intel_guc *guc); int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len); int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len); +int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset); static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len) { @@ -250,8 +233,6 @@ u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv); /* i915_guc_submission.c */ int i915_guc_submission_init(struct drm_i915_private *dev_priv); int i915_guc_submission_enable(struct drm_i915_private *dev_priv); -int i915_guc_wq_reserve(struct drm_i915_gem_request *rq); -void i915_guc_wq_unreserve(struct drm_i915_gem_request *request); void i915_guc_submission_disable(struct drm_i915_private *dev_priv); void i915_guc_submission_fini(struct drm_i915_private *dev_priv); struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size); @@ -274,6 +255,6 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma) /* intel_huc.c */ void intel_huc_select_fw(struct intel_huc *huc); void intel_huc_init_hw(struct intel_huc *huc); -void intel_guc_auth_huc(struct drm_i915_private *dev_priv); +void intel_huc_auth(struct intel_huc *huc); #endif diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 1d7b879cc68c..b3c3f94fc7e4 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -436,7 +436,8 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv) void intel_uncore_sanitize(struct drm_i915_private *dev_priv) { - i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6); + i915_modparams.enable_rc6 = + sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6); /* BIOS often leaves RC6 enabled, but disable it for hw init */ intel_sanitize_gt_powersave(dev_priv); @@ -490,6 +491,57 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, } /** + * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace + * @dev_priv: i915 device instance + * + * This function is a wrapper around intel_uncore_forcewake_get() to acquire + * the GT powerwell and in the process disable our debugging for the + * duration of userspace's bypass. 
+ */ +void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->uncore.lock); + if (!dev_priv->uncore.user_forcewake.count++) { + intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); + + /* Save and disable mmio debugging for the user bypass */ + dev_priv->uncore.user_forcewake.saved_mmio_check = + dev_priv->uncore.unclaimed_mmio_check; + dev_priv->uncore.user_forcewake.saved_mmio_debug = + i915_modparams.mmio_debug; + + dev_priv->uncore.unclaimed_mmio_check = 0; + i915_modparams.mmio_debug = 0; + } + spin_unlock_irq(&dev_priv->uncore.lock); +} + +/** + * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace + * @dev_priv: i915 device instance + * + * This function complements intel_uncore_forcewake_user_get() and releases + * the GT powerwell taken on behalf of the userspace bypass. + */ +void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv) +{ + spin_lock_irq(&dev_priv->uncore.lock); + if (!--dev_priv->uncore.user_forcewake.count) { + if (intel_uncore_unclaimed_mmio(dev_priv)) + dev_info(dev_priv->drm.dev, + "Invalid mmio detected during user access\n"); + + dev_priv->uncore.unclaimed_mmio_check = + dev_priv->uncore.user_forcewake.saved_mmio_check; + i915_modparams.mmio_debug = + dev_priv->uncore.user_forcewake.saved_mmio_debug; + + intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); + } + spin_unlock_irq(&dev_priv->uncore.lock); +} + +/** * intel_uncore_forcewake_get__locked - grab forcewake domain references * @dev_priv: i915 device instance * @fw_domains: forcewake domains to get reference on @@ -790,7 +842,8 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv, "Unclaimed %s register 0x%x\n", read ? "read from" : "write to", i915_mmio_reg_offset(reg))) - i915.mmio_debug--; /* Only report the first N failures */ + /* Only report the first N failures */ + i915_modparams.mmio_debug--; } static inline void @@ -799,7 +852,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv, const bool read, const bool before) { - if (likely(!i915.mmio_debug)) + if (likely(!i915_modparams.mmio_debug)) return; __unclaimed_reg_debug(dev_priv, reg, read, before); @@ -1241,102 +1294,101 @@ void intel_uncore_fini(struct drm_i915_private *dev_priv) intel_uncore_forcewake_reset(dev_priv, false); } -#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1) - -static const struct register_whitelist { - i915_reg_t offset_ldw, offset_udw; - uint32_t size; - /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. 
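user_forcewake is reference counted under uncore.lock, so claims may nest and only the outermost get/put transitions the mmio-debug state. A hedged sketch of the intended pairing around a userspace register-access window (the debugfs file-op shape is assumed):

static int my_forcewake_user_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_user_get(i915);	/* GT awake, mmio debug parked */
	return 0;
}

static int my_forcewake_user_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;

	intel_uncore_forcewake_user_put(i915);	/* mmio debug restored */
	intel_runtime_pm_put(i915);
	return 0;
}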
*/ - uint32_t gen_bitmask; -} whitelist[] = { - { .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), - .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), - .size = 8, .gen_bitmask = GEN_RANGE(4, 9) }, -}; +static const struct reg_whitelist { + i915_reg_t offset_ldw; + i915_reg_t offset_udw; + u16 gen_mask; + u8 size; +} reg_read_whitelist[] = { { + .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), + .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), + .gen_mask = INTEL_GEN_MASK(4, 10), + .size = 8 +} }; int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_reg_read *reg = data; - struct register_whitelist const *entry = whitelist; - unsigned size; - i915_reg_t offset_ldw, offset_udw; - int i, ret = 0; - - for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && - (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask)) + struct reg_whitelist const *entry; + unsigned int flags; + int remain; + int ret = 0; + + entry = reg_read_whitelist; + remain = ARRAY_SIZE(reg_read_whitelist); + while (remain) { + u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); + + GEM_BUG_ON(!is_power_of_2(entry->size)); + GEM_BUG_ON(entry->size > 8); + GEM_BUG_ON(entry_offset & (entry->size - 1)); + + if (INTEL_INFO(dev_priv)->gen_mask & entry->gen_mask && + entry_offset == (reg->offset & -entry->size)) break; + entry++; + remain--; } - if (i == ARRAY_SIZE(whitelist)) + if (!remain) return -EINVAL; - /* We use the low bits to encode extra flags as the register should - * be naturally aligned (and those that are not so aligned merely - * limit the available flags for that register). - */ - offset_ldw = entry->offset_ldw; - offset_udw = entry->offset_udw; - size = entry->size; - size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw); + flags = reg->offset & (entry->size - 1); intel_runtime_pm_get(dev_priv); - - switch (size) { - case 8 | 1: - reg->val = I915_READ64_2x32(offset_ldw, offset_udw); - break; - case 8: - reg->val = I915_READ64(offset_ldw); - break; - case 4: - reg->val = I915_READ(offset_ldw); - break; - case 2: - reg->val = I915_READ16(offset_ldw); - break; - case 1: - reg->val = I915_READ8(offset_ldw); - break; - default: + if (entry->size == 8 && flags == I915_REG_READ_8B_WA) + reg->val = I915_READ64_2x32(entry->offset_ldw, + entry->offset_udw); + else if (entry->size == 8 && flags == 0) + reg->val = I915_READ64(entry->offset_ldw); + else if (entry->size == 4 && flags == 0) + reg->val = I915_READ(entry->offset_ldw); + else if (entry->size == 2 && flags == 0) + reg->val = I915_READ16(entry->offset_ldw); + else if (entry->size == 1 && flags == 0) + reg->val = I915_READ8(entry->offset_ldw); + else ret = -EINVAL; - goto out; - } - -out: intel_runtime_pm_put(dev_priv); + return ret; } -static void gen3_stop_rings(struct drm_i915_private *dev_priv) +static void gen3_stop_engine(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->i915; + const u32 base = engine->mmio_base; + const i915_reg_t mode = RING_MI_MODE(base); + + I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); + if (intel_wait_for_register_fw(dev_priv, + mode, + MODE_IDLE, + MODE_IDLE, + 500)) + DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", + engine->name); + + I915_WRITE_FW(RING_CTL(base), 0); + I915_WRITE_FW(RING_HEAD(base), 0); + I915_WRITE_FW(RING_TAIL(base), 0); + + /* Check acts as a post */ + if (I915_READ_FW(RING_HEAD(base)) != 0) + 
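With the whitelist rework above, each entry must be a power-of-two size and naturally aligned, which frees the low offset bits for request flags; the only defined flag is the 2x32 workaround read of the 64-bit render timestamp. A hedged userspace-side sketch (0x2358 being RING_TIMESTAMP(RENDER_RING_BASE)):

#include <stdio.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static void read_render_timestamp(int fd)
{
	struct drm_i915_reg_read rr = {
		/* flags ride in the low bits of the aligned offset */
		.offset = 0x2358 | I915_REG_READ_8B_WA,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
		printf("render timestamp: %llu\n",
		       (unsigned long long)rr.val);
}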
DRM_DEBUG_DRIVER("%s: ring head not parked\n", + engine->name); +} + +static void i915_stop_engines(struct drm_i915_private *dev_priv, + unsigned engine_mask) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, dev_priv, id) { - const u32 base = engine->mmio_base; - const i915_reg_t mode = RING_MI_MODE(base); - - I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING)); - if (intel_wait_for_register_fw(dev_priv, - mode, - MODE_IDLE, - MODE_IDLE, - 500)) - DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", - engine->name); - - I915_WRITE_FW(RING_CTL(base), 0); - I915_WRITE_FW(RING_HEAD(base), 0); - I915_WRITE_FW(RING_TAIL(base), 0); - - /* Check acts as a post */ - if (I915_READ_FW(RING_HEAD(base)) != 0) - DRM_DEBUG_DRIVER("%s: ring head not parked\n", - engine->name); - } + for_each_engine_masked(engine, dev_priv, engine_mask, id) + gen3_stop_engine(engine); } static bool i915_reset_complete(struct pci_dev *pdev) @@ -1371,9 +1423,6 @@ static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) { struct pci_dev *pdev = dev_priv->drm.pdev; - /* Stop engines before we reset; see g4x_do_reset() below for why. */ - gen3_stop_rings(dev_priv); - pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); return wait_for(g4x_reset_complete(pdev), 500); } @@ -1388,12 +1437,6 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); POSTING_READ(VDECCLK_GATE_D); - /* We stop engines, otherwise we might get failed reset and a - * dead gpu (on elk). - * WaMediaResetMainRingCleanup:ctg,elk (presumably) - */ - gen3_stop_rings(dev_priv); - pci_write_config_byte(pdev, I915_GDRST, GRDOM_MEDIA | GRDOM_RESET_ENABLE); ret = wait_for(g4x_reset_complete(pdev), 500); @@ -1662,7 +1705,7 @@ typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask); static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv) { - if (!i915.reset) + if (!i915_modparams.reset) return NULL; if (INTEL_INFO(dev_priv)->gen >= 8) @@ -1698,6 +1741,20 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); for (retry = 0; retry < 3; retry++) { + + /* We stop engines, otherwise we might get failed reset and a + * dead gpu (on elk). Also as modern gpu as kbl can suffer + * from system hang if batchbuffer is progressing when + * the reset is issued, regardless of READY_TO_RESET ack. + * Thus assume it is best to stop engines on all gens + * where we have a gpu reset. 
+ * + * WaMediaResetMainRingCleanup:ctg,elk (presumably) + * + * FIXME: Wa for more modern gens needs to be validated + */ + i915_stop_engines(dev_priv, engine_mask); + ret = reset(dev_priv, engine_mask); if (ret != -ETIMEDOUT) break; @@ -1722,7 +1779,7 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv) { return (dev_priv->info.has_reset_engine && !dev_priv->guc.execbuf_client && - i915.reset >= 2); + i915_modparams.reset >= 2); } int intel_guc_reset(struct drm_i915_private *dev_priv) @@ -1747,7 +1804,7 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) { - if (unlikely(i915.mmio_debug || + if (unlikely(i915_modparams.mmio_debug || dev_priv->uncore.unclaimed_mmio_check <= 0)) return false; @@ -1755,7 +1812,7 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) DRM_DEBUG("Unclaimed register detected, " "enabling oneshot unclaimed register reporting. " "Please use i915.mmio_debug=N for more information.\n"); - i915.mmio_debug++; + i915_modparams.mmio_debug++; dev_priv->uncore.unclaimed_mmio_check--; return true; } diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 5f90278da461..03786f931905 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -102,6 +102,13 @@ struct intel_uncore { i915_reg_t reg_ack; } fw_domain[FW_DOMAIN_ID_COUNT]; + struct { + unsigned int count; + + int saved_mmio_check; + int saved_mmio_debug; + } user_forcewake; + int unclaimed_mmio_check; }; @@ -144,6 +151,9 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv, void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, enum forcewake_domains domains); +void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv); +void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv); + int intel_wait_for_register(struct drm_i915_private *dev_priv, i915_reg_t reg, u32 mask, diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index a92e7762f596..404569c9fdfc 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -149,16 +149,19 @@ struct bdb_general_features { u8 ssc_freq:1; u8 enable_lfp_on_override:1; u8 disable_ssc_ddt:1; - u8 rsvd7:1; + u8 underscan_vga_timings:1; u8 display_clock_mode:1; - u8 rsvd8:1; /* finish byte */ + u8 vbios_hotplug_support:1; /* bits 3 */ u8 disable_smooth_vision:1; u8 single_dvi:1; - u8 rsvd9:1; + u8 rotate_180:1; /* 181 */ u8 fdi_rx_polarity_inverted:1; - u8 rsvd10:4; /* finish byte */ + u8 vbios_extended_mode:1; /* 160 */ + u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */ + u8 panel_best_fit_timing:1; /* 160 */ + u8 ignore_strap_state:1; /* 160 */ /* bits 4 */ u8 legacy_monitor_detect; @@ -167,9 +170,10 @@ struct bdb_general_features { u8 int_crt_support:1; u8 int_tv_support:1; u8 int_efp_support:1; - u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ + u8 dp_ssc_enable:1; /* PCH attached eDP supports SSC */ u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ - u8 rsvd11:3; /* finish byte */ + u8 dp_ssc_dongle_supported:1; + u8 rsvd11:2; /* finish byte */ } __packed; /* pre-915 */ @@ -206,6 +210,56 @@ struct bdb_general_features { #define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162 #define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2 +/* Add the device class for LFP, TV, HDMI */ +#define DEVICE_TYPE_INT_LFP 0x1022 +#define DEVICE_TYPE_INT_TV 0x1009 
+#define DEVICE_TYPE_HDMI 0x60D2 +#define DEVICE_TYPE_DP 0x68C6 +#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6 +#define DEVICE_TYPE_eDP 0x78C6 + +#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) +#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14) +#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13) +#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12) +#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11) +#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10) +#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9) +#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8) +#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6) +#define DEVICE_TYPE_LVDS_SINGALING (1 << 5) +#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4) +#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3) +#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2) +#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1) +#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0) + +/* + * Bits we care about when checking for DEVICE_TYPE_eDP. Depending on the + * system, the other bits may or may not be set for eDP outputs. + */ +#define DEVICE_TYPE_eDP_BITS \ + (DEVICE_TYPE_INTERNAL_CONNECTOR | \ + DEVICE_TYPE_MIPI_OUTPUT | \ + DEVICE_TYPE_COMPOSITE_OUTPUT | \ + DEVICE_TYPE_DUAL_CHANNEL | \ + DEVICE_TYPE_LVDS_SINGALING | \ + DEVICE_TYPE_TMDS_DVI_SIGNALING | \ + DEVICE_TYPE_VIDEO_SIGNALING | \ + DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ + DEVICE_TYPE_ANALOG_OUTPUT) + +#define DEVICE_TYPE_DP_DUAL_MODE_BITS \ + (DEVICE_TYPE_INTERNAL_CONNECTOR | \ + DEVICE_TYPE_MIPI_OUTPUT | \ + DEVICE_TYPE_COMPOSITE_OUTPUT | \ + DEVICE_TYPE_LVDS_SINGALING | \ + DEVICE_TYPE_TMDS_DVI_SIGNALING | \ + DEVICE_TYPE_VIDEO_SIGNALING | \ + DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ + DEVICE_TYPE_DIGITAL_OUTPUT | \ + DEVICE_TYPE_ANALOG_OUTPUT) + #define DEVICE_CFG_NONE 0x00 #define DEVICE_CFG_12BIT_DVOB 0x01 #define DEVICE_CFG_12BIT_DVOC 0x02 @@ -226,77 +280,126 @@ struct bdb_general_features { #define DEVICE_WIRE_DVOB_MASTER 0x0d #define DEVICE_WIRE_DVOC_MASTER 0x0e +/* dvo_port pre BDB 155 */ #define DEVICE_PORT_DVOA 0x00 /* none on 845+ */ #define DEVICE_PORT_DVOB 0x01 #define DEVICE_PORT_DVOC 0x02 +/* dvo_port BDB 155+ */ +#define DVO_PORT_HDMIA 0 +#define DVO_PORT_HDMIB 1 +#define DVO_PORT_HDMIC 2 +#define DVO_PORT_HDMID 3 +#define DVO_PORT_LVDS 4 +#define DVO_PORT_TV 5 +#define DVO_PORT_CRT 6 +#define DVO_PORT_DPB 7 +#define DVO_PORT_DPC 8 +#define DVO_PORT_DPD 9 +#define DVO_PORT_DPA 10 +#define DVO_PORT_DPE 11 /* 193 */ +#define DVO_PORT_HDMIE 12 /* 193 */ +#define DVO_PORT_MIPIA 21 /* 171 */ +#define DVO_PORT_MIPIB 22 /* 171 */ +#define DVO_PORT_MIPIC 23 /* 171 */ +#define DVO_PORT_MIPID 24 /* 171 */ + +#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33 + /* - * We used to keep this struct but without any version control. We should avoid - * using it in the future, but it should be safe to keep using it in the old - * code. Do not change; we rely on its size. + * The child device config, aka the display device data structure, provides a + * description of a port and its configuration on the platform. + * + * The child device config size has been increased, and fields have been added + * and their meaning has changed over time. Care must be taken when accessing + * basically any of the fields to ensure the correct interpretation for the BDB + * version in question. + * + * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve + * space for the full structure below, and initialize the tail not actually + * present in VBT to zeros. Accessing those fields is fine, as long as the + * default zero is taken into account, again according to the BDB version. 
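The *_BITS masks above are match masks rather than values: classification compares only the bits the mask cares about, since firmware sets the remaining bits inconsistently. A hedged sketch of the comparison (intel_bios.c does essentially this):

static bool my_child_is_edp(u16 device_type)
{
	return (device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}

static bool my_child_is_dp_dual_mode(u16 device_type)
{
	return (device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
	       (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS);
}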
+ * + * BDB versions 155 and below are considered legacy, and version 155 seems to be + * a baseline for some of the VBT documentation. When adding new fields, please + * include the BDB version when the field was added, if it's above that. */ -struct old_child_dev_config { +struct child_device_config { u16 handle; - u16 device_type; - u8 device_id[10]; /* ascii string */ - u16 addin_offset; - u8 dvo_port; /* See Device_PORT_* above */ - u8 i2c_pin; - u8 slave_addr; - u8 ddc_pin; - u16 edid_ptr; - u8 dvo_cfg; /* See DEVICE_CFG_* above */ - u8 dvo2_port; - u8 i2c2_pin; - u8 slave2_addr; - u8 ddc2_pin; - u8 capabilities; - u8 dvo_wiring;/* See DEVICE_WIRE_* above */ - u8 dvo2_wiring; - u16 extended_type; - u8 dvo_function; -} __packed; + u16 device_type; /* See DEVICE_TYPE_* above */ + + union { + u8 device_id[10]; /* ascii string */ + struct { + u8 i2c_speed; + u8 dp_onboard_redriver; /* 158 */ + u8 dp_ondock_redriver; /* 158 */ + u8 hdmi_level_shifter_value:4; /* 169 */ + u8 hdmi_max_data_rate:4; /* 204 */ + u16 dtd_buf_ptr; /* 161 */ + u8 edidless_efp:1; /* 161 */ + u8 compression_enable:1; /* 198 */ + u8 compression_method:1; /* 198 */ + u8 ganged_edp:1; /* 202 */ + u8 reserved0:4; + u8 compression_structure_index:4; /* 198 */ + u8 reserved1:4; + u8 slave_port; /* 202 */ + u8 reserved2; + } __packed; + } __packed; -/* This one contains field offsets that are known to be common for all BDB - * versions. Notice that the meaning of the contents contents may still change, - * but at least the offsets are consistent. */ - -struct common_child_dev_config { - u16 handle; - u16 device_type; - u8 not_common1[12]; - u8 dvo_port; - u8 not_common2[2]; + u16 addin_offset; + u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */ + u8 i2c_pin; + u8 slave_addr; u8 ddc_pin; u16 edid_ptr; u8 dvo_cfg; /* See DEVICE_CFG_* above */ - u8 efp_routed:1; - u8 lane_reversal:1; - u8 lspcon:1; - u8 iboost:1; - u8 hpd_invert:1; - u8 flag_reserved:3; - u8 hdmi_support:1; - u8 dp_support:1; - u8 tmds_support:1; - u8 support_reserved:5; - u8 aux_channel; - u8 not_common3[11]; - u8 iboost_level; -} __packed; + union { + struct { + u8 dvo2_port; + u8 i2c2_pin; + u8 slave2_addr; + u8 ddc2_pin; + } __packed; + struct { + u8 efp_routed:1; /* 158 */ + u8 lane_reversal:1; /* 184 */ + u8 lspcon:1; /* 192 */ + u8 iboost:1; /* 196 */ + u8 hpd_invert:1; /* 196 */ + u8 flag_reserved:3; + u8 hdmi_support:1; /* 158 */ + u8 dp_support:1; /* 158 */ + u8 tmds_support:1; /* 158 */ + u8 support_reserved:5; + u8 aux_channel; + u8 dongle_detect; + } __packed; + } __packed; + + u8 pipe_cap:2; + u8 sdvo_stall:1; /* 158 */ + u8 hpd_status:2; + u8 integrated_encoder:1; + u8 capabilities_reserved:2; + u8 dvo_wiring; /* See DEVICE_WIRE_* above */ + + union { + u8 dvo2_wiring; + u8 mipi_bridge_type; /* 171 */ + } __packed; -/* This field changes depending on the BDB version, so the most reliable way to - * read it is by checking the BDB version and reading the raw pointer. */ -union child_device_config { - /* This one is safe to be used anywhere, but the code should still check - * the BDB version. */ - u8 raw[33]; - /* This one should only be kept for legacy code. */ - struct old_child_dev_config old; - /* This one should also be safe to use anywhere, even without version - * checks. 
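Because the fields above overlay older layouts (note the unions), zero-initialized padding alone is not enough; a reader has to gate on the BDB version noted next to each field. A hedged sketch, assuming the parser keeps that version around (dev_priv->vbt.version in i915):

static u8 my_hdmi_level_shifter(const struct child_device_config *child,
				u16 bdb_version)
{
	/* hdmi_level_shifter_value arrived in BDB 169; before that the
	 * same byte belonged to the legacy device_id string and must
	 * not be interpreted as a level shifter setting. */
	if (bdb_version < 169)
		return 0;	/* caller falls back to a platform default */

	return child->hdmi_level_shifter_value;
}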
*/ - struct common_child_dev_config common; + u16 extended_type; + u8 dvo_function; + u8 dp_usb_type_c:1; /* 195 */ + u8 flags2_reserved:7; /* 195 */ + u8 dp_gpio_index; /* 195 */ + u16 dp_gpio_pin_num; /* 195 */ + u8 dp_iboost_level:4; /* 196 */ + u8 hdmi_iboost_level:4; /* 196 */ } __packed; struct bdb_general_definitions { @@ -585,23 +688,38 @@ struct bdb_driver_features { #define EDP_VSWING_1_2V 3 -struct edp_link_params { +struct edp_fast_link_params { u8 rate:4; u8 lanes:4; u8 preemphasis:4; u8 vswing:4; } __packed; +struct edp_pwm_delays { + u16 pwm_on_to_backlight_enable; + u16 backlight_disable_to_pwm_off; +} __packed; + +struct edp_full_link_params { + u8 preemphasis:4; + u8 vswing:4; +} __packed; + struct bdb_edp { struct edp_power_seq power_seqs[16]; u32 color_depth; - struct edp_link_params link_params[16]; + struct edp_fast_link_params fast_link_params[16]; u32 sdrrs_msa_timing_delay; /* ith bit indicates enabled/disabled for (i+1)th panel */ - u16 edp_s3d_feature; - u16 edp_t3_optimization; - u64 edp_vswing_preemph; /* v173 */ + u16 edp_s3d_feature; /* 162 */ + u16 edp_t3_optimization; /* 165 */ + u64 edp_vswing_preemph; /* 173 */ + u16 fast_link_training; /* 182 */ + u16 dpcd_600h_write_required; /* 185 */ + struct edp_pwm_delays pwm_delays[16]; /* 186 */ + u16 full_link_params_provided; /* 199 */ + struct edp_full_link_params full_link_params[16]; /* 199 */ } __packed; struct psr_table { @@ -745,81 +863,6 @@ struct bdb_psr { #define SWF14_APM_STANDBY 0x1 #define SWF14_APM_RESTORE 0x0 -/* Add the device class for LFP, TV, HDMI */ -#define DEVICE_TYPE_INT_LFP 0x1022 -#define DEVICE_TYPE_INT_TV 0x1009 -#define DEVICE_TYPE_HDMI 0x60D2 -#define DEVICE_TYPE_DP 0x68C6 -#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6 -#define DEVICE_TYPE_eDP 0x78C6 - -#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) -#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14) -#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13) -#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12) -#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11) -#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10) -#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9) -#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8) -#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6) -#define DEVICE_TYPE_LVDS_SINGALING (1 << 5) -#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4) -#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3) -#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2) -#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1) -#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0) - -/* - * Bits we care about when checking for DEVICE_TYPE_eDP - * Depending on the system, the other bits may or may not - * be set for eDP outputs. 
- */ -#define DEVICE_TYPE_eDP_BITS \ - (DEVICE_TYPE_INTERNAL_CONNECTOR | \ - DEVICE_TYPE_MIPI_OUTPUT | \ - DEVICE_TYPE_COMPOSITE_OUTPUT | \ - DEVICE_TYPE_DUAL_CHANNEL | \ - DEVICE_TYPE_LVDS_SINGALING | \ - DEVICE_TYPE_TMDS_DVI_SIGNALING | \ - DEVICE_TYPE_VIDEO_SIGNALING | \ - DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ - DEVICE_TYPE_ANALOG_OUTPUT) - -#define DEVICE_TYPE_DP_DUAL_MODE_BITS \ - (DEVICE_TYPE_INTERNAL_CONNECTOR | \ - DEVICE_TYPE_MIPI_OUTPUT | \ - DEVICE_TYPE_COMPOSITE_OUTPUT | \ - DEVICE_TYPE_LVDS_SINGALING | \ - DEVICE_TYPE_TMDS_DVI_SIGNALING | \ - DEVICE_TYPE_VIDEO_SIGNALING | \ - DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ - DEVICE_TYPE_DIGITAL_OUTPUT | \ - DEVICE_TYPE_ANALOG_OUTPUT) - -/* define the DVO port for HDMI output type */ -#define DVO_B 1 -#define DVO_C 2 -#define DVO_D 3 - -/* Possible values for the "DVO Port" field for versions >= 155: */ -#define DVO_PORT_HDMIA 0 -#define DVO_PORT_HDMIB 1 -#define DVO_PORT_HDMIC 2 -#define DVO_PORT_HDMID 3 -#define DVO_PORT_LVDS 4 -#define DVO_PORT_TV 5 -#define DVO_PORT_CRT 6 -#define DVO_PORT_DPB 7 -#define DVO_PORT_DPC 8 -#define DVO_PORT_DPD 9 -#define DVO_PORT_DPA 10 -#define DVO_PORT_DPE 11 -#define DVO_PORT_HDMIE 12 -#define DVO_PORT_MIPIA 21 -#define DVO_PORT_MIPIB 22 -#define DVO_PORT_MIPIC 23 -#define DVO_PORT_MIPID 24 - /* Block 52 contains MIPI configuration block * 6 * bdb_mipi_config, followed by 6 pps data block * block below diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c index 7a44dab631b8..4795877abe56 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_timeline.c @@ -121,7 +121,7 @@ out: static unsigned int random_engine(struct rnd_state *rnd) { - return ((u64)prandom_u32_state(rnd) * I915_NUM_ENGINES) >> 32; + return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd); } static int bench_sync(void *arg) diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c index 222c511bea49..b85872cc7fbe 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.c +++ b/drivers/gpu/drm/i915/selftests/i915_random.c @@ -41,11 +41,6 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd) return x; } -static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) -{ - return upper_32_bits((u64)prandom_u32_state(state) * ep_ro); -} - void i915_random_reorder(unsigned int *order, unsigned int count, struct rnd_state *state) { diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 6c9379871384..7dffedc501ca 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -43,6 +43,11 @@ u64 i915_prandom_u64_state(struct rnd_state *rnd); +static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) +{ + return upper_32_bits(mul_u32_u32(prandom_u32_state(state), ep_ro)); +} + unsigned int *i915_random_order(unsigned int count, struct rnd_state *state); void i915_random_reorder(unsigned int *order, diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c index 02e52a146ed8..377c1de766ce 100644 --- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c @@ -621,7 +621,12 @@ static int igt_wait_reset(void *arg) __i915_add_request(rq, true); if (!wait_for_hang(&h, rq)) { - pr_err("Failed to start request %x\n", rq->fence.seqno); + 
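Moving down to the selftest hunks: i915_prandom_u32_max_state() relocates to the header so both users share it. The trick is fixed-point scaling: for x uniform over [0, 2^32), upper_32_bits(x * n) equals floor(x * n / 2^32), an almost-unbiased draw from [0, n) with no division. A hedged check, using n = 5 purely for illustration:

static void my_check_scaling(struct rnd_state *rnd)
{
	/* e.g. x = 0x80000000 (0.5 in 0.32 fixed point) and n = 5:
	 * upper_32_bits(0x80000000ull * 5) == 2 == floor(0.5 * 5) */
	u32 pick = i915_prandom_u32_max_state(5, rnd);

	GEM_BUG_ON(pick >= 5);	/* result always lands in [0, n) */
}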
pr_err("Failed to start request %x, at %x\n", + rq->fence.seqno, hws_seqno(&h, rq)); + + i915_reset(i915, 0); + i915_gem_set_wedged(i915); + err = -EIO; goto out_rq; } @@ -708,10 +713,14 @@ static int igt_reset_queue(void *arg) __i915_add_request(rq, true); if (!wait_for_hang(&h, prev)) { - pr_err("Failed to start request %x\n", - prev->fence.seqno); + pr_err("Failed to start request %x, at %x\n", + prev->fence.seqno, hws_seqno(&h, prev)); i915_gem_request_put(rq); i915_gem_request_put(prev); + + i915_reset(i915, 0); + i915_gem_set_wedged(i915); + err = -EIO; goto fini; } @@ -806,7 +815,12 @@ static int igt_handle_error(void *arg) __i915_add_request(rq, true); if (!wait_for_hang(&h, rq)) { - pr_err("Failed to start request %x\n", rq->fence.seqno); + pr_err("Failed to start request %x, at %x\n", + rq->fence.seqno, hws_seqno(&h, rq)); + + i915_reset(i915, 0); + i915_gem_set_wedged(i915); + err = -EIO; goto err_request; } @@ -843,8 +857,8 @@ err_unlock: int intel_hangcheck_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { + SUBTEST(igt_global_reset), /* attempt to recover GPU first */ SUBTEST(igt_hang_sanitycheck), - SUBTEST(igt_global_reset), SUBTEST(igt_reset_engine), SUBTEST(igt_reset_active_engines), SUBTEST(igt_wait_reset), diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 678723430d78..2388424a14da 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -146,6 +146,11 @@ struct drm_i915_private *mock_gem_device(void) dev_set_name(&pdev->dev, "mock"); dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); +#if IS_ENABLED(CONFIG_IOMMU_API) + /* hack to disable iommu for the fake device; force identity mapping */ + pdev->dev.archdata.iommu = (void *)-1; +#endif + dev_pm_domain_set(&pdev->dev, &pm_domain); pm_runtime_enable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); |