author     Linus Torvalds <torvalds@linux-foundation.org>   2018-02-01 17:48:47 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-02-01 17:48:47 -0800
commit     4bf772b14675411a69b3c807f73006de0fe4b649 (patch)
tree       b841e3ba0e3429695589cb0ab73871fa12f42c38 /drivers/gpu/drm/i915/gvt/execlist.c
parent     3879ae653a3e98380fe2daf653338830b7ca0097 (diff)
parent     24b8ef699e8221d2b7f813adaab13eec053e1507 (diff)
download   blackbird-op-linux-4bf772b14675411a69b3c807f73006de0fe4b649.tar.gz
           blackbird-op-linux-4bf772b14675411a69b3c807f73006de0fe4b649.zip
Merge tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"This seems to have been a comparatively quieter merge window, I assume
due to holidays etc. The "biggest" change is AMD header cleanups, which
merge/remove a bunch of them. The AMD gpu scheduler is now being made generic
with the etnaviv driver wanting to reuse the code, hopefully other drivers
can go in the same direction.
Otherwise it's the usual lots of stuff in i915/amdgpu, not so much stuff
elsewhere.
Core:
- Add .last_close and .output_poll_changed helpers to reduce driver footprints
- Fix plane clipping
- Improved debug printing support
- Add panel orientation property
- Update edid derived properties at edid setting
- Reduction in fbdev driver footprint
- Move amdgpu scheduler into core for other drivers to use.
i915:
- Selftest and IGT improvements
- Fast boot prep work on IPS, pipe config
- HW workarounds for Cannonlake, Geminilake
- Cannonlake clock and HDMI2.0 fixes
- GPU cache invalidation and context switch improvements
- Display planes cleanup
- New PMU interface for perf queries
- New firmware support for KBL/SKL
- Geminilake HW workaround for performance
- Coffeelake stolen memory improvements
- GPU reset robustness work
- Cannonlake horizontal plane flipping
- GVT work
amdgpu/radeon:
- RV and Vega header file cleanups (lots of lines gone!)
- TTM operation context support
- 48-bit GPUVM support for Vega/RV
- ECC support for Vega
- Resizeable BAR support
- Multi-display sync support
- Enable swapout for reserved BOs during allocation
- S3 fixes on Raven
- GPU reset cleanup and fixes
- 2+1 level GPU page table
amdkfd:
- GFX7/8 SDMA user queues support
- Hardware scheduling for multiple processes
- dGPU prep work
rcar:
- Added R8A7743/5 support
- System suspend/resume support
sun4i:
- Multi-plane support for YUV formats
- A83T and LVDS support
msm:
- Devfreq support for GPU
tegra:
- Prep work for adding Tegra186 support
- Tegra186 HDMI support
- HDMI2.0 and zpos support by using generic helpers
tilcdc:
- Misc fixes
omapdrm:
- Support memory bandwidth limits
- DSI command mode panel cleanups
- DMM error handling
exynos:
- Drop the old IPP subdriver
etnaviv:
- Occlusion query fixes
- Job handling fixes
- Prep work for hooking in gpu scheduler
armada:
- Move closer to atomic modesetting
- Allow disabling primary plane if overlay is full screen
imx:
- Format modifier support
- Add tile prefetch to PRE
- Runtime PM support for PRG
ast:
- fix LUT loading"
* tag 'drm-for-v4.16' of git://people.freedesktop.org/~airlied/linux: (1471 commits)
drm/ast: Load lut in crtc_commit
drm: Check for lessee in DROP_MASTER ioctl
drm: fix gpu scheduler link order
drm/amd/display: Demote error print to debug print when ATOM impl missing
dma-buf: fix reservation_object_wait_timeout_rcu once more v2
drm/amdgpu: Avoid leaking PM domain on driver unbind (v2)
drm/amd/amdgpu: Add Polaris version check
drm/amdgpu: Reenable manual GPU reset from sysfs
drm/amdgpu: disable MMHUB power gating on raven
drm/ttm: Don't unreserve swapped BOs that were previously reserved
drm/ttm: Don't add swapped BOs to swap-LRU list
drm/amdgpu: only check for ECC on Vega10
drm/amd/powerplay: Fix smu_table_entry.handle type
drm/ttm: add VADDR_FLAG_UPDATED_COUNT to correctly update dma_page global count
drm: Fix PANEL_ORIENTATION_QUIRKS breaking the Kconfig DRM menuconfig
drm/radeon: fill in rb backend map on evergreen/ni.
drm/amdgpu/gfx9: fix ngg enablement to clear gds reserved memory (v2)
drm/ttm: only free pages rather than update global memory count together
drm/amdgpu: fix CPU based VM updates
drm/amdgpu: fix typo in amdgpu_vce_validate_bo
...
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/execlist.c')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/execlist.c | 498
1 file changed, 69 insertions, 429 deletions
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 940cdaaa3f24..769c1c24ae75 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -46,8 +46,6 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
-
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -135,6 +133,8 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 write_pointer;
 	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
+	unsigned long hwsp_gpa;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
 			_EL_OFFSET_STATUS_PTR);
@@ -160,6 +160,20 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
 	ctx_status_ptr.write_ptr = write_pointer;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 
+	/* Update the CSB and CSB write pointer in HWSP */
+	hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+					 vgpu->hws_pga[ring_id]);
+	if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
+		intel_gvt_hypervisor_write_gpa(vgpu,
+			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
+			write_pointer * 8,
+			status, 8);
+		intel_gvt_hypervisor_write_gpa(vgpu,
+			hwsp_gpa +
+			intel_hws_csb_write_index(dev_priv) * 4,
+			&write_pointer, 4);
+	}
+
 	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
 		vgpu->id, write_pointer, offset, status->ldw, status->udw);
 
@@ -358,218 +372,47 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
 	return 0;
 }
 
-static void free_workload(struct intel_vgpu_workload *workload)
-{
-	intel_vgpu_unpin_mm(workload->shadow_mm);
-	intel_gvt_mm_unreference(workload->shadow_mm);
-	kmem_cache_free(workload->vgpu->workloads, workload);
-}
-
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
-{
-	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
-	struct intel_shadow_bb_entry *entry_obj;
-
-	/* pin the gem object to ggtt */
-	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
-		struct i915_vma *vma;
-
-		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
-		if (IS_ERR(vma)) {
-			return PTR_ERR(vma);
-		}
-
-		/* FIXME: we are not tracking our pinned VMA leaving it
-		 * up to the core to fix up the stray pin_count upon
-		 * free.
-		 */
-
-		/* update the relocate gma with shadow batch buffer*/
-		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
-		if (gmadr_bytes == 8)
-			entry_obj->bb_start_cmd_va[2] = 0;
-	}
-	return 0;
-}
-
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
-	struct intel_vgpu_workload *workload = container_of(wa_ctx,
-					struct intel_vgpu_workload,
-					wa_ctx);
-	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
-	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
-	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
-
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap_atomic(page);
-
-	shadow_ring_context->bb_per_ctx_ptr.val =
-		(shadow_ring_context->bb_per_ctx_ptr.val &
-		(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
-	shadow_ring_context->rcs_indirect_ctx.val =
-		(shadow_ring_context->rcs_indirect_ctx.val &
-		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
-	kunmap_atomic(shadow_ring_context);
-	return 0;
-}
-
-static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
-{
-	struct i915_vma *vma;
-	unsigned char *per_ctx_va =
-		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
-		wa_ctx->indirect_ctx.size;
-
-	if (wa_ctx->indirect_ctx.size == 0)
-		return 0;
-
-	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
-				       0, CACHELINE_BYTES, 0);
-	if (IS_ERR(vma)) {
-		return PTR_ERR(vma);
-	}
-
-	/* FIXME: we are not tracking our pinned VMA leaving it
-	 * up to the core to fix up the stray pin_count upon
-	 * free.
-	 */
-
-	wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
-
-	wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
-	memset(per_ctx_va, 0, CACHELINE_BYTES);
-
-	update_wa_ctx_2_shadow_ctx(wa_ctx);
-	return 0;
-}
-
-static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
-{
-	/* release all the shadow batch buffer */
-	if (!list_empty(&workload->shadow_bb)) {
-		struct intel_shadow_bb_entry *entry_obj =
-			list_first_entry(&workload->shadow_bb,
-					 struct intel_shadow_bb_entry,
-					 list);
-		struct intel_shadow_bb_entry *temp;
-
-		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-					 list) {
-			i915_gem_object_unpin_map(entry_obj->obj);
-			i915_gem_object_put(entry_obj->obj);
-			list_del(&entry_obj->list);
-			kfree(entry_obj);
-		}
-	}
-}
-
 static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct execlist_ctx_descriptor_format ctx[2];
 	int ring_id = workload->ring_id;
 	int ret;
 
-	ret = intel_vgpu_pin_mm(workload->shadow_mm);
-	if (ret) {
-		gvt_vgpu_err("fail to vgpu pin mm\n");
-		goto out;
-	}
-
-	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
-	if (ret) {
-		gvt_vgpu_err("fail to vgpu sync oos pages\n");
-		goto err_unpin_mm;
-	}
-
-	ret = intel_vgpu_flush_post_shadow(workload->vgpu);
-	if (ret) {
-		gvt_vgpu_err("fail to flush post shadow\n");
-		goto err_unpin_mm;
-	}
-
-	ret = intel_gvt_generate_request(workload);
-	if (ret) {
-		gvt_vgpu_err("fail to generate request\n");
-		goto err_unpin_mm;
-	}
-
-	ret = prepare_shadow_batch_buffer(workload);
-	if (ret) {
-		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
-		goto err_unpin_mm;
-	}
-
-	ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
-	if (ret) {
-		gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
-		goto err_shadow_batch;
-	}
-
 	if (!workload->emulate_schedule_in)
 		return 0;
 
-	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
-	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
+	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
 
-	ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
-	if (!ret)
-		goto out;
-	else
+	ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+	if (ret) {
 		gvt_vgpu_err("fail to emulate execlist schedule in\n");
-
-	release_shadow_wa_ctx(&workload->wa_ctx);
-err_shadow_batch:
-	release_shadow_batch_buffer(workload);
-err_unpin_mm:
-	intel_vgpu_unpin_mm(workload->shadow_mm);
-out:
-	return ret;
+		return ret;
+	}
+	return 0;
 }
 
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	int ring_id = workload->ring_id;
-	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
 	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
-	int ret;
+	int ret = 0;
 
 	gvt_dbg_el("complete workload %p status %d\n", workload,
 		workload->status);
 
-	if (!workload->status) {
-		release_shadow_batch_buffer(workload);
-		release_shadow_wa_ctx(&workload->wa_ctx);
-	}
-
-	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
-		/* if workload->status is not successful means HW GPU
-		 * has occurred GPU hang or something wrong with i915/GVT,
-		 * and GVT won't inject context switch interrupt to guest.
-		 * So this error is a vGPU hang actually to the guest.
-		 * According to this we should emunlate a vGPU hang. If
-		 * there are pending workloads which are already submitted
-		 * from guest, we should clean them up like HW GPU does.
-		 *
-		 * if it is in middle of engine resetting, the pending
-		 * workloads won't be submitted to HW GPU and will be
-		 * cleaned up during the resetting process later, so doing
-		 * the workload clean up here doesn't have any impact.
-		 **/
-		clean_workloads(vgpu, ENGINE_MASK(ring_id));
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
 		goto out;
-	}
 
 	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
@@ -584,213 +427,60 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	if (lite_restore) {
 		gvt_dbg_el("next context == current - no schedule-out\n");
-		free_workload(workload);
-		return 0;
+		goto out;
 	}
 
 	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
-	if (ret)
-		goto err;
 out:
-	free_workload(workload);
-	return 0;
-err:
-	free_workload(workload);
+	intel_vgpu_unpin_mm(workload->shadow_mm);
+	intel_vgpu_destroy_workload(workload);
 	return ret;
 }
 
-#define RING_CTX_OFF(x) \
-	offsetof(struct execlist_ring_context, x)
-
-static void read_guest_pdps(struct intel_vgpu *vgpu,
-		u64 ring_context_gpa, u32 pdp[8])
-{
-	u64 gpa;
-	int i;
-
-	gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
-
-	for (i = 0; i < 8; i++)
-		intel_gvt_hypervisor_read_gpa(vgpu,
-				gpa + i * 8, &pdp[7 - i], 4);
-}
-
-static int prepare_mm(struct intel_vgpu_workload *workload)
-{
-	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
-	struct intel_vgpu_mm *mm;
-	struct intel_vgpu *vgpu = workload->vgpu;
-	int page_table_level;
-	u32 pdp[8];
-
-	if (desc->addressing_mode == 1) { /* legacy 32-bit */
-		page_table_level = 3;
-	} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
-		page_table_level = 4;
-	} else {
-		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
-		return -EINVAL;
-	}
-
-	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
-
-	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
-	if (mm) {
-		intel_gvt_mm_reference(mm);
-	} else {
-
-		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
-				pdp, page_table_level, 0);
-		if (IS_ERR(mm)) {
-			gvt_vgpu_err("fail to create mm object.\n");
-			return PTR_ERR(mm);
-		}
-	}
-	workload->shadow_mm = mm;
-	return 0;
-}
-
-#define get_last_workload(q) \
-	(list_empty(q) ? NULL : container_of(q->prev, \
-	struct intel_vgpu_workload, list))
-
 static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 		struct execlist_ctx_descriptor_format *desc,
 		bool emulate_schedule_in)
 {
-	struct list_head *q = workload_q_head(vgpu, ring_id);
-	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_vgpu_workload *workload = NULL;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	u64 ring_context_gpa;
-	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
-	int ret;
-
-	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
-	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
-		return -EINVAL;
-	}
-
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(ring_header.val), &head, 4);
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(ring_tail.val), &tail, 4);
+	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+	if (IS_ERR(workload))
+		return PTR_ERR(workload);
 
-	head &= RB_HEAD_OFF_MASK;
-	tail &= RB_TAIL_OFF_MASK;
-
-	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-		gvt_dbg_el("ctx head %x real head %lx\n", head,
-				last_workload->rb_tail);
-		/*
-		 * cannot use guest context head pointer here,
-		 * as it might not be updated at this time
-		 */
-		head = last_workload->rb_tail;
-	}
-
-	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
-
-	workload = kmem_cache_zalloc(vgpu->workloads, GFP_KERNEL);
-	if (!workload)
-		return -ENOMEM;
-
-	/* record some ring buffer register values for scan and shadow */
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(rb_start.val), &start, 4);
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
-	intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
-
-	INIT_LIST_HEAD(&workload->list);
-	INIT_LIST_HEAD(&workload->shadow_bb);
-
-	init_waitqueue_head(&workload->shadow_ctx_status_wq);
-	atomic_set(&workload->shadow_ctx_active, 0);
-
-	workload->vgpu = vgpu;
-	workload->ring_id = ring_id;
-	workload->ctx_desc = *desc;
-	workload->ring_context_gpa = ring_context_gpa;
-	workload->rb_head = head;
-	workload->rb_tail = tail;
-	workload->rb_start = start;
-	workload->rb_ctl = ctl;
 	workload->prepare = prepare_execlist_workload;
 	workload->complete = complete_execlist_workload;
-	workload->status = -EINPROGRESS;
 	workload->emulate_schedule_in = emulate_schedule_in;
-	workload->shadowed = false;
-
-	if (ring_id == RCS) {
-		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
-		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
-			RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
-
-		workload->wa_ctx.indirect_ctx.guest_gma =
-			indirect_ctx & INDIRECT_CTX_ADDR_MASK;
-		workload->wa_ctx.indirect_ctx.size =
-			(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
-			CACHELINE_BYTES;
-		workload->wa_ctx.per_ctx.guest_gma =
-			per_ctx & PER_CTX_ADDR_MASK;
-		workload->wa_ctx.per_ctx.valid = per_ctx & 1;
-	}
 
 	if (emulate_schedule_in)
-		workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
-
-	gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
-			workload, ring_id, head, tail, start, ctl);
+		workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
 
 	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
 			emulate_schedule_in);
 
-	ret = prepare_mm(workload);
-	if (ret) {
-		kmem_cache_free(vgpu->workloads, workload);
-		return ret;
-	}
-
-	/* Only scan and shadow the first workload in the queue
-	 * as there is only one pre-allocated buf-obj for shadow.
-	 */
-	if (list_empty(workload_q_head(vgpu, ring_id))) {
-		intel_runtime_pm_get(dev_priv);
-		mutex_lock(&dev_priv->drm.struct_mutex);
-		intel_gvt_scan_and_shadow_workload(workload);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
-		intel_runtime_pm_put(dev_priv);
-	}
-
-	queue_workload(workload);
+	intel_vgpu_queue_workload(workload);
 	return 0;
 }
 
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
-	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
-	struct execlist_ctx_descriptor_format desc[2];
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct execlist_ctx_descriptor_format *desc[2];
 	int i, ret;
 
-	desc[0] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
-	desc[1] = *get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+	desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
+	desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);
 
-	if (!desc[0].valid) {
+	if (!desc[0]->valid) {
 		gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
 		goto inv_desc;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(desc); i++) {
-		if (!desc[i].valid)
+		if (!desc[i]->valid)
 			continue;
-		if (!desc[i].privilege_access) {
+		if (!desc[i]->privilege_access) {
 			gvt_vgpu_err("unexpected GGTT elsp submission\n");
 			goto inv_desc;
 		}
@@ -798,9 +488,9 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 
 	/* submit workload */
 	for (i = 0; i < ARRAY_SIZE(desc); i++) {
-		if (!desc[i].valid)
+		if (!desc[i]->valid)
 			continue;
-		ret = submit_context(vgpu, ring_id, &desc[i], i == 0);
+		ret = submit_context(vgpu, ring_id, desc[i], i == 0);
 		if (ret) {
 			gvt_vgpu_err("failed to submit desc %d\n", i);
 			return ret;
@@ -811,13 +501,14 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
 
 inv_desc:
 	gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
-		     desc[0].udw, desc[0].ldw, desc[1].udw, desc[1].ldw);
+		     desc[0]->udw, desc[0]->ldw, desc[1]->udw, desc[1]->ldw);
 	return -EINVAL;
 }
 
 static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 {
-	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 ctx_status_ptr_reg;
 
@@ -837,91 +528,40 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
-{
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct intel_engine_cs *engine;
-	struct intel_vgpu_workload *pos, *n;
-	unsigned int tmp;
-
-	/* free the unsubmited workloads in the queues.
-	 */
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-		list_for_each_entry_safe(pos, n,
-			&vgpu->workload_q_head[engine->id], list) {
-			list_del_init(&pos->list);
-			free_workload(pos);
-		}
-
-		clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
-	}
-}
-
-void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu)
 {
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
 
-	clean_workloads(vgpu, ALL_ENGINES);
-	kmem_cache_destroy(vgpu->workloads);
-
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		kfree(vgpu->reserve_ring_buffer_va[i]);
-		vgpu->reserve_ring_buffer_va[i] = NULL;
-		vgpu->reserve_ring_buffer_size[i] = 0;
-	}
+		struct intel_vgpu_submission *s = &vgpu->submission;
 
-}
-
-#define RESERVE_RING_BUFFER_SIZE	((1 * PAGE_SIZE)/8)
-int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
-{
-	enum intel_engine_id i;
-	struct intel_engine_cs *engine;
-
-	/* each ring has a virtual execlist engine */
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		init_vgpu_execlist(vgpu, i);
-		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
-	}
-
-	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
-			sizeof(struct intel_vgpu_workload), 0,
-			SLAB_HWCACHE_ALIGN,
-			NULL);
-
-	if (!vgpu->workloads)
-		return -ENOMEM;
-
-	/* each ring has a shadow ring buffer until vgpu destroyed */
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		vgpu->reserve_ring_buffer_va[i] =
-			kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
-		if (!vgpu->reserve_ring_buffer_va[i]) {
-			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
-			goto out;
-		}
-		vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+		kfree(s->ring_scan_buffer[i]);
+		s->ring_scan_buffer[i] = NULL;
+		s->ring_scan_buffer_size[i] = 0;
 	}
-	return 0;
-out:
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		if (vgpu->reserve_ring_buffer_size[i]) {
-			kfree(vgpu->reserve_ring_buffer_va[i]);
-			vgpu->reserve_ring_buffer_va[i] = NULL;
-			vgpu->reserve_ring_buffer_size[i] = 0;
-		}
-	}
-	return -ENOMEM;
 }
 
-void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
+static void reset_execlist(struct intel_vgpu *vgpu,
 		unsigned long engine_mask)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
 	unsigned int tmp;
 
-	clean_workloads(vgpu, engine_mask);
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
 }
+
+static int init_execlist(struct intel_vgpu *vgpu)
+{
+	reset_execlist(vgpu, ALL_ENGINES);
+	return 0;
+}
+
+const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = {
+	.name = "execlist",
+	.init = init_execlist,
+	.reset = reset_execlist,
+	.clean = clean_execlist,
+};