Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
27 files changed, 493 insertions, 230 deletions
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index c3d19d88da40..771420453f82 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 		flags = PIN_MAPPABLE;
 	}
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev_priv->ggtt.vm.mutex);
 	mmio_hw_access_pre(dev_priv);
 	ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
 				  size, I915_GTT_PAGE_SIZE,
 				  I915_COLOR_UNEVICTABLE,
 				  start, end, flags);
 	mmio_hw_access_post(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev_priv->ggtt.vm.mutex);
 	if (ret)
 		gvt_err("fail to alloc %s gm space from host\n",
 			high_gm ? "high" : "low");
@@ -98,9 +98,9 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
 	return 0;
 
 out_free_aperture:
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev_priv->ggtt.vm.mutex);
 	drm_mm_remove_node(&vgpu->gm.low_gm_node);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev_priv->ggtt.vm.mutex);
 	return ret;
 }
 
@@ -108,10 +108,10 @@ static void free_vgpu_gm(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev_priv->ggtt.vm.mutex);
 	drm_mm_remove_node(&vgpu->gm.low_gm_node);
 	drm_mm_remove_node(&vgpu->gm.high_gm_node);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev_priv->ggtt.vm.mutex);
 }
 
 /**
@@ -172,14 +172,14 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
 
 	intel_runtime_pm_get(&dev_priv->runtime_pm);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev_priv->ggtt.vm.mutex);
 	_clear_vgpu_fence(vgpu);
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
 		reg = vgpu->fence.regs[i];
 		i915_unreserve_fence(reg);
 		vgpu->fence.regs[i] = NULL;
 	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev_priv->ggtt.vm.mutex);
 
 	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 }
@@ -195,10 +195,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 	intel_runtime_pm_get(rpm);
 
 	/* Request fences from host */
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	mutex_lock(&dev_priv->ggtt.vm.mutex);
 
 	for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
-		reg = i915_reserve_fence(dev_priv);
+		reg = i915_reserve_fence(&dev_priv->ggtt);
 		if (IS_ERR(reg))
 			goto out_free_fence;
 
@@ -207,7 +207,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
 
 	_clear_vgpu_fence(vgpu);
 
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev_priv->ggtt.vm.mutex);
 	intel_runtime_pm_put_unchecked(rpm);
 	return 0;
 out_free_fence:
@@ -220,7 +220,7 @@ out_free_fence:
 		i915_unreserve_fence(reg);
 		vgpu->fence.regs[i] = NULL;
 	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&dev_priv->ggtt.vm.mutex);
 	intel_runtime_pm_put_unchecked(rpm);
 	return -ENOSPC;
 }
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b09dc315e2da..21a176cd8acc 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -35,7 +35,9 @@
  */
 
 #include <linux/slab.h>
+
 #include "i915_drv.h"
+#include "gt/intel_ring.h"
 #include "gvt.h"
 #include "i915_pvinfo.h"
 #include "trace.h"
@@ -374,21 +376,37 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
 #define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
 #define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
 
+#define DWORD_FIELD(dword, end, start) \
+	FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
+
+#define OP_LENGTH_BIAS 2
+#define CMD_LEN(value) (value + OP_LENGTH_BIAS)
+
+static int gvt_check_valid_cmd_length(int len, int valid_len)
+{
+	if (valid_len != len) {
+		gvt_err("len is not valid: len=%u valid_len=%u\n",
+			len, valid_len);
+		return -EFAULT;
+	}
+	return 0;
+}
+
 struct cmd_info {
 	const char *name;
 	u32 opcode;
 
-#define F_LEN_MASK	(1U<<0)
+#define F_LEN_MASK	3U
 #define F_LEN_CONST	1U
 #define F_LEN_VAR	0U
+/* value is const although LEN maybe variable */
+#define F_LEN_VAR_FIXED	(1<<1)
 
 /*
  * command has its own ip advance logic
  * e.g. MI_BATCH_START, MI_BATCH_END
  */
-#define F_IP_ADVANCE_CUSTOM (1<<1)
-
-#define F_POST_HANDLE	(1<<2)
+#define F_IP_ADVANCE_CUSTOM (1<<2)
 	u32 flag;
 
 #define R_RCS	BIT(RCS0)
@@ -418,9 +436,12 @@ struct cmd_info {
 	 * flag == F_LEN_VAR : length bias bits
 	 * Note: length is in DWord
 	 */
-	u8 len;
+	u32 len;
 
 	parser_cmd_handler handler;
+
+	/* valid length in DWord */
+	u32 valid_len;
 };
 
 struct cmd_entry {
@@ -944,6 +965,18 @@ static int cmd_handler_lri(struct parser_exec_state *s)
 	int i, ret = 0;
 	int cmd_len = cmd_length(s);
 	struct intel_gvt *gvt = s->vgpu->gvt;
+	u32 valid_len = CMD_LEN(1);
+
+	/*
+	 * Official intel docs are somewhat sloppy , check the definition of
+	 * MI_LOAD_REGISTER_IMM.
+	 */
+	#define MAX_VALID_LEN 127
+	if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
+		gvt_err("len is not valid: len=%u valid_len=%u\n",
+			cmd_len, valid_len);
+		return -EFAULT;
+	}
 
 	for (i = 1; i < cmd_len; i += 2) {
 		if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
@@ -1375,6 +1408,15 @@ static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
 	int ret;
 	int i;
 	int len = cmd_length(s);
+	u32 valid_len = CMD_LEN(1);
+
+	/* Flip Type == Stereo 3D Flip */
+	if (DWORD_FIELD(2, 1, 0) == 2)
+		valid_len++;
+	ret = gvt_check_valid_cmd_length(cmd_length(s),
+			valid_len);
+	if (ret)
+		return ret;
 
 	ret = decode_mi_display_flip(s, &info);
 	if (ret) {
@@ -1494,12 +1536,21 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
 	int op_size = (cmd_length(s) - 3) * sizeof(u32);
 	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
 	unsigned long gma, gma_low, gma_high;
+	u32 valid_len = CMD_LEN(2);
 	int ret = 0;
 
 	/* check ppggt */
 	if (!(cmd_val(s, 0) & (1 << 22)))
 		return 0;
 
+	/* check if QWORD */
+	if (DWORD_FIELD(0, 21, 21))
+		valid_len++;
+	ret = gvt_check_valid_cmd_length(cmd_length(s),
+			valid_len);
+	if (ret)
+		return ret;
+
 	gma = cmd_val(s, 2) & GENMASK(31, 2);
 
 	if (gmadr_bytes == 8) {
@@ -1542,11 +1593,20 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
 	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
 			sizeof(u32);
 	unsigned long gma, gma_high;
+	u32 valid_len = CMD_LEN(1);
 	int ret = 0;
 
 	if (!(cmd_val(s, 0) & (1 << 22)))
 		return ret;
 
+	/* check inline data */
+	if (cmd_val(s, 0) & BIT(18))
+		valid_len = CMD_LEN(9);
+	ret = gvt_check_valid_cmd_length(cmd_length(s),
+			valid_len);
+	if (ret)
+		return ret;
+
 	gma = cmd_val(s, 1) & GENMASK(31, 2);
 	if (gmadr_bytes == 8) {
 		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
@@ -1584,6 +1644,16 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
 	bool index_mode = false;
 	int ret = 0;
 	u32 hws_pga, val;
+	u32 valid_len = CMD_LEN(2);
+
+	ret = gvt_check_valid_cmd_length(cmd_length(s),
+			valid_len);
+	if (ret) {
+		/* Check again for Qword */
+		ret = gvt_check_valid_cmd_length(cmd_length(s),
+				++valid_len);
+		return ret;
+	}
 
 	/* Check post-sync and ppgtt bit */
 	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1661,7 +1731,9 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 	return 1;
 }
 
-static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
+static int find_bb_size(struct parser_exec_state *s,
+			unsigned long *bb_size,
+			unsigned long *bb_end_cmd_offset)
 {
 	unsigned long gma = 0;
 	const struct cmd_info *info;
@@ -1673,6 +1745,7 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
 
 	*bb_size = 0;
+	*bb_end_cmd_offset = 0;
 
 	/* get the start gm address of the batch buffer */
 	gma = get_gma_bb_from_cmd(s, 1);
@@ -1708,6 +1781,10 @@
 			/* chained batch buffer */
 			bb_end = true;
 		}
+
+		if (bb_end)
+			*bb_end_cmd_offset = *bb_size;
+
 		cmd_len = get_cmd_length(info, cmd) << 2;
 		*bb_size += cmd_len;
 		gma += cmd_len;
@@ -1716,12 +1793,36 @@
 	return 0;
 }
 
+static int audit_bb_end(struct parser_exec_state *s, void *va)
+{
+	struct intel_vgpu *vgpu = s->vgpu;
+	u32 cmd = *(u32 *)va;
+	const struct cmd_info *info;
+
+	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+	if (info == NULL) {
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+			     cmd, get_opcode(cmd, s->ring_id),
+			     (s->buf_addr_type == PPGTT_BUFFER) ?
+			     "ppgtt" : "ggtt", s->ring_id, s->workload);
+		return -EBADRQC;
+	}
+
+	if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
+	    ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
+	     (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
+		return 0;
+
+	return -EBADRQC;
+}
+
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
 	struct intel_vgpu *vgpu = s->vgpu;
 	struct intel_vgpu_shadow_bb *bb;
 	unsigned long gma = 0;
 	unsigned long bb_size;
+	unsigned long bb_end_cmd_offset;
 	int ret = 0;
 	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
 		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
@@ -1732,7 +1833,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	if (gma == INTEL_GVT_INVALID_ADDR)
 		return -EFAULT;
 
-	ret = find_bb_size(s, &bb_size);
+	ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
 	if (ret)
 		return ret;
 
@@ -1788,6 +1889,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 		goto err_unmap;
 	}
 
+	ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
+	if (ret)
+		goto err_unmap;
+
 	INIT_LIST_HEAD(&bb->list);
 	list_add(&bb->list, &s->workload->shadow_bb);
 
@@ -1912,21 +2017,24 @@ static const struct cmd_info cmd_info[] = {
 	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
 		NULL},
 
-	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
 		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
 
-	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
-		0, 8, NULL},
+	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
+		R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
 
 	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
 
-	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
+		D_ALL, 0, 8, NULL, CMD_LEN(0)},
 
-	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
-		D_BDW_PLUS, 0, 8, NULL},
+	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
+		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
+		NULL, CMD_LEN(0)},
 
-	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
-		D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
+		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
+		8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
 
 	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL,
 		D_BDW_PLUS, ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
@@ -1940,8 +2048,9 @@ static const struct cmd_info cmd_info[] = {
 	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
 		cmd_handler_mi_update_gtt},
 
-	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
-		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
+		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+		cmd_handler_srm, CMD_LEN(2)},
 
 	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
 		cmd_handler_mi_flush_dw},
@@ -1949,26 +2058,30 @@ static const struct cmd_info cmd_info[] = {
 	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
 		10, cmd_handler_mi_clflush},
 
-	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
-		D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
+		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
+		cmd_handler_mi_report_perf_count, CMD_LEN(2)},
 
-	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
-		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
+		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+		cmd_handler_lrm, CMD_LEN(2)},
 
-	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
-		D_ALL, 0, 8, cmd_handler_lrr},
+	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
+		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
+		cmd_handler_lrr, CMD_LEN(1)},
 
-	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
-		D_ALL, 0, 8, NULL},
+	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
+		F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
+		8, NULL, CMD_LEN(2)},
 
-	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
-		ADDR_FIX_1(2), 8, NULL},
+	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
+		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
 
 	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
 		ADDR_FIX_1(2), 8, NULL},
 
-	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
-		8, cmd_handler_mi_op_2e},
+	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
+		ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
 
 	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
 		8, cmd_handler_mi_op_2f},
@@ -1978,8 +2091,8 @@ static const struct cmd_info cmd_info[] = {
 		cmd_handler_mi_batch_buffer_start},
 
 	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
-		F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
-		cmd_handler_mi_conditional_batch_buffer_end},
+		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+		cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
 
 	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
 		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
@@ -2569,6 +2682,13 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 		  cmd_length(s), s->buf_type, s->buf_addr_type,
 		  s->workload, info->name);
 
+	if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
+		ret = gvt_check_valid_cmd_length(cmd_length(s),
+						 info->valid_len);
+		if (ret)
+			return ret;
+	}
+
 	if (info->handler) {
 		ret = info->handler(s);
 		if (ret < 0) {
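The command-length checks in cmd_parser.c above all rest on the same MI-command encoding rule: the LEN field of a variable-length command stores the total DWord count minus two, which is what the OP_LENGTH_BIAS/CMD_LEN pair expresses, and individual flag bits (such as the QWORD bit of MI_STORE_DATA_IMM) add operand DWords. A standalone sketch of that arithmetic — all names below are local to the example, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Same bias as the diff's OP_LENGTH_BIAS/CMD_LEN: a command whose LEN
 * field decodes to N occupies N + 2 DWords in the ring. */
#define OP_LENGTH_BIAS 2
#define CMD_LEN(value) ((value) + OP_LENGTH_BIAS)

/* Userspace stand-ins for the kernel's GENMASK()/FIELD_GET(), which the
 * new DWORD_FIELD() macro builds on: dividing by the mask's lowest set
 * bit shifts the field down to bit 0. */
#define GENMASK32(h, l) (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_GET32(mask, val) (((val) & (mask)) / ((mask) & -(mask)))

static int check_valid_cmd_length(unsigned int len, unsigned int valid_len)
{
	if (valid_len != len) {
		fprintf(stderr, "len is not valid: len=%u valid_len=%u\n",
			len, valid_len);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* MI_STORE_DATA_IMM: base length CMD_LEN(2); bit 21 of DWord 0
	 * (the QWORD flag) adds one more operand DWord. */
	uint32_t dw0 = 1u << 21;
	unsigned int valid_len = CMD_LEN(2);

	if (FIELD_GET32(GENMASK32(21, 21), dw0))
		valid_len++;

	return check_valid_cmd_length(5, valid_len) ? 1 : 0;
}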
{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, + F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0, + 8, NULL, CMD_LEN(2)}, - {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL, - ADDR_FIX_1(2), 8, NULL}, + {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED, + R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)}, {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL}, - {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2), - 8, cmd_handler_mi_op_2e}, + {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, + ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)}, {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1), 8, cmd_handler_mi_op_2f}, @@ -1978,8 +2091,8 @@ static const struct cmd_info cmd_info[] = { cmd_handler_mi_batch_buffer_start}, {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END, - F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8, - cmd_handler_mi_conditional_batch_buffer_end}, + F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8, + cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)}, {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST, R_RCS | R_BCS, D_ALL, 0, 2, NULL}, @@ -2569,6 +2682,13 @@ static int cmd_parser_exec(struct parser_exec_state *s) cmd_length(s), s->buf_type, s->buf_addr_type, s->workload, info->name); + if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) { + ret = gvt_check_valid_cmd_length(cmd_length(s), + info->valid_len); + if (ret) + return ret; + } + if (info->handler) { ret = info->handler(s); if (ret < 0) { diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h index 286703643002..ab25d151932a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h @@ -38,6 +38,10 @@ #define GVT_CMD_HASH_BITS 7 +struct intel_gvt; +struct intel_shadow_wa_ctx; +struct intel_vgpu_workload; + void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt); int intel_gvt_init_cmd_parser(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 2fb7b73b260d..285f6011a537 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -189,36 +189,19 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops, /** * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU * @vgpu: a vGPU - * - * Returns: - * Zero on success, negative error code if failed. 
*/ -int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) +void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) { - struct dentry *ent; char name[16] = ""; snprintf(name, 16, "vgpu%d", vgpu->id); vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root); - if (!vgpu->debugfs) - return -ENOMEM; - - ent = debugfs_create_bool("active", 0444, vgpu->debugfs, - &vgpu->active); - if (!ent) - return -ENOMEM; - - ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, - vgpu, &vgpu_mmio_diff_fops); - if (!ent) - return -ENOMEM; - ent = debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, - vgpu, &vgpu_scan_nonprivbb_fops); - if (!ent) - return -ENOMEM; - - return 0; + debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active); + debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu, + &vgpu_mmio_diff_fops); + debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu, + &vgpu_scan_nonprivbb_fops); } /** @@ -234,27 +217,15 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) /** * intel_gvt_debugfs_init - register gvt debugfs root entry * @gvt: GVT device - * - * Returns: - * zero on success, negative if failed. */ -int intel_gvt_debugfs_init(struct intel_gvt *gvt) +void intel_gvt_debugfs_init(struct intel_gvt *gvt) { struct drm_minor *minor = gvt->dev_priv->drm.primary; - struct dentry *ent; gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root); - if (!gvt->debugfs_root) { - gvt_err("Cannot create debugfs dir\n"); - return -ENOMEM; - } - ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root, - &gvt->mmio.num_tracked_mmio); - if (!ent) - return -ENOMEM; - - return 0; + debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root, + &gvt->mmio.num_tracked_mmio); } /** diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index a87f33e6a23c..b59b34046e1e 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -35,6 +35,11 @@ #ifndef _GVT_DISPLAY_H_ #define _GVT_DISPLAY_H_ +#include <linux/types.h> + +struct intel_gvt; +struct intel_vgpu; + #define SBI_REG_MAX 20 #define DPCD_SIZE 0x700 diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 41c8ebc60c63..2477a1e5a166 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -36,13 +36,32 @@ #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12)) +static int vgpu_pin_dma_address(struct intel_vgpu *vgpu, + unsigned long size, + dma_addr_t dma_addr) +{ + int ret = 0; + + if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr)) + ret = -EINVAL; + + return ret; +} + +static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu, + dma_addr_t dma_addr) +{ + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr); +} + static int vgpu_gem_get_pages( struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct intel_vgpu *vgpu; struct sg_table *st; struct scatterlist *sg; - int i, ret; + int i, j, ret; gen8_pte_t __iomem *gtt_entries; struct intel_vgpu_fb_info *fb_info; u32 page_num; @@ -51,6 +70,10 @@ static int vgpu_gem_get_pages( if (WARN_ON(!fb_info)) return -ENODEV; + vgpu = fb_info->obj->vgpu; + if (WARN_ON(!vgpu)) + return -ENODEV; + st = kmalloc(sizeof(*st), GFP_KERNEL); if (unlikely(!st)) return -ENOMEM; @@ -64,21 +87,53 @@ static int vgpu_gem_get_pages( gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + (fb_info->start >> PAGE_SHIFT); for_each_sg(st->sgl, sg, page_num, i) { + 
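The debugfs.c conversion above follows the tree-wide rule that debugfs registration is best-effort and callers should not check the result, which is what lets both helpers return void. A minimal sketch of the resulting calling pattern — the function below is illustrative, not part of the driver:

#include <linux/debugfs.h>

/* Best-effort registration: no dentry bookkeeping, no error unwinding.
 * If creation fails the file is simply absent, and the debugfs core is
 * written so that the later calls tolerate a failed parent. */
static void example_add_debugfs(struct dentry *parent, bool *active)
{
	struct dentry *dir = debugfs_create_dir("example", parent);

	debugfs_create_bool("active", 0444, dir, active);
}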
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 41c8ebc60c63..2477a1e5a166 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -36,13 +36,32 @@
 
 #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
 
+static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
+				unsigned long size,
+				dma_addr_t dma_addr)
+{
+	int ret = 0;
+
+	if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
+		ret = -EINVAL;
+
+	return ret;
+}
+
+static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
+				   dma_addr_t dma_addr)
+{
+	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
+}
+
 static int vgpu_gem_get_pages(
 		struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct intel_vgpu *vgpu;
 	struct sg_table *st;
 	struct scatterlist *sg;
-	int i, ret;
+	int i, j, ret;
 	gen8_pte_t __iomem *gtt_entries;
 	struct intel_vgpu_fb_info *fb_info;
 	u32 page_num;
@@ -51,6 +70,10 @@ static int vgpu_gem_get_pages(
 	if (WARN_ON(!fb_info))
 		return -ENODEV;
 
+	vgpu = fb_info->obj->vgpu;
+	if (WARN_ON(!vgpu))
+		return -ENODEV;
+
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (unlikely(!st))
 		return -ENOMEM;
@@ -64,21 +87,53 @@ static int vgpu_gem_get_pages(
 	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
 		(fb_info->start >> PAGE_SHIFT);
 	for_each_sg(st->sgl, sg, page_num, i) {
+		dma_addr_t dma_addr =
+			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+		if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
 		sg->offset = 0;
 		sg->length = PAGE_SIZE;
-		sg_dma_address(sg) =
-			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
 		sg_dma_len(sg) = PAGE_SIZE;
+		sg_dma_address(sg) = dma_addr;
 	}
 
 	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+out:
+	if (ret) {
+		dma_addr_t dma_addr;
+
+		for_each_sg(st->sgl, sg, i, j) {
+			dma_addr = sg_dma_address(sg);
+			if (dma_addr)
+				vgpu_unpin_dma_address(vgpu, dma_addr);
+		}
+		sg_free_table(st);
+		kfree(st);
+	}
+
+	return ret;
-
-	return 0;
 }
 
 static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
 		struct sg_table *pages)
 {
+	struct scatterlist *sg;
+
+	if (obj->base.dma_buf) {
+		struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
+		struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+		struct intel_vgpu *vgpu = obj->vgpu;
+		int i;
+
+		for_each_sg(pages->sgl, sg, fb_info->size, i)
+			vgpu_unpin_dma_address(vgpu,
+					       sg_dma_address(sg));
+	}
+
 	sg_free_table(pages);
 	kfree(pages);
 }
@@ -152,6 +207,7 @@ static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
 static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 		struct intel_vgpu_fb_info *info)
 {
+	static struct lock_class_key lock_class;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
 
@@ -161,7 +217,8 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 	drm_gem_private_object_init(dev, &obj->base,
 		roundup(info->size, PAGE_SIZE));
-	i915_gem_object_init(obj, &intel_vgpu_gem_ops);
+	i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+	i915_gem_object_set_readonly(obj);
 
 	obj->read_domains = I915_GEM_DOMAIN_GTT;
 	obj->write_domain = 0;
@@ -491,15 +548,13 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 
 	obj->gvt_info = dmabuf_obj->info;
 
-	dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+	dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
 	if (IS_ERR(dmabuf)) {
 		gvt_vgpu_err("export dma-buf failed\n");
 		ret = PTR_ERR(dmabuf);
 		goto out_free_gem;
 	}
 
-	i915_gem_object_put(obj);
-
 	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
 	if (ret < 0) {
 		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
@@ -524,6 +579,8 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 		file_count(dmabuf->file),
 		kref_read(&obj->base.refcount));
 
+	i915_gem_object_put(obj);
+
 	return dmabuf_fd;
 
 out_free_dmabuf:
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index f6dfc8b795ec..dfe0cbc6aad8 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -35,6 +35,10 @@
 #ifndef _GVT_EDID_H_
 #define _GVT_EDID_H_
 
+#include <linux/types.h>
+
+struct intel_vgpu;
+
 #define EDID_SIZE		128
 #define EDID_ADDR		0x50 /* Linux hvm EDID addr */
 
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f21b8fb5b37e..d6e7a1189bad 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -534,7 +534,7 @@ static void clean_execlist(struct intel_vgpu *vgpu,
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
 		kfree(s->ring_scan_buffer[engine->id]);
 		s->ring_scan_buffer[engine->id] = NULL;
 		s->ring_scan_buffer_size[engine->id] = 0;
@@ -548,7 +548,7 @@ static void reset_execlist(struct intel_vgpu *vgpu,
 	struct intel_engine_cs *engine;
 	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5ccc2c695848..5c0c1fd30c83 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -35,6 +35,8 @@
 #ifndef _GVT_EXECLIST_H_
 #define _GVT_EXECLIST_H_
 
+#include <linux/types.h>
+
 struct execlist_ctx_descriptor_format {
 	union {
 		u32 ldw;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 60c155085029..67b6ede9e707 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -36,6 +36,8 @@
 #ifndef _GVT_FB_DECODER_H_
 #define _GVT_FB_DECODER_H_
 
+#include <linux/types.h>
+
 #define _PLANE_CTL_FORMAT_SHIFT	24
 #define _PLANE_CTL_TILED_SHIFT		10
 #define _PIPE_V_SRCSZ_SHIFT	0
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 049775e8e350..b0c1fda32977 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -146,7 +146,7 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt)
 		clean_firmware_sysfs(gvt);
 
 	kfree(gvt->firmware.cfg_space);
-	kfree(gvt->firmware.mmio);
+	vfree(gvt->firmware.mmio);
 }
 
 static int verify_firmware(struct intel_gvt *gvt,
@@ -229,7 +229,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 
 	firmware->cfg_space = mem;
 
-	mem = kmalloc(info->mmio_size, GFP_KERNEL);
+	mem = vmalloc(info->mmio_size);
 	if (!mem) {
 		kfree(path);
 		kfree(firmware->cfg_space);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4b04af569c05..4a4828074cb7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1282,7 +1282,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	default:
 		GEM_BUG_ON(1);
-	};
+	}
 
 	/* direct shadow */
 	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
@@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
 
 	if (mm->type == INTEL_GVT_MM_PPGTT) {
 		list_del(&mm->ppgtt_mm.list);
+
+		mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 		list_del(&mm->ppgtt_mm.lru_list);
+		mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
+
 		invalidate_ppgtt_mm(mm);
 	} else {
 		vfree(mm->ggtt_mm.virtual_ggtt);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 42d0394f0de2..88789316807d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -205,17 +205,18 @@ struct intel_vgpu_gtt {
 	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
 };
 
-extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
-extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
 
-extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+int intel_gvt_init_gtt(struct intel_gvt *gvt);
 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
-extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 
-extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level, void *root_entry);
+struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+					      int page_table_level,
+					      void *root_entry);
 
 struct intel_vgpu_oos_page {
 	struct intel_vgpu_ppgtt_spt *spt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 43f4242062dd..8f37eefa0a02 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -375,9 +375,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	}
 	gvt->idle_vgpu = vgpu;
 
-	ret = intel_gvt_debugfs_init(gvt);
-	if (ret)
-		gvt_err("debugfs registration failed, go on.\n");
+	intel_gvt_debugfs_init(gvt);
 
 	gvt_dbg_core("gvt device initialization is done\n");
 	dev_priv->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7a1fe44d45af..b47c6acaf9c0 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -334,6 +334,10 @@ struct intel_gvt {
 	struct {
 		struct engine_mmio *mmio;
 		int ctx_mmio_count[I915_NUM_ENGINES];
+		u32 *tlb_mmio_offset_list;
+		u32 tlb_mmio_offset_list_cnt;
+		u32 *mocs_mmio_offset_list;
+		u32 mocs_mmio_offset_list_cnt;
 	} engine_mmio_list;
 
 	struct dentry *debugfs_root;
@@ -682,9 +686,9 @@ static inline void intel_gvt_mmio_set_in_ctx(
 	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
 }
 
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
-int intel_gvt_debugfs_init(struct intel_gvt *gvt);
+void intel_gvt_debugfs_init(struct intel_gvt *gvt);
 void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
 
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 25f78196b964..6d28d72e6c7e 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 			gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
 			engine_mask |= BIT(VCS1);
 		}
+		if (data & GEN9_GRDOM_GUC) {
+			gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
+			vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+		}
 		engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
 	}
 
@@ -460,6 +464,7 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static i915_reg_t force_nonpriv_white_list[] = {
 	GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
 	GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+	PS_INVOCATION_COUNT,//_MMIO(0x2348)
 	GEN8_CS_CHICKEN1,//_MMIO(0x2580)
 	_MMIO(0x2690),
 	_MMIO(0x2694),
@@ -508,7 +513,7 @@ static inline bool in_whitelist(unsigned int reg)
 static int force_nonpriv_write(struct intel_vgpu *vgpu,
 	unsigned int offset, void *p_data, unsigned int bytes)
 {
-	u32 reg_nonpriv = *(u32 *)p_data;
+	u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	u32 ring_base;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -528,7 +533,7 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
 			bytes);
 	} else
 		gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
-			vgpu->id, reg_nonpriv, offset);
+			vgpu->id, *(u32 *)p_data, offset);
 
 	return 0;
 }
@@ -819,13 +824,16 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	enum intel_gvt_event_type event;
 
-	if (reg == _DPA_AUX_CH_CTL)
+	if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
 		event = AUX_CHANNEL_A;
-	else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL)
+	else if (reg == _PCH_DPB_AUX_CH_CTL ||
+		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B)))
 		event = AUX_CHANNEL_B;
-	else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL)
+	else if (reg == _PCH_DPC_AUX_CH_CTL ||
+		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C)))
 		event = AUX_CHANNEL_C;
-	else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL)
+	else if (reg == _PCH_DPD_AUX_CH_CTL ||
+		 reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
 		event = AUX_CHANNEL_D;
 	else {
 		WARN_ON(true);
@@ -1632,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int guc_status_read(struct intel_vgpu *vgpu,
+			   unsigned int offset, void *p_data,
+			   unsigned int bytes)
+{
+	/* keep MIA_IN_RESET before clearing */
+	read_vreg(vgpu, offset, p_data, bytes);
+	vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
+	return 0;
+}
+
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -2668,10 +2686,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
 	MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
 
+	MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
+
 	return 0;
 }
 
-static int init_broadwell_mmio_info(struct intel_gvt *gvt)
+static int init_bdw_mmio_info(struct intel_gvt *gvt)
 {
 	struct drm_i915_private *dev_priv = gvt->dev_priv;
 	int ret;
@@ -2796,7 +2816,7 @@
 	MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
 
 	MMIO_D(WM_MISC, D_BDW);
-	MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
+	MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW);
 
 	MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
 	MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
@@ -2872,11 +2892,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
 	MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
 
-	MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+	MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
 						dp_aux_ch_ctl_mmio_write);
-	MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+	MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
 						dp_aux_ch_ctl_mmio_write);
-	MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+	MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
 						dp_aux_ch_ctl_mmio_write);
 
 	MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
@@ -3360,20 +3380,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		goto err;
 
 	if (IS_BROADWELL(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)
		|| IS_COFFEELAKE(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 		ret = init_skl_mmio_info(gvt);
 		if (ret)
 			goto err;
 	} else if (IS_BROXTON(dev_priv)) {
-		ret = init_broadwell_mmio_info(gvt);
+		ret = init_bdw_mmio_info(gvt);
 		if (ret)
 			goto err;
 		ret = init_skl_mmio_info(gvt);
@@ -3417,6 +3437,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
 	}
 
 	for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) {
+		/* pvinfo data doesn't come from hw mmio */
+		if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE)
+			continue;
+
 		for (j = 0; j < block->size; j += 4) {
 			ret = handler(gvt,
 				i915_mmio_reg_offset(block->offset) + j,
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 4862fb12778e..b17c4a1599cd 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -33,6 +33,10 @@
 #ifndef _GVT_HYPERCALL_H_
 #define _GVT_HYPERCALL_H_
 
+#include <linux/types.h>
+
+struct device;
+
 enum hypervisor_type {
 	INTEL_GVT_HYPERVISOR_XEN = 0,
 	INTEL_GVT_HYPERVISOR_KVM,
@@ -62,6 +66,8 @@ struct intel_gvt_mpt {
 			      unsigned long size, dma_addr_t *dma_addr);
 	void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
 
+	int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
 	int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
 			      unsigned long mfn, unsigned int nr, bool map);
 	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 951681813230..11accd3e1023 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -672,7 +672,7 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt)
 	hrtimer_cancel(&irq->vblank_timer.timer);
 }
 
-#define VBLNAK_TIMER_PERIOD 16000000
+#define VBLANK_TIMER_PERIOD 16000000
 
 /**
  * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
@@ -704,7 +704,7 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
 	hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	vblank_timer->timer.function = vblank_timer_fn;
-	vblank_timer->period = VBLNAK_TIMER_PERIOD;
+	vblank_timer->period = VBLANK_TIMER_PERIOD;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h
index 5313fb1b33e1..fcd663811d37 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.h
+++ b/drivers/gpu/drm/i915/gvt/interrupt.h
@@ -32,6 +32,8 @@
 #ifndef _GVT_INTERRUPT_H_
 #define _GVT_INTERRUPT_H_
 
+#include <linux/types.h>
+
 enum intel_gvt_event_type {
 	RCS_MI_USER_INTERRUPT = 0,
 	RCS_DEBUG,
@@ -135,6 +137,7 @@ enum intel_gvt_event_type {
 
 struct intel_gvt_irq;
 struct intel_gvt;
+struct intel_vgpu;
 
 typedef void (*gvt_event_virt_handler_t)(struct intel_gvt_irq *irq,
 	enum intel_gvt_event_type event, struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 23aa3e50cbf8..3259a1fa69e1 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1306,7 +1306,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 	unsigned int i;
 	int ret;
 	struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
-	size_t size;
 	int nr_areas = 1;
 	int cap_type_id;
 
@@ -1349,9 +1348,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 			VFIO_REGION_INFO_FLAG_WRITE;
 		info.size = gvt_aperture_sz(vgpu->gvt);
 
-		size = sizeof(*sparse) +
-				(nr_areas * sizeof(*sparse->areas));
-		sparse = kzalloc(size, GFP_KERNEL);
+		sparse = kzalloc(struct_size(sparse, areas, nr_areas),
+				 GFP_KERNEL);
 		if (!sparse)
 			return -ENOMEM;
 
@@ -1416,9 +1414,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 		switch (cap_type_id) {
 		case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
 			ret = vfio_info_add_capability(&caps,
-				&sparse->header, sizeof(*sparse) +
-				(sparse->nr_areas *
-					sizeof(*sparse->areas)));
+				&sparse->header,
+				struct_size(sparse, areas,
+					    sparse->nr_areas));
 			if (ret) {
 				kfree(sparse);
 				return ret;
@@ -1566,27 +1564,10 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "\n");
 }
 
-static ssize_t
-hw_id_show(struct device *dev, struct device_attribute *attr,
-	   char *buf)
-{
-	struct mdev_device *mdev = mdev_from_dev(dev);
-
-	if (mdev) {
-		struct intel_vgpu *vgpu = (struct intel_vgpu *)
-			mdev_get_drvdata(mdev);
-		return sprintf(buf, "%u\n",
-			       vgpu->submission.shadow[0]->gem_context->hw_id);
-	}
-	return sprintf(buf, "\n");
-}
-
 static DEVICE_ATTR_RO(vgpu_id);
-static DEVICE_ATTR_RO(hw_id);
 
 static struct attribute *intel_vgpu_attrs[] = {
 	&dev_attr_vgpu_id.attr,
-	&dev_attr_hw_id.attr,
 	NULL
 };
 
@@ -1798,9 +1779,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 			"kvmgt_nr_cache_entries", 0444, vgpu->debugfs,
 			&vgpu->vdev.nr_cache_entries);
 
-	if (!info->debugfs_cache_entries)
-		gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
-
 	return 0;
 }
 
@@ -1938,6 +1916,28 @@ err_unlock:
 	return ret;
 }
 
+static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+	struct kvmgt_guest_info *info;
+	struct gvt_dma *entry;
+	int ret = 0;
+
+	if (!handle_valid(handle))
+		return -ENODEV;
+
+	info = (struct kvmgt_guest_info *)handle;
+
+	mutex_lock(&info->vgpu->vdev.cache_lock);
+	entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+	if (entry)
+		kref_get(&entry->ref);
+	else
+		ret = -ENOMEM;
+	mutex_unlock(&info->vgpu->vdev.cache_lock);
+
+	return ret;
+}
+
 static void __gvt_dma_release(struct kref *ref)
 {
 	struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
@@ -2049,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
 	.gfn_to_mfn = kvmgt_gfn_to_pfn,
 	.dma_map_guest_page = kvmgt_dma_map_guest_page,
 	.dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+	.dma_pin_guest_page = kvmgt_dma_pin_guest_page,
 	.set_opregion = kvmgt_set_opregion,
 	.set_edid = kvmgt_set_edid,
 	.get_vfio_device = kvmgt_get_vfio_device,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 5874f1cb4306..2e68f4b02c94 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -36,6 +36,8 @@
 #ifndef _GVT_MMIO_H_
 #define _GVT_MMIO_H_
 
+#include <linux/types.h>
+
 struct intel_gvt;
 struct intel_vgpu;
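The kvmgt.c hunks above replace the open-coded sizeof(*sparse) + nr * sizeof(*sparse->areas) with struct_size(), whose point is safe sizing of a header plus flexible array member. A userspace model of the layout arithmetic — the kernel macro additionally saturates on multiplication overflow, which this sketch omits, and all names here are local to the example:

#include <stdio.h>
#include <stdlib.h>

/* Header with a flexible array member, shaped like the VFIO sparse
 * mmap capability the ioctl handler allocates. */
struct sparse_mmap {
	unsigned int nr_areas;
	struct { unsigned long offset, size; } areas[];
};

/* sizeof(header) + n * sizeof(element), the quantity struct_size()
 * computes overflow-safely in the kernel. */
#define struct_size_demo(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct sparse_mmap *sparse;
	unsigned int nr_areas = 1;

	sparse = calloc(1, struct_size_demo(sparse, areas, nr_areas));
	if (!sparse)
		return 1;
	sparse->nr_areas = nr_areas;
	printf("allocated %zu bytes\n",
	       struct_size_demo(sparse, areas, nr_areas));
	free(sparse);
	return 0;
}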
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2998999e8568..aaf15916d29a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -35,6 +35,7 @@
 
 #include "i915_drv.h"
 #include "gt/intel_context.h"
+#include "gt/intel_ring.h"
 #include "gvt.h"
 #include "trace.h"
 
@@ -148,19 +149,27 @@ static struct {
 	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
 } gen9_render_mocs;
 
+static u32 gen9_mocs_mmio_offset_list[] = {
+	[RCS0]  = 0xc800,
+	[VCS0]  = 0xc900,
+	[VCS1]  = 0xca00,
+	[BCS0]  = 0xcc00,
+	[VECS0] = 0xcb00,
+};
+
 static void load_render_mocs(struct drm_i915_private *dev_priv)
 {
+	struct intel_gvt *gvt = dev_priv->gvt;
 	i915_reg_t offset;
-	u32 regs[] = {
-		[RCS0]  = 0xc800,
-		[VCS0]  = 0xc900,
-		[VCS1]  = 0xca00,
-		[BCS0]  = 0xcc00,
-		[VECS0] = 0xcb00,
-	};
+	u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
+	u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
 	int ring_id, i;
 
-	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+	/* Platform doesn't have mocs mmios. */
+	if (!regs)
+		return;
+
+	for (ring_id = 0; ring_id < cnt; ring_id++) {
 		if (!HAS_ENGINE(dev_priv, ring_id))
 			continue;
 		offset.reg = regs[ring_id];
@@ -327,22 +336,28 @@ out:
 	return ret;
 }
 
+static u32 gen8_tlb_mmio_offset_list[] = {
+	[RCS0]  = 0x4260,
+	[VCS0]  = 0x4264,
+	[VCS1]  = 0x4268,
+	[BCS0]  = 0x426c,
+	[VECS0] = 0x4270,
+};
+
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_uncore *uncore = &dev_priv->uncore;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
+	u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
 	enum forcewake_domains fw;
 	i915_reg_t reg;
-	u32 regs[] = {
-		[RCS0]  = 0x4260,
-		[VCS0]  = 0x4264,
-		[VCS1]  = 0x4268,
-		[BCS0]  = 0x426c,
-		[VECS0] = 0x4270,
-	};
 
-	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+	if (!regs)
+		return;
+
+	if (WARN_ON(ring_id >= cnt))
 		return;
 
 	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
@@ -565,10 +580,17 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
 	struct engine_mmio *mmio;
 
-	if (INTEL_GEN(gvt->dev_priv) >= 9)
+	if (INTEL_GEN(gvt->dev_priv) >= 9) {
 		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
-	else
+		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+		gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
+		gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
+	} else {
 		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+	}
 
 	for (mmio = gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 0f9440128123..9ad224df9c68 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -255,6 +255,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
 }
 
 /**
+ * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
+ * @vgpu: a vGPU
+ * @dma_addr: guest dma addr
+ *
+ * Returns:
+ * 0 on success, negative error code if failed.
+ */
+static inline int
+intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
+					dma_addr_t dma_addr)
+{
+	return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
+}
+
+/**
  * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
  * @vgpu: a vGPU
  * @gfn: guest PFN
diff --git a/drivers/gpu/drm/i915/gvt/page_track.h b/drivers/gpu/drm/i915/gvt/page_track.h
index fa607a71c3c0..f6eb7135583c 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.h
+++ b/drivers/gpu/drm/i915/gvt/page_track.h
@@ -25,6 +25,9 @@
 #ifndef _GVT_PAGE_TRACK_H_
 #define _GVT_PAGE_TRACK_H_
 
+#include <linux/types.h>
+
+struct intel_vgpu;
 struct intel_vgpu_page_track;
 
 typedef int (*gvt_page_track_handler_t)(
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index 7b59e3e88b8b..3dacdad5f529 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -34,6 +34,9 @@
 #ifndef __GVT_SCHED_POLICY__
 #define __GVT_SCHED_POLICY__
 
+struct intel_gvt;
+struct intel_vgpu;
+
 struct intel_gvt_sched_policy_ops {
 	int (*init)(struct intel_gvt *gvt);
 	void (*clean)(struct intel_gvt *gvt);
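The mmio_context.c rework above replaces function-local offset arrays with per-platform lists published once at init time, so array-size checks become count checks and platforms without a given list can simply leave the pointer NULL. A compact model of that pattern — the offsets are copied from the gen8 TLB list in the diff; everything else is illustrative:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* gen8 TLB invalidation register offsets, as in the diff. */
static unsigned int gen8_tlb_offsets[] = {
	0x4260, 0x4264, 0x4268, 0x426c, 0x4270,
};

struct engine_mmio_list {
	unsigned int *tlb_offsets;	/* NULL => platform has none */
	unsigned int tlb_offset_cnt;
};

static void init_engine_mmio_list(struct engine_mmio_list *l)
{
	l->tlb_offsets = gen8_tlb_offsets;
	l->tlb_offset_cnt = ARRAY_SIZE(gen8_tlb_offsets);
}

static long tlb_offset(const struct engine_mmio_list *l, unsigned int ring_id)
{
	if (!l->tlb_offsets || ring_id >= l->tlb_offset_cnt)
		return -1;	/* mirrors the !regs / WARN_ON bail-outs */
	return l->tlb_offsets[ring_id];
}

int main(void)
{
	struct engine_mmio_list l;

	init_engine_mmio_list(&l);
	printf("RCS0 TLB reg: 0x%lx\n", tlb_offset(&l, 0));
	return 0;
}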
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 75baff657e43..685d1e04a5ff 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,11 +35,12 @@
 
 #include <linux/kthread.h>
 
-#include "gem/i915_gem_context.h"
 #include "gem/i915_gem_pm.h"
 #include "gt/intel_context.h"
+#include "gt/intel_ring.h"
 
 #include "i915_drv.h"
+#include "i915_gem_gtt.h"
 #include "gvt.h"
 
 #define RING_CTX_OFF(x) \
@@ -58,7 +59,7 @@ static void set_context_pdp_root_pointer(
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 
@@ -84,8 +85,8 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
 		u32 *reg_state, bool save)
 {
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
-	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
 	int i = 0;
 	u32 flex_mmio[] = {
 		i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -129,7 +130,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ring_id = workload->ring_id;
 	struct drm_i915_gem_object *ctx_obj =
-		workload->req->hw_context->state->obj;
+		workload->req->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -194,7 +195,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			return -EFAULT;
 		}
 
-		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+		page = i915_gem_object_get_page(ctx_obj, i);
 		dst = kmap(page);
 		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
 				I915_GTT_PAGE_SIZE);
@@ -204,9 +205,9 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
-static inline bool is_gvt_request(struct i915_request *req)
+static inline bool is_gvt_request(struct i915_request *rq)
 {
-	return i915_gem_context_force_single_submission(req->gem_context);
+	return intel_context_force_single_submission(rq->context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -291,9 +292,6 @@ shadow_context_descriptor_update(struct intel_context *ce,
 	 * Update bits 0-11 of the context descriptor which includes flags
 	 * like GEN8_CTX_* cached in desc_template
 	 */
-	desc &= U64_MAX << 12;
-	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
-
 	desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
 	desc |= workload->ctx_desc.addressing_mode <<
 		GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -309,7 +307,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	u32 *cs;
 	int err;
 
-	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
+	if (IS_GEN(req->i915, 9) && is_inhibit_context(req->context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/*
@@ -365,10 +363,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 }
 
 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
-		struct i915_gem_context *ctx)
+		struct intel_context *ce)
 {
 	struct intel_vgpu_mm *mm = workload->shadow_mm;
-	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
+	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
 	int i = 0;
 
 	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
@@ -388,11 +386,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct i915_request *rq;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
 	if (workload->req)
 		return 0;
 
@@ -418,10 +413,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ret;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+	lockdep_assert_held(&vgpu->vgpu_lock);
 
 	if (workload->shadow)
 		return 0;
@@ -532,7 +526,7 @@ static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
 	struct i915_request *rq = workload->req;
 	struct execlist_ring_context *shadow_ring_context =
-		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
+		(struct execlist_ring_context *)rq->context->lrc_reg_state;
 
 	shadow_ring_context->bb_per_ctx_ptr.val =
 		(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -571,10 +565,18 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }
 
-static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	u32 ring_base;
+
+	ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+	vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+}
+
+static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
+{
 	struct intel_vgpu_shadow_bb *bb, *pos;
 
 	if (list_empty(&workload->shadow_bb))
@@ -583,8 +585,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	bb = list_first_entry(&workload->shadow_bb,
 			struct intel_vgpu_shadow_bb, list);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
 	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
 		if (bb->obj) {
 			if (bb->accessing)
@@ -602,8 +602,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 		list_del(&bb->list);
 		kfree(bb);
 	}
-
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static int prepare_workload(struct intel_vgpu_workload *workload)
@@ -627,7 +625,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 
 	update_shadow_pdps(workload);
 
-	set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+	set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
 
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
@@ -678,7 +676,6 @@ err_unpin_mm:
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct i915_request *rq;
 	int ring_id = workload->ring_id;
 	int ret;
@@ -687,7 +684,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 		ring_id, workload);
 
 	mutex_lock(&vgpu->vgpu_lock);
-	mutex_lock(&dev_priv->drm.struct_mutex);
 
 	ret = intel_gvt_workload_req_alloc(workload);
 	if (ret)
@@ -722,7 +718,6 @@ out:
 err_req:
 	if (ret)
 		workload->status = ret;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
 	mutex_unlock(&vgpu->vgpu_lock);
 	return ret;
 }
@@ -789,7 +784,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
+	struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
@@ -837,7 +832,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 			return;
 		}
 
-		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+		page = i915_gem_object_get_page(ctx_obj, i);
 		src = kmap(page);
 		intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
 				I915_GTT_PAGE_SIZE);
@@ -880,7 +875,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
 	intel_engine_mask_t tmp;
 
 	/* free the unsubmited workloads in the queues. */
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
 		list_for_each_entry_safe(pos, n,
 			&s->workload_q_head[engine->id], list) {
 			list_del_init(&pos->list);
@@ -1019,6 +1014,13 @@ static int workload_thread(void *priv)
 		if (need_force_wake)
 			intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
 					FORCEWAKE_ALL);
+		/*
+		 * Update the vReg of the vGPU which submitted this
+		 * workload. The vGPU may use these registers for checking
+		 * the context state. The value comes from GPU commands
+		 * in this workload.
+		 */
+		update_vreg_in_ctx(workload);
 
 		ret = dispatch_workload(workload);
 
@@ -1157,7 +1159,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 
 	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 
-	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
+	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
 	for_each_engine(engine, vgpu->gvt->dev_priv, id)
 		intel_context_unpin(s->shadow[id]);
 
@@ -1215,30 +1217,41 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
  */
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 {
+	struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_engine_cs *engine;
-	struct i915_gem_context *ctx;
+	struct i915_ppgtt *ppgtt;
 	enum intel_engine_id i;
 	int ret;
 
-	ctx = i915_gem_context_create_gvt(&vgpu->gvt->dev_priv->drm);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
+	ppgtt = i915_ppgtt_create(&i915->gt);
+	if (IS_ERR(ppgtt))
+		return PTR_ERR(ppgtt);
 
-	i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
+	i915_context_ppgtt_root_save(s, ppgtt);
 
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+	for_each_engine(engine, i915, i) {
 		struct intel_context *ce;
 
 		INIT_LIST_HEAD(&s->workload_q_head[i]);
 		s->shadow[i] = ERR_PTR(-EINVAL);
 
-		ce = i915_gem_context_get_engine(ctx, i);
+		ce = intel_context_create(engine);
 		if (IS_ERR(ce)) {
 			ret = PTR_ERR(ce);
 			goto out_shadow_ctx;
 		}
 
+		i915_vm_put(ce->vm);
+		ce->vm = i915_vm_get(&ppgtt->vm);
+		intel_context_set_single_submission(ce);
+
+		if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+			const unsigned int ring_size = 512 * SZ_4K;
+
+			ce->ring = __intel_context_ring_size(ring_size);
+		}
+
 		ret = intel_context_pin(ce);
 		intel_context_put(ce);
 		if (ret)
@@ -1264,18 +1277,19 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	atomic_set(&s->running_workload_num, 0);
 	bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
-	i915_gem_context_put(ctx);
+	i915_vm_put(&ppgtt->vm);
 	return 0;
 
 out_shadow_ctx:
-	i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
+	i915_context_ppgtt_root_restore(s, ppgtt);
+	for_each_engine(engine, i915, i) {
 		if (IS_ERR(s->shadow[i]))
 			break;
 
 		intel_context_unpin(s->shadow[i]);
+		intel_context_put(s->shadow[i]);
 	}
-	i915_gem_context_put(ctx);
+	i915_vm_put(&ppgtt->vm);
 	return ret;
 }
 
@@ -1424,9 +1438,6 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
-#define get_last_workload(q) \
-	(list_empty(q) ? NULL : container_of(q->prev, \
-	struct intel_vgpu_workload, list))
-
 /**
  * intel_vgpu_create_workload - create a vGPU workload
  * @vgpu: a vGPU
@@ -1446,7 +1457,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct list_head *q = workload_q_head(vgpu, ring_id);
-	struct intel_vgpu_workload *last_workload = get_last_workload(q);
+	struct intel_vgpu_workload *last_workload = NULL;
 	struct intel_vgpu_workload *workload = NULL;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
@@ -1472,15 +1483,20 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	head &= RB_HEAD_OFF_MASK;
 	tail &= RB_TAIL_OFF_MASK;
 
-	if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
-		gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
-		gvt_dbg_el("ctx head %x real head %lx\n", head,
-				last_workload->rb_tail);
-		/*
-		 * cannot use guest context head pointer here,
-		 * as it might not be updated at this time
-		 */
-		head = last_workload->rb_tail;
+	list_for_each_entry_reverse(last_workload, q, list) {
+
+		if (same_context(&last_workload->ctx_desc, desc)) {
+			gvt_dbg_el("ring id %d cur workload == last\n",
+					ring_id);
+			gvt_dbg_el("ctx head %x real head %lx\n", head,
+					last_workload->rb_tail);
+			/*
+			 * cannot use guest context head pointer here,
+			 * as it might not be updated at this time
+			 */
+			head = last_workload->rb_tail;
+			break;
+		}
 	}
 
 	gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
@@ -1564,9 +1580,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 	 */
 	if (list_empty(workload_q_head(vgpu, ring_id))) {
 		intel_runtime_pm_get(&dev_priv->runtime_pm);
-		mutex_lock(&dev_priv->drm.struct_mutex);
 		ret = intel_gvt_scan_and_shadow_workload(workload);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
 		intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 	}
 
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 44ce3c2b9ac1..85bd9bf4f6ee 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
  */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-	mutex_lock(&vgpu->gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 	vgpu->active = true;
-	mutex_unlock(&vgpu->gvt->lock);
+	mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
@@ -420,9 +420,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_submission;
 
-	ret = intel_gvt_debugfs_add_vgpu(vgpu);
-	if (ret)
-		goto out_clean_sched_policy;
+	intel_gvt_debugfs_add_vgpu(vgpu);
 
 	ret = intel_gvt_hypervisor_set_opregion(vgpu);
 	if (ret)
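One behavioural change in scheduler.c above is worth calling out: intel_vgpu_create_workload() used to compare the new descriptor only against the single most recent queued workload (the removed get_last_workload() macro); it now walks the whole queue from the tail, so any pending workload of the same context donates its rb_tail as the ring head. A toy model of that lookup — the kernel list is replaced by an array for brevity, and all names are local to the example:

#include <stdio.h>

struct workload {
	unsigned int ctx_id;
	unsigned long rb_tail;
};

/* Walk the queue newest-first (list_for_each_entry_reverse in the
 * driver).  If any queued workload belongs to the same context, its
 * rb_tail is a better ring head than the guest-reported one, which may
 * not have been updated yet. */
static unsigned long find_head(const struct workload *q, int n,
			       unsigned int ctx_id, unsigned long guest_head)
{
	for (int i = n - 1; i >= 0; i--)
		if (q[i].ctx_id == ctx_id)
			return q[i].rb_tail;
	return guest_head;
}

int main(void)
{
	const struct workload q[] = {
		{ .ctx_id = 1, .rb_tail = 0x100 },
		{ .ctx_id = 2, .rb_tail = 0x80 },
	};

	printf("head for ctx 1: 0x%lx\n", find_head(q, 2, 1, 0x0));
	return 0;
}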