Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
-rw-r--r--  drivers/gpu/drm/i915/gvt/Makefile        |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c    |  90
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c       |   4
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c       |   1
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c        |  21
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c      |  28
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h      |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c           |  62
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h           |  18
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h           |  17
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c      | 262
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c     |   2
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c          |   8
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c  | 251
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h           |  33
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c     |  89
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h     |   7
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c          |   6
18 files changed, 539 insertions, 364 deletions
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 271fb46d4dd0..ea8324abc784 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -5,5 +5,5 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
fb_decoder.o dmabuf.o page_track.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
+ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 3592d04c33b2..de5347725564 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -391,12 +391,12 @@ struct cmd_info {
#define F_POST_HANDLE (1<<2)
u32 flag;
-#define R_RCS (1 << RCS)
-#define R_VCS1 (1 << VCS)
-#define R_VCS2 (1 << VCS2)
+#define R_RCS BIT(RCS0)
+#define R_VCS1 BIT(VCS0)
+#define R_VCS2 BIT(VCS1)
#define R_VCS (R_VCS1 | R_VCS2)
-#define R_BCS (1 << BCS)
-#define R_VECS (1 << VECS)
+#define R_BCS BIT(BCS0)
+#define R_VECS BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
u16 rings;
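
The hunk above replaces open-coded shifts over the old engine names with BIT() over the renamed engine ids (RCS0, VCS0, VCS1, BCS0, VECS0). A minimal standalone C sketch of the same mask construction and of the membership test find_cmd_entry() performs further down; the enum values are illustrative stand-ins, not the kernel's definitions:

    #include <stdio.h>

    /* Illustrative stand-ins for the i915 engine ids. */
    enum engine_id { RCS0 = 0, BCS0, VCS0, VCS1, VECS0 };

    #define BIT(n)  (1U << (n))

    #define R_RCS   BIT(RCS0)
    #define R_VCS   (BIT(VCS0) | BIT(VCS1))
    #define R_BCS   BIT(BCS0)
    #define R_VECS  BIT(VECS0)
    #define R_ALL   (R_RCS | R_VCS | R_BCS | R_VECS)

    int main(void)
    {
        unsigned int ring_id = VCS1;

        /* Same shape as find_cmd_entry(): is this ring in the cmd's mask? */
        printf("VCS1 in R_ALL: %d\n", !!(R_ALL & BIT(ring_id)));
        printf("VCS1 in R_BCS: %d\n", !!(R_BCS & BIT(ring_id)));
        return 0;
    }
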
@@ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = {
};
static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
- [RCS] = {
+ [RCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS] = {
+ [VCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [BCS] = {
+ [BCS0] = {
&decode_info_mi,
NULL,
&decode_info_2d,
@@ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VECS] = {
+ [VECS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS2] = {
+ [VCS1] = {
&decode_info_mi,
NULL,
NULL,
@@ -631,8 +631,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if ((opcode == e->info->opcode) &&
- (e->info->rings & (1 << ring_id)))
+ if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
return e->info;
}
return NULL;
@@ -897,12 +896,16 @@ static int cmd_reg_handler(struct parser_exec_state *s,
}
/* TODO
- * Right now only scan LRI command on KBL and in inhibit context.
- * It's good enough to support initializing mmio by lri command in
- * vgpu inhibit context on KBL.
+ * In order to let workloads with an inhibit context generate
+ * correct image data in memory, vreg values are loaded into the
+ * hardware via LRIs in the inhibit-context workload. But since the
+ * indirect context is loaded before the LRIs in the workload, we
+ * don't want register values specified in the indirect context to
+ * be overwritten by those LRIs. So, when scanning an indirect
+ * context, we copy its register values into the vregs, so that the
+ * LRIs in the inhibit-context workload restore the correct values.
*/
- if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
- || IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
+ if (IS_GEN(gvt->dev_priv, 9) &&
intel_gvt_mmio_is_in_ctx(gvt, offset) &&
!strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -943,15 +946,12 @@ static int cmd_handler_lri(struct parser_exec_state *s)
struct intel_gvt *gvt = s->vgpu->gvt;
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) &&
- (s->ring_id != RCS)) {
- if (s->ring_id == BCS &&
- cmd_reg(s, i) ==
- i915_mmio_reg_offset(DERRMR))
+ if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
+ if (s->ring_id == BCS0 &&
+ cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
- ret |= (cmd_reg_inhibit(s, i)) ?
- -EBADRQC : 0;
+ ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
}
if (ret)
break;
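
For reference, MI_LOAD_REGISTER_IMM is a header dword followed by (register offset, value) pairs, which is why cmd_handler_lri() above walks the command at i = 1, 3, 5, ... and treats cmd_reg(s, i) as the offset of each pair. A standalone C sketch of that walk over an invented buffer:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Invented LRI payload: header, then two (offset, value) pairs. */
        uint32_t cmd[] = { 0x11000003,
                           0x0000229c, 0x80008000,
                           0x000020c0, 0xffff0000 };
        int cmd_len = sizeof(cmd) / sizeof(cmd[0]);

        for (int i = 1; i < cmd_len; i += 2)
            printf("LRI writes reg 0x%05x = 0x%08x\n",
                   (unsigned)cmd[i], (unsigned)cmd[i + 1]);
        return 0;
    }
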
@@ -1047,27 +1047,27 @@ struct cmd_interrupt_event {
};
static struct cmd_interrupt_event cmd_interrupt_events[] = {
- [RCS] = {
+ [RCS0] = {
.pipe_control_notify = RCS_PIPE_CONTROL,
.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
},
- [BCS] = {
+ [BCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = BCS_MI_FLUSH_DW,
.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
},
- [VCS] = {
+ [VCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS_MI_FLUSH_DW,
.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
},
- [VCS2] = {
+ [VCS1] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS2_MI_FLUSH_DW,
.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
},
- [VECS] = {
+ [VECS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VECS_MI_FLUSH_DW,
.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
@@ -1081,6 +1081,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
bool index_mode = false;
unsigned int post_sync;
int ret = 0;
+ u32 hws_pga, val;
post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
@@ -1104,6 +1105,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
index_mode = true;
ret |= cmd_address_audit(s, gma, sizeof(u64),
index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 2), gma);
+ val = cmd_val(s, 1) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 1), val);
+ }
}
}
}
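
The new index-mode handling above rebases the PIPE_CONTROL post-sync address onto the vGPU's hardware status page (hws_pga) and clears bit 21 so the patched command carries a plain GGTT address. A standalone C sketch of that patching arithmetic, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t dw1     = 0x00700000 | (1u << 21); /* invented flags, index mode set */
        uint64_t gma     = 0x40;                    /* index-mode offset from the command */
        uint64_t hws_pga = 0x00200000;              /* invented per-vGPU status page address */

        gma = hws_pga + gma;   /* what patch_value(s, cmd_ptr(s, 2), gma) stores */
        dw1 &= ~(1u << 21);    /* what patch_value(s, cmd_ptr(s, 1), val) stores */

        printf("patched address 0x%llx, dw1 0x%08x\n",
               (unsigned long long)gma, (unsigned)dw1);
        return 0;
    }
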
@@ -1321,8 +1331,14 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
info->tile_val << 10);
}
- vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
- intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ if (info->plane == PLANE_PRIMARY)
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
+
+ if (info->async_flip)
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ else
+ set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
+
return 0;
}
@@ -1567,6 +1583,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
unsigned long gma;
bool index_mode = false;
int ret = 0;
+ u32 hws_pga, val;
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1577,6 +1594,15 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (cmd_val(s, 0) & (1 << 21))
index_mode = true;
ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 1), gma);
+ val = cmd_val(s, 0) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 0), val);
+ }
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
@@ -2504,7 +2530,7 @@ static const struct cmd_info cmd_info[] = {
0, 12, NULL},
{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
- 0, 20, NULL},
+ 0, 12, NULL},
};
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 2ec89bcb59f1..8a9606f91e68 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -196,9 +196,9 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
{
struct dentry *ent;
- char name[10] = "";
+ char name[16] = "";
- sprintf(name, "vgpu%d", vgpu->id);
+ snprintf(name, 16, "vgpu%d", vgpu->id);
vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
if (!vgpu->debugfs)
return -ENOMEM;
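
The wider buffer plus snprintf() bounds the "vgpu%d" formatting instead of trusting sprintf(). A trivial standalone check of the same pattern:

    #include <stdio.h>

    int main(void)
    {
        char name[16] = "";
        int id = 1234567;  /* deliberately large, illustrative id */

        snprintf(name, sizeof(name), "vgpu%d", id);
        printf("%s\n", name);  /* always NUL-terminated, never overruns */
        return 0;
    }
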
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e3f9caa7839f..e1c313da6c00 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -407,7 +407,6 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
if (!pipe_is_enabled(vgpu, pipe))
continue;
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 69a9a1b2ea4a..41c8ebc60c63 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -45,6 +45,7 @@ static int vgpu_gem_get_pages(
int i, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
+ u32 page_num;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
if (WARN_ON(!fb_info))
@@ -54,14 +55,15 @@ static int vgpu_gem_get_pages(
if (unlikely(!st))
return -ENOMEM;
- ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
+ page_num = obj->base.size >> PAGE_SHIFT;
+ ret = sg_alloc_table(st, page_num, GFP_KERNEL);
if (ret) {
kfree(st);
return ret;
}
gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
(fb_info->start >> PAGE_SHIFT);
- for_each_sg(st->sgl, sg, fb_info->size, i) {
+ for_each_sg(st->sgl, sg, page_num, i) {
sg->offset = 0;
sg->length = PAGE_SIZE;
sg_dma_address(sg) =
@@ -153,12 +155,12 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
drm_gem_private_object_init(dev, &obj->base,
- info->size << PAGE_SHIFT);
+ roundup(info->size, PAGE_SIZE));
i915_gem_object_init(obj, &intel_vgpu_gem_ops);
obj->read_domains = I915_GEM_DOMAIN_GTT;
@@ -206,11 +208,12 @@ static int vgpu_get_plane_info(struct drm_device *dev,
struct intel_vgpu_fb_info *info,
int plane_id)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_vgpu_primary_plane_format p;
struct intel_vgpu_cursor_plane_format c;
int ret, tile_height = 1;
+ memset(info, 0, sizeof(*info));
+
if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
ret = intel_vgpu_decode_primary_plane(vgpu, &p);
if (ret)
@@ -267,8 +270,7 @@ static int vgpu_get_plane_info(struct drm_device *dev,
return -EINVAL;
}
- info->size = (info->stride * roundup(info->height, tile_height)
- + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ info->size = info->stride * roundup(info->height, tile_height);
if (info->size == 0) {
gvt_vgpu_err("fb size is zero\n");
return -EINVAL;
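
With this change info->size is kept in bytes; the GEM object is then sized by rounding up to a whole page and the sg table by obj->base.size >> PAGE_SHIFT, as in the vgpu_create_gem() and vgpu_gem_get_pages() hunks above. A standalone C sketch of that arithmetic with an invented framebuffer geometry:

    #include <stdio.h>

    #define PAGE_SIZE  4096u
    #define PAGE_SHIFT 12

    static unsigned int roundup_to(unsigned int x, unsigned int to)
    {
        return ((x + to - 1) / to) * to;
    }

    int main(void)
    {
        /* Invented geometry, not taken from a real plane decode. */
        unsigned int stride = 16384, height = 1080, tile_height = 8;

        unsigned int size_bytes = stride * roundup_to(height, tile_height);
        unsigned int obj_size   = roundup_to(size_bytes, PAGE_SIZE);
        unsigned int page_num   = obj_size >> PAGE_SHIFT;

        printf("fb %u bytes -> object %u bytes -> %u pages\n",
               size_bytes, obj_size, page_num);
        return 0;
    }
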
@@ -278,11 +280,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
return -EFAULT;
}
- if (((info->start >> PAGE_SHIFT) + info->size) >
- ggtt_total_entries(&dev_priv->ggtt)) {
- gvt_vgpu_err("Invalid GTT offset or size\n");
- return -EFAULT;
- }
if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
gvt_vgpu_err("invalid gma addr\n");
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 70494e394d2c..f21b8fb5b37e 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -47,17 +47,16 @@
((a)->lrca == (b)->lrca))
static int context_switch_events[] = {
- [RCS] = RCS_AS_CONTEXT_SWITCH,
- [BCS] = BCS_AS_CONTEXT_SWITCH,
- [VCS] = VCS_AS_CONTEXT_SWITCH,
- [VCS2] = VCS2_AS_CONTEXT_SWITCH,
- [VECS] = VECS_AS_CONTEXT_SWITCH,
+ [RCS0] = RCS_AS_CONTEXT_SWITCH,
+ [BCS0] = BCS_AS_CONTEXT_SWITCH,
+ [VCS0] = VCS_AS_CONTEXT_SWITCH,
+ [VCS1] = VCS2_AS_CONTEXT_SWITCH,
+ [VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(int ring_id)
+static int ring_id_to_context_switch_event(unsigned int ring_id)
{
- if (WARN_ON(ring_id < RCS ||
- ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
@@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
+ if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
goto out;
if (!list_empty(workload_q_head(vgpu, ring_id))) {
@@ -527,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+ intel_engine_mask_t engine_mask)
{
- unsigned int tmp;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
@@ -542,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
}
static void reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
static int init_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
reset_execlist(vgpu, engine_mask);
return 0;
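
These hunks narrow the engine mask from unsigned long to intel_engine_mask_t throughout the execlist reset paths. A standalone stand-in for the for_each_engine_masked() walk they rely on; the typedef and engine ids here are illustrative, not the kernel's definitions:

    #include <stdio.h>

    typedef unsigned int intel_engine_mask_t;  /* narrower mask type, as in the new signatures */

    enum engine_id { RCS0 = 0, BCS0, VCS0, VCS1, VECS0, NUM_ENGINES };
    static const char * const names[NUM_ENGINES] = {
        "rcs0", "bcs0", "vcs0", "vcs1", "vecs0"
    };

    int main(void)
    {
        intel_engine_mask_t engine_mask = (1u << RCS0) | (1u << VCS1);

        /* Visit each engine named in the mask, as reset_execlist() does. */
        for (int id = 0; id < NUM_ENGINES; id++)
            if (engine_mask & (1u << id))
                printf("resetting execlist state for %s\n", names[id]);
        return 0;
    }
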
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 714d709829a2..5ccc2c695848 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 9814773882ec..53115bdae12b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192;
*/
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
- if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
- && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
- gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
- addr, size);
- return false;
- }
- return true;
+ if (size == 0)
+ return vgpu_gmadr_is_valid(vgpu, addr);
+
+ if (vgpu_gmadr_is_aperture(vgpu, addr) &&
+ vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
+ return true;
+ else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
+ vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
+ return true;
+
+ gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
+ addr, size);
+ return false;
}
/* translate a guest gmadr to host gmadr */
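
The rewritten intel_gvt_ggtt_validate_range() above accepts a range only when its first and last byte fall within the same region (aperture or hidden), instead of merely checking that both ends are valid somewhere. A toy standalone model of that rule, with invented region bounds:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct region { uint64_t base, size; };

    static bool in_region(const struct region *r, uint64_t addr)
    {
        return addr >= r->base && addr < r->base + r->size;
    }

    static bool range_ok(const struct region *ap, const struct region *hi,
                         uint64_t addr, uint32_t size)
    {
        if (size == 0)
            return in_region(ap, addr) || in_region(hi, addr);
        return (in_region(ap, addr) && in_region(ap, addr + size - 1)) ||
               (in_region(hi, addr) && in_region(hi, addr + size - 1));
    }

    int main(void)
    {
        /* Invented bounds; the real ones come from the vGPU's GM layout. */
        struct region aperture = { 0x00000000, 0x10000000 };
        struct region hidden   = { 0x80000000, 0x10000000 };

        printf("%d\n", range_ok(&aperture, &hidden, 0x0ff00000, 0x200000)); /* straddles -> 0 */
        printf("%d\n", range_ok(&aperture, &hidden, 0x80000000, 0x1000));   /* hidden    -> 1 */
        return 0;
    }
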
@@ -811,7 +817,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
@@ -861,7 +867,7 @@ err_free_spt:
/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
- struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+ struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
unsigned long gfn, bool guest_pde_ips)
{
struct intel_vgpu_ppgtt_spt *spt;
@@ -936,13 +942,22 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
{
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
- intel_gvt_gtt_type_t cur_pt_type;
+ enum intel_gvt_gtt_type cur_pt_type;
GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
- cur_pt_type = get_next_pt_type(e->type) + 1;
+ cur_pt_type = get_next_pt_type(e->type);
+
+ if (!gtt_type_is_pt(cur_pt_type) ||
+ !gtt_type_is_pt(cur_pt_type + 1)) {
+ WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+ return -EINVAL;
+ }
+
+ cur_pt_type += 1;
+
if (ops->get_pfn(e) ==
vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
return 0;
@@ -1076,6 +1091,11 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
} else {
int type = get_next_pt_type(we->type);
+ if (!gtt_type_is_pt(type)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
if (IS_ERR(spt)) {
ret = PTR_ERR(spt);
@@ -1097,6 +1117,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
err_free_spt:
ppgtt_free_spt(spt);
+ spt = NULL;
err:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
@@ -1855,7 +1876,7 @@ static void vgpu_free_mm(struct intel_vgpu_mm *mm)
* Zero on success, negative error code in pointer if failed.
*/
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_mm *mm;
@@ -2178,7 +2199,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
unsigned long gma, gfn;
- struct intel_gvt_gtt_entry e, m;
+ struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+ struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
dma_addr_t dma_addr;
int ret;
struct intel_gvt_partial_pte *partial_pte, *pos, *n;
@@ -2245,7 +2267,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (!partial_update && (ops->test_present(&e))) {
gfn = ops->get_pfn(&e);
- m = e;
+ m.val64 = e.val64;
+ m.type = e.type;
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
@@ -2309,7 +2332,7 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t type)
+ enum intel_gvt_gtt_type type)
{
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
@@ -2504,6 +2527,7 @@ static void clean_spt_oos(struct intel_gvt *gvt)
list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
list_del(&oos_page->list);
+ free_page((unsigned long)oos_page->mem);
kfree(oos_page);
}
}
@@ -2524,6 +2548,12 @@ static int setup_spt_oos(struct intel_gvt *gvt)
ret = -ENOMEM;
goto fail;
}
+ oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
+ if (!oos_page->mem) {
+ ret = -ENOMEM;
+ kfree(oos_page);
+ goto fail;
+ }
INIT_LIST_HEAD(&oos_page->list);
INIT_LIST_HEAD(&oos_page->vm_list);
@@ -2587,7 +2617,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
* Zero on success, negative error code if failed.
*/
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
{
struct intel_vgpu_mm *mm;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index edb610dc5d86..42d0394f0de2 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -95,8 +95,8 @@ struct intel_gvt_gtt {
unsigned long scratch_mfn;
};
-typedef enum {
- GTT_TYPE_INVALID = -1,
+enum intel_gvt_gtt_type {
+ GTT_TYPE_INVALID = 0,
GTT_TYPE_GGTT_PTE,
@@ -124,7 +124,7 @@ typedef enum {
GTT_TYPE_PPGTT_PML4_PT,
GTT_TYPE_MAX,
-} intel_gvt_gtt_type_t;
+};
enum intel_gvt_mm_type {
INTEL_GVT_MM_GGTT,
@@ -148,7 +148,7 @@ struct intel_vgpu_mm {
union {
struct {
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
/*
* The 4 PDPs in ring context. For 48bit addressing,
* only PDP0 is valid and point to PML4. For 32it
@@ -169,7 +169,7 @@ struct intel_vgpu_mm {
};
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
{
@@ -222,7 +222,7 @@ struct intel_vgpu_oos_page {
struct list_head list;
struct list_head vm_list;
int id;
- unsigned char mem[I915_GTT_PAGE_SIZE];
+ void *mem;
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
@@ -233,7 +233,7 @@ struct intel_vgpu_ppgtt_spt {
struct intel_vgpu *vgpu;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
void *vaddr;
struct page *page;
@@ -241,7 +241,7 @@ struct intel_vgpu_ppgtt_spt {
} shadow_page;
struct {
- intel_gvt_gtt_type_t type;
+ enum intel_gvt_gtt_type type;
bool pde_ips; /* for 64KB PTEs */
unsigned long gfn;
unsigned long write_cnt;
@@ -267,7 +267,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
u64 pdps[]);
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
- intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+ enum intel_gvt_gtt_type root_entry_type, u64 pdps[]);
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 8bce09de4b82..f5a328b5290a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -94,7 +94,6 @@ struct intel_vgpu_fence {
struct intel_vgpu_mmio {
void *vreg;
- void *sreg;
};
#define INTEL_GVT_MAX_BAR_NUM 4
@@ -111,11 +110,9 @@ struct intel_vgpu_cfg_space {
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
-#define INTEL_GVT_MAX_PIPE 4
-
struct intel_vgpu_irq {
bool irq_warn_once[INTEL_GVT_EVENT_MAX];
- DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+ DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
INTEL_GVT_EVENT_MAX);
};
@@ -144,9 +141,9 @@ enum {
struct intel_vgpu_submission_ops {
const char *name;
- int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+ int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};
struct intel_vgpu_submission {
@@ -449,10 +446,6 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
(*(u64 *)(vgpu->mmio.vreg + (offset)))
-#define vgpu_sreg_t(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
-#define vgpu_sreg(vgpu, offset) \
- (*(u32 *)(vgpu->mmio.sreg + (offset)))
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -488,7 +481,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bc64b810e0d5..25f78196b964 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int engine_mask = 0;
+ intel_engine_mask_t engine_mask = 0;
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
@@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
} else {
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
- engine_mask |= (1 << RCS);
+ engine_mask |= BIT(RCS0);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
- engine_mask |= (1 << VCS);
+ engine_mask |= BIT(VCS0);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
- engine_mask |= (1 << BCS);
+ engine_mask |= BIT(BCS0);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
- engine_mask |= (1 << VECS);
+ engine_mask |= BIT(VECS0);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
- if (HAS_BSD2(vgpu->gvt->dev_priv))
- engine_mask |= (1 << VCS2);
+ engine_mask |= BIT(VCS1);
}
+ engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
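
gdrst_mmio_write() above decodes GEN6_GRDOM_* reset-domain bits into an engine mask and, instead of the old HAS_BSD2() special case, clamps the result against the engines the device actually has. A standalone C sketch of that decode-and-clamp; the domain bit values are illustrative stand-ins for the real definitions:

    #include <stdio.h>

    /* Illustrative stand-ins for the GEN6_GRDOM_* bits. */
    #define GRDOM_RENDER (1u << 1)
    #define GRDOM_MEDIA  (1u << 2)
    #define GRDOM_BLT    (1u << 3)

    enum engine_id { RCS0 = 0, BCS0, VCS0, VCS1, VECS0 };

    int main(void)
    {
        unsigned int data = GRDOM_RENDER | GRDOM_MEDIA;
        unsigned int engine_mask = 0;
        /* Pretend this device has no second VCS. */
        unsigned int present = (1u << RCS0) | (1u << BCS0) | (1u << VCS0) | (1u << VECS0);

        if (data & GRDOM_RENDER) engine_mask |= 1u << RCS0;
        if (data & GRDOM_MEDIA)  engine_mask |= 1u << VCS0;
        if (data & GRDOM_BLT)    engine_mask |= 1u << BCS0;

        engine_mask &= present;  /* only reset engines the device actually has */
        printf("engine reset mask: 0x%x\n", engine_mask);
        return 0;
    }
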
@@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = {
_MMIO(0x2690),
_MMIO(0x2694),
_MMIO(0x2698),
+ _MMIO(0x2754),
+ _MMIO(0x28a0),
_MMIO(0x4de0),
_MMIO(0x4de4),
_MMIO(0x4dfc),
@@ -750,18 +752,19 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- unsigned int index = DSPSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = DSPSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = PRIMARY_A_FLIP_DONE,
- [PIPE_B] = PRIMARY_B_FLIP_DONE,
- [PIPE_C] = PRIMARY_C_FLIP_DONE,
- };
+ u32 pipe = DSPSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+
+ if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
@@ -771,18 +774,42 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int index = SPRSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = SPRSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = SPRITE_A_FLIP_DONE,
- [PIPE_B] = SPRITE_B_FLIP_DONE,
- [PIPE_C] = SPRITE_C_FLIP_DONE,
- };
+ u32 pipe = SPRSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
+
+ return 0;
+}
+
+static int reg50080_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum pipe pipe = REG_50080_TO_PIPE(offset);
+ enum plane_id plane = REG_50080_TO_PLANE(offset);
+ int event = SKL_FLIP_EVENT(pipe, plane);
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ if (plane == PLANE_PRIMARY) {
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ } else {
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ }
+
+ if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
@@ -1181,7 +1208,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
- intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+ enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
struct intel_vgpu_mm *mm;
u64 *pdps;
@@ -1227,18 +1254,15 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- u32 data;
- int ret;
-
- write_vreg(vgpu, offset, p_data, bytes);
- data = vgpu_vreg(vgpu, offset);
+ u32 data = *(u32 *)p_data;
+ bool invalid_write = false;
switch (offset) {
case _vgtif_reg(display_ready):
send_display_ready_uevent(vgpu, data ? 1 : 0);
break;
case _vgtif_reg(g2v_notify):
- ret = handle_g2v_notification(vgpu, data);
+ handle_g2v_notification(vgpu, data);
break;
/* add xhot and yhot to handled list to avoid error log */
case _vgtif_reg(cursor_x_hot):
@@ -1255,13 +1279,19 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
case _vgtif_reg(execlist_context_descriptor_hi):
break;
case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+ invalid_write = true;
enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
break;
default:
+ invalid_write = true;
gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data);
break;
}
+
+ if (!invalid_write)
+ write_vreg(vgpu, offset, p_data, bytes);
+
return 0;
}
@@ -1339,7 +1369,6 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u32 trtte = *(u32 *)p_data;
if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
@@ -1348,11 +1377,6 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
return -EINVAL;
}
write_vreg(vgpu, offset, p_data, bytes);
- /* TRTTE is not per-context */
-
- mmio_hw_access_pre(dev_priv);
- I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
- mmio_hw_access_post(dev_priv);
return 0;
}
@@ -1360,15 +1384,6 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 val = *(u32 *)p_data;
-
- if (val & 1) {
- /* unblock hw logic */
- mmio_hw_access_pre(dev_priv);
- I915_WRITE(_MMIO(offset), val);
- mmio_hw_access_post(dev_priv);
- }
write_vreg(vgpu, offset, p_data, bytes);
return 0;
}
@@ -1680,8 +1695,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool enable_execlist;
int ret;
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+ if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
+ if (data & _MASKED_BIT_ENABLE(1)) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
+
+ if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+ data & _MASKED_BIT_ENABLE(2)) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
+
/* when PPGTT mode enabled, we will check if guest has called
* pvinfo, if not, we will treat this guest as non-gvtg-aware
* guest, and stop emulating its cfg space, mmio, gtt, etc.
@@ -1704,7 +1733,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
- ENGINE_MASK(ring_id),
+ BIT(ring_id),
INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
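
The new checks rely on the i915 masked-register convention: the upper 16 bits of a written value select which of the lower 16 bits take effect, so _MASKED_BIT_ENABLE(b) expands to ((b) << 16 | (b)). A minimal standalone model of that convention and of the failsafe test above:

    #include <stdint.h>
    #include <stdio.h>

    #define MASKED_BIT_ENABLE(b)   (((b) << 16) | (b))
    #define MASKED_BIT_DISABLE(b)  ((b) << 16)

    int main(void)
    {
        /* Guest asks to enable bit 1 of a masked MMIO register. */
        uint32_t data = MASKED_BIT_ENABLE(1u << 1);

        if (data & MASKED_BIT_ENABLE(1u << 1))
            printf("bit 1 enable requested -> unsupported, enter failsafe\n");

        /* A disable write touches only the mask half. */
        printf("disable write: 0x%08x\n", (unsigned)MASKED_BIT_DISABLE(1u << 1));
        return 0;
    }
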
@@ -1724,19 +1753,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
switch (offset) {
case 0x4260:
- id = RCS;
+ id = RCS0;
break;
case 0x4264:
- id = VCS;
+ id = VCS0;
break;
case 0x4268:
- id = VCS2;
+ id = VCS1;
break;
case 0x426c:
- id = BCS;
+ id = BCS0;
break;
case 0x4270:
- id = VECS;
+ id = VECS0;
break;
default:
return -EINVAL;
@@ -1763,6 +1792,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
return 0;
}
+static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ u32 data = *(u32 *)p_data;
+
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+ return 0;
+}
+
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
f, s, am, rm, d, r, w); \
@@ -1793,7 +1837,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
- if (HAS_BSD2(dev_priv)) \
+ if (HAS_ENGINE(dev_priv, VCS1)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
@@ -1848,7 +1892,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
- MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
@@ -1883,7 +1927,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -1969,6 +2014,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_B), D_ALL);
MMIO_D(DSPADDR(PIPE_B), D_ALL);
@@ -1978,6 +2025,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_C), D_ALL);
MMIO_D(DSPADDR(PIPE_C), D_ALL);
@@ -1987,6 +2036,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_A), D_ALL);
MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
@@ -2000,6 +2051,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_A), D_ALL);
MMIO_D(SPRSCALE(PIPE_A), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_B), D_ALL);
MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
@@ -2013,6 +2066,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_B), D_ALL);
MMIO_D(SPRSCALE(PIPE_B), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_C), D_ALL);
MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
@@ -2026,6 +2081,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_C), D_ALL);
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
@@ -2827,26 +2884,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
- MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
+ MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
- MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
+ MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
+ MMIO_D(DC_STATE_EN, D_SKL_PLUS);
+ MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
+ MMIO_D(CDCLK_CTL, D_SKL_PLUS);
+ MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
+ MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2965,40 +3022,41 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
- MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
+ MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
+ MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
+ MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
- MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
+ MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
+ MMIO_D(SKL_DFSM, D_SKL_PLUS);
+ MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
- MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
- MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
- MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
/* TRTT */
- MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+ MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
NULL, gen9_trtte_write);
MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
@@ -3007,11 +3065,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
- MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
+ MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+ MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
@@ -3036,14 +3094,17 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
- MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+ MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, csfe_chicken1_mmio_write);
+#undef CSFE_CHICKEN1_REG
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
- MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
+ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
return 0;
}
@@ -3216,7 +3277,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
MMIO_D(GEN6_GFXPAUSE, D_BXT);
- MMIO_D(GEN8_L3SQCREG1, D_BXT);
+ MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
@@ -3489,12 +3550,11 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
return mmio_info->read(vgpu, offset, pdata, bytes);
else {
u64 ro_mask = mmio_info->ro_mask;
- u32 old_vreg = 0, old_sreg = 0;
+ u32 old_vreg = 0;
u64 data = 0;
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
- old_sreg = vgpu_sreg(vgpu, offset);
}
if (likely(!ro_mask))
@@ -3516,8 +3576,6 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
- vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
- | (vgpu_sreg(vgpu, offset) & mask);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 67125c5eec6e..951681813230 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -536,7 +536,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_BSD2(gvt->dev_priv)) {
+ if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index ed4df2f6d60b..a55178884d67 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -239,7 +239,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
if (dmlr) {
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
- memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
@@ -280,7 +279,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
* touched
*/
memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
- memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
}
}
@@ -296,12 +294,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
- vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
+ vgpu->mmio.vreg = vzalloc(info->mmio_size);
if (!vgpu->mmio.vreg)
return -ENOMEM;
- vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
intel_vgpu_reset_mmio(vgpu, true);
return 0;
@@ -315,5 +311,5 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
- vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+ vgpu->mmio.vreg = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7902fb162d09..90bb3df0db50 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -41,103 +41,104 @@
/* Raw offset is appened to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
- {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
- {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
- {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
- {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
- {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
- {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
- {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
- {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
- {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
- {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
- {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
- {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
- {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
- {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
- {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
- {RCS, TRVADR, 0, false}, /* 0x4df0 */
- {RCS, TRTTE, 0, false}, /* 0x4df4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-
- {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
-
- {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
-
- {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
- {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
-
- {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
- {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
- {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
-
- {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
- {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
- {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
+ {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
+ {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
+ {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
+ {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
+ {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
+ {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
+ {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
+ {RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
+ {RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
+ {RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */
+ {RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */
+ {RCS0, TRVADR, 0, true}, /* 0x4df0 */
+ {RCS0, TRTTE, 0, true}, /* 0x4df4 */
+ {RCS0, _MMIO(0x4dfc), 0, true},
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+
+ {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
+ {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
+
+ {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
+ {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
+
+ {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
+ {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+ {RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
+
+ {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
+ {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+ {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct {
@@ -150,11 +151,11 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
{
i915_reg_t offset;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int ring_id, i;
@@ -302,7 +303,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
goto out;
/* no MOCS register in context except render engine */
- if (req->engine->id != RCS)
+ if (req->engine->id != RCS0)
goto out;
ret = restore_render_mocs_control_for_inhibit(vgpu, req);
@@ -328,15 +329,16 @@ out:
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
- [RCS] = 0x4260,
- [VCS] = 0x4264,
- [VCS2] = 0x4268,
- [BCS] = 0x426c,
- [VECS] = 0x4270,
+ [RCS0] = 0x4260,
+ [VCS0] = 0x4264,
+ [VCS1] = 0x4268,
+ [BCS0] = 0x426c,
+ [VECS0] = 0x4270,
};
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
@@ -352,21 +354,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
* otherwise the device can enter RC6 and interrupt the invalidation
* process
*/
- fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
+ if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
fw |= FORCEWAKE_RENDER;
- intel_uncore_forcewake_get(dev_priv, fw);
+ intel_uncore_forcewake_get(uncore, fw);
- I915_WRITE_FW(reg, 0x1);
+ intel_uncore_write_fw(uncore, reg, 0x1);
- if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else
vgpu_vreg_t(vgpu, reg) = 0;
- intel_uncore_forcewake_put(dev_priv, fw);
+ intel_uncore_forcewake_put(uncore, fw);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
@@ -379,11 +381,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
u32 old_v, new_v;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int i;
@@ -391,8 +393,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
- || IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
+ if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -415,7 +416,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
offset.reg += 4;
}
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@@ -467,11 +468,10 @@ static void switch_mmio(struct intel_vgpu *pre,
continue;
/*
* No need to do save or restore of the mmio which is in context
- * state image on kabylake, it's initialized by lri command and
+ * state image on gen9; it is initialized by an LRI command and
* saved or restored together with the context.
*/
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
- || IS_COFFEELAKE(dev_priv)) && mmio->in_context)
+ if (IS_GEN(dev_priv, 9) && mmio->in_context)
continue;
// save
@@ -493,7 +493,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
+ !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
+ dev_priv->engine[ring_id])))
continue;
if (mmio->mask)
@@ -550,9 +551,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
* performance for batch mmio read/write, so we need to
* handle forcewake manually.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
/**
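For readers skimming the patch, the TLB-invalidation path touched above reduces to a write-then-poll sequence under forcewake. Below is a condensed, non-authoritative sketch using the same uncore helpers the hunk switches to; it assumes the i915 driver context, and the helper name invalidate_engine_tlb() and the pr_warn message are illustrative, not part of the patch.

	/*
	 * Condensed sketch of the per-engine TLB invalidation above: hold
	 * forcewake across the access, write 1 to the engine's invalidate
	 * register, then poll until the hardware clears it.
	 */
	static void invalidate_engine_tlb(struct intel_uncore *uncore, i915_reg_t reg)
	{
		enum forcewake_domains fw =
			intel_uncore_forcewake_for_reg(uncore, reg,
						       FW_REG_READ | FW_REG_WRITE);

		intel_uncore_forcewake_get(uncore, fw);
		intel_uncore_write_fw(uncore, reg, 0x1);

		if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
			pr_warn("timeout waiting for engine TLB invalidation\n");

		intel_uncore_forcewake_put(uncore, fw);
	}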
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 428d252344f1..5b66e14c5b7b 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -60,6 +60,37 @@
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
+#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe))
+
+#define PLANE_CTL_ASYNC_FLIP (1 << 9)
+#define REG50080_FLIP_TYPE_MASK 0x3
+#define REG50080_FLIP_TYPE_ASYNC 0x1
+
+#define REG_50080(_pipe, _plane) ({ \
+ typeof(_pipe) (p) = (_pipe); \
+ typeof(_plane) (q) = (_plane); \
+ (((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
+ (_MMIO(0x50090))) : \
+ (((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
+ (_MMIO(0x50098))) : \
+ (((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
+ (_MMIO(0x5009C))) : \
+ (_MMIO(0x50080))))); })
+
+#define REG_50080_TO_PIPE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50090) ? (PIPE_A) : \
+ (((reg) == 0x50088 || (reg) == 0x50098) ? (PIPE_B) : \
+ (((reg) == 0x5008C || (reg) == 0x5009C) ? (PIPE_C) : \
+ (INVALID_PIPE)))); })
+
+#define REG_50080_TO_PLANE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50088 || (reg) == 0x5008C) ? \
+ (PLANE_PRIMARY) : \
+ (((reg) == 0x50090 || (reg) == 0x50098 || (reg) == 0x5009C) ? \
+ (PLANE_SPRITE0) : (I915_MAX_PLANES))); })
+
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
@@ -71,6 +102,8 @@
#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
#define FORCEWAKE_ACK_HSW_REG 0x130044
+#define RB_HEAD_WRAP_CNT_MAX ((1 << 11) - 1)
+#define RB_HEAD_WRAP_CNT_OFF 21
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
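The two new defines above, taken together with RB_HEAD_OFF_MASK, imply a split of the ring-buffer head value into a head offset in bits 2..20 and an 11-bit wrap counter starting at bit 21. A small standalone C sketch of that decoding follows; the register value is made up for illustration.

	#include <stdio.h>
	#include <stdint.h>

	#define RB_HEAD_WRAP_CNT_MAX	((1 << 11) - 1)
	#define RB_HEAD_WRAP_CNT_OFF	21
	#define RB_HEAD_OFF_MASK	((1U << 21) - (1U << 2))

	int main(void)
	{
		/* made-up head value: wrap count 3, head offset 0x1f0 */
		uint32_t ring_head = (3u << RB_HEAD_WRAP_CNT_OFF) | 0x1f0;

		printf("offset 0x%x, wrap %u (max %u)\n",
		       ring_head & RB_HEAD_OFF_MASK,
		       ring_head >> RB_HEAD_WRAP_CNT_OFF,
		       (unsigned int)RB_HEAD_WRAP_CNT_MAX);
		return 0;
	}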
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 05b953793316..0f919f0a43d4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (workload->ring_id != RCS)
+ if (workload->ring_id != RCS0)
return;
if (save) {
@@ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@@ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
context_page_num = 19;
i = 2;
@@ -298,12 +298,29 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
struct i915_request *req = workload->req;
void *shadow_ring_buffer_va;
u32 *cs;
+ int err;
- if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
- || IS_COFFEELAKE(req->i915))
- && is_inhibit_context(req->hw_context))
+ if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
intel_vgpu_restore_inhibit_context(vgpu, req);
+ /*
+ * To track whether a request has started on HW, we can emit a
+ * breadcrumb at the beginning of the request and check its
+ * timeline's HWSP to see if the breadcrumb has advanced past the
+ * start of this request. The request must carry an init breadcrumb if
+ * its timeline sets has_init_breadcrumb, or the scheduler might get a
+ * wrong view of its state during reset. Since requests from GVT always
+ * set the has_init_breadcrumb flag, emit the init breadcrumb for all
+ * requests here.
+ */
+ if (req->engine->emit_init_breadcrumb) {
+ err = req->engine->emit_init_breadcrumb(req);
+ if (err) {
+ gvt_vgpu_err("fail to emit init breadcrumb\n");
+ return err;
+ }
+ }
+
/* allocate shadow ring buffer */
cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
if (IS_ERR(cs)) {
@@ -434,8 +451,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (ret)
goto err_unpin;
- if ((workload->ring_id == RCS) &&
- (workload->wa_ctx.indirect_ctx.size != 0)) {
+ if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@@ -796,14 +812,35 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
void *src;
unsigned long context_gpa, context_page_num;
int i;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ u32 ring_base;
+ u32 head, tail;
+ u16 wrap_count;
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
workload->ctx_desc.lrca);
+ head = workload->rb_head;
+ tail = workload->rb_tail;
+ wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
+
+ if (tail < head) {
+ if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
+ wrap_count = 0;
+ else
+ wrap_count += 1;
+ }
+
+ head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
+
+ ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
+ vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
+
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -851,13 +888,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
@@ -903,8 +940,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- if (!workload->status && !(vgpu->resetting_eng &
- ENGINE_MASK(ring_id))) {
+ if (!workload->status &&
+ !(vgpu->resetting_eng & BIT(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
@@ -927,7 +964,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
list_del_init(&workload->list);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
/* If workload->status is not successful, the GPU has hit a hang or
* something has gone wrong with i915/GVT, and GVT won't inject a
* context switch interrupt into the guest.
@@ -941,7 +978,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
- intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
}
workload->complete(workload);
@@ -1001,7 +1038,7 @@ static int workload_thread(void *priv)
workload->ring_id, workload);
if (need_force_wake)
- intel_uncore_forcewake_get(gvt->dev_priv,
+ intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
ret = dispatch_workload(workload);
@@ -1023,7 +1060,7 @@ complete:
complete_current_workload(gvt, ring_id);
if (need_force_wake)
- intel_uncore_forcewake_put(gvt->dev_priv,
+ intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
intel_runtime_pm_put_unchecked(gvt->dev_priv);
@@ -1114,9 +1151,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
- else {
+ } else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
px_dma(i915_ppgtt->pdp.page_directory[i]) =
s->i915_context_pdps[i];
@@ -1150,7 +1187,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
*
*/
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1167,7 +1204,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm))
s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
@@ -1240,7 +1277,7 @@ out_shadow_ctx:
*
*/
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1344,7 +1381,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
struct intel_vgpu_mm *mm;
struct intel_vgpu *vgpu = workload->vgpu;
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
u64 pdps[GVT_RING_CTX_NR_PDPS];
switch (desc->addressing_mode) {
@@ -1399,6 +1436,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ u32 guest_head;
int ret;
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -1414,6 +1452,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
+ guest_head = head;
+
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
@@ -1446,11 +1486,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
+ workload->guest_rb_head = guest_head;
workload->rb_tail = tail;
workload->rb_start = start;
workload->rb_ctl = ctl;
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
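The new head/wrap-count bookkeeping added to update_guest_context() above can be summarized as: if the shadow tail ended up below the head, the ring wrapped once more, so the guest-visible wrap counter is bumped (rolling over past its 11-bit maximum) and the head reported back to the guest becomes wrap_count:tail. A minimal userspace sketch of that arithmetic; the helper name and sample values are illustrative only.

	#include <stdio.h>
	#include <stdint.h>

	#define RB_HEAD_WRAP_CNT_MAX	((1 << 11) - 1)
	#define RB_HEAD_WRAP_CNT_OFF	21

	static uint32_t update_guest_head(uint32_t guest_rb_head, uint32_t head, uint32_t tail)
	{
		uint32_t wrap_count = guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;

		/* tail below head means the ring buffer wrapped around once more */
		if (tail < head)
			wrap_count = (wrap_count == RB_HEAD_WRAP_CNT_MAX) ? 0 : wrap_count + 1;

		return (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
	}

	int main(void)
	{
		/* wrap count 5, head 0x800, tail 0x100 -> wrapped, count becomes 6 */
		uint32_t new_head = update_guest_head(5u << RB_HEAD_WRAP_CNT_OFF, 0x800, 0x100);

		printf("new guest head 0x%08x (wrap %u, offset 0x%x)\n",
		       new_head, new_head >> RB_HEAD_WRAP_CNT_OFF,
		       new_head & ((1u << RB_HEAD_WRAP_CNT_OFF) - 1));
		return 0;
	}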
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0635b2c4bed7..c50d14a9ce85 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -100,6 +100,7 @@ struct intel_vgpu_workload {
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+ unsigned long guest_rb_head;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
@@ -142,12 +143,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface);
extern const struct intel_vgpu_submission_ops
@@ -160,6 +161,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif
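All of the unsigned long/unsigned int to intel_engine_mask_t conversions in this patch keep the same convention: one bit per engine id, built and tested with BIT(). A tiny standalone illustration follows; the mask typedef and engine ids below are stand-ins, not the driver's own definitions.

	#include <stdio.h>
	#include <stdint.h>

	#define BIT(n) (1u << (n))

	typedef uint32_t engine_mask_t;		/* stand-in for intel_engine_mask_t */
	enum { RCS0, BCS0, VCS0, VCS1, VECS0, NUM_ENGINES };	/* stand-in engine ids */

	int main(void)
	{
		engine_mask_t resetting_eng = BIT(RCS0) | BIT(VCS0);

		for (int id = 0; id < NUM_ENGINES; id++)
			printf("engine %d resetting: %s\n", id,
			       resetting_eng & BIT(id) ? "yes" : "no");
		return 0;
	}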
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 720e2b10adaa..44ce3c2b9ac1 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
- vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
@@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* GPU engines. For FLR, engine_mask is ignored.
*/
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+ intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",