Diffstat (limited to 'drivers/gpu/drm/i915/gvt/mmio.c')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/mmio.c | 127
1 file changed, 13 insertions(+), 114 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 1e1310f50289..5c869e3fdf3b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 	(reg >= gvt->device_info.gtt_start_offset \
 	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
 
-static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 aperture_sz = vgpu_aperture_sz(vgpu);
-
-	return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
-}
-
-static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
-		void *pdata, unsigned int size, bool is_read)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 offset = gpa - aperture_gpa;
-
-	if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
-		gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
-			     offset, size);
-		return -EINVAL;
-	}
-
-	if (!vgpu->gm.aperture_va) {
-		gvt_vgpu_err("BAR is not enabled\n");
-		return -ENXIO;
-	}
-
-	if (is_read)
-		memcpy(pdata, vgpu->gm.aperture_va + offset, size);
-	else
-		memcpy(vgpu->gm.aperture_va + offset, pdata, size);
-	return 0;
-}
-
 static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes, bool read)
 {
@@ -117,25 +85,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			memcpy(pt, p_data, bytes);
 
-	} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
-
-		/* Since we enter the failsafe mode early during guest boot,
-		 * guest may not have chance to set up its ppgtt table, so
-		 * there should not be any wp pages for guest. Keep the wp
-		 * related code here in case we need to handle it in furture.
-		 */
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
-			/* remove write protection to prevent furture traps */
-			intel_vgpu_clean_guest_page(vgpu, gp);
-			if (read)
-				intel_gvt_hypervisor_read_gpa(vgpu, pa,
-						p_data, bytes);
-			else
-				intel_gvt_hypervisor_write_gpa(vgpu, pa,
-						p_data, bytes);
-		}
 	}
 	mutex_unlock(&gvt->lock);
 }
@@ -157,37 +106,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	unsigned int offset = 0;
 	int ret = -EINVAL;
 
-
 	if (vgpu->failsafe) {
 		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
 		return 0;
 	}
 	mutex_lock(&gvt->lock);
 
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
-		mutex_unlock(&gvt->lock);
-		return ret;
-	}
-
-	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
-
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
-			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
-					p_data, bytes);
-			if (ret) {
-				gvt_vgpu_err("guest page read error %d, "
-					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-					ret, gp->gfn, pa, *(u32 *)p_data,
-					bytes);
-			}
-			mutex_unlock(&gvt->lock);
-			return ret;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
@@ -205,14 +129,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 				p_data, bytes);
 		if (ret)
 			goto err;
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
 		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
@@ -228,11 +150,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		goto err;
 
 	intel_gvt_mmio_set_accessed(gvt, offset);
-	mutex_unlock(&gvt->lock);
-	return 0;
+	ret = 0;
+	goto out;
+
 err:
 	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
 			offset, bytes);
+out:
 	mutex_unlock(&gvt->lock);
 	return ret;
 }
@@ -261,30 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
 	mutex_lock(&gvt->lock);
 
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
-		mutex_unlock(&gvt->lock);
-		return ret;
-	}
-
-	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
-
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
-			ret = gp->handler(gp, pa, p_data, bytes);
-			if (ret) {
-				gvt_err("guest page write error %d, "
-					"gfn 0x%lx, pa 0x%llx, "
-					"var 0x%x, len %d\n",
-					ret, gp->gfn, pa,
-					*(u32 *)p_data, bytes);
-			}
-			mutex_unlock(&gvt->lock);
-			return ret;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
@@ -302,14 +202,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 				p_data, bytes);
 		if (ret)
 			goto err;
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
 		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
@@ -317,11 +215,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		goto err;
 
 	intel_gvt_mmio_set_accessed(gvt, offset);
-	mutex_unlock(&gvt->lock);
-	return 0;
+	ret = 0;
+	goto out;
 err:
 	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n",
 		     offset, bytes);
+out:
 	mutex_unlock(&gvt->lock);
 	return ret;
 }
@@ -342,10 +241,10 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
 		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
 
-		vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
 
 		/* set the bit 0:2(Core C-State ) to C0 */
-		vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
 
 		vgpu->mmio.disable_warn_untrack = false;
 	} else {
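Note: beyond deleting the aperture and write-protected-page fast paths, the recurring change in both emulation routines above is consolidating every per-branch "mutex_unlock(&gvt->lock); return ret;" pair into a single exit label, so gvt->lock is released in exactly one place. Below is a minimal userspace sketch of that single-exit locking pattern, with a pthread mutex standing in for gvt->lock; the function and its checks are illustrative only, not code from this driver:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Single-exit pattern: every failure branch jumps to "err" or "out",
 * so the mutex is unlocked in exactly one place and a new branch
 * cannot forget the unlock. */
static int emulate_access(unsigned int offset, unsigned int bytes)
{
	int ret = -EINVAL;

	pthread_mutex_lock(&lock);

	if (bytes > 8)			/* mirrors the WARN_ON(bytes > 8) check */
		goto err;

	if (offset & (bytes - 1))	/* reject unaligned accesses */
		goto err;

	/* ... the actual emulation work would happen here ... */
	ret = 0;
	goto out;

err:
	fprintf(stderr, "fail to emulate access %08x len %d\n",
		offset, bytes);
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("aligned:   %d\n", emulate_access(0x2030, 4));	/* prints 0 */
	printf("too large: %d\n", emulate_access(0x2030, 16));	/* prints -EINVAL */
	return 0;
}

The payoff is visible in the diff itself: the earlier code repeated the unlock-and-return pair in four branches per function, and the rewrite replaces each with a two-word "goto out;".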