Diffstat (limited to 'drivers/gpu/drm')
87 files changed, 1071 insertions, 558 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index e2c3c5ec42d1..c53095b3b0fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	/* HG _PR3 doesn't seem to work on this A+A weston board */
 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index bb40d2529a30..239bf2a4b3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -179,8 +179,12 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
 		amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
-		/* Using pipes 2/3 from MEC 2 seems cause problems */
-		if (mec == 1 && pipe > 1)
+		/*
+		 * 1. Using pipes 2/3 from MEC 2 seems cause problems.
+		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
+		 * only can be issued on queue 0.
+		 */
+		if ((mec == 1 && pipe > 1) || queue != 0)
 			continue;
 		ring->me = mec + 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6fc16eecf2dc..5afbc5e714d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2262,12 +2262,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 {
 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
 				   AMDGPU_VM_PTE_COUNT(adev) * 8);
+	uint64_t init_pde_value = 0, flags;
 	unsigned ring_instance;
 	struct amdgpu_ring *ring;
 	struct drm_sched_rq *rq;
+	unsigned long size;
 	int r, i;
-	u64 flags;
-	uint64_t init_pde_value = 0;
 	vm->va = RB_ROOT_CACHED;
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
@@ -2318,29 +2318,21 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
 				AMDGPU_GEM_CREATE_SHADOW);
-	r = amdgpu_bo_create(adev,
-			     amdgpu_vm_bo_size(adev, adev->vm_manager.root_level),
-			     align, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     flags,
-			     NULL, NULL, init_pde_value, &vm->root.base.bo);
+	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
+	r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
+			     flags, NULL, NULL, init_pde_value,
+			     &vm->root.base.bo);
 	if (r)
 		goto error_free_sched_entity;
+	r = amdgpu_bo_reserve(vm->root.base.bo, true);
+	if (r)
+		goto error_free_root;
+
 	vm->root.base.vm = vm;
 	list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
-	INIT_LIST_HEAD(&vm->root.base.vm_status);
-
-	if (vm->use_cpu_for_update) {
-		r = amdgpu_bo_reserve(vm->root.base.bo, false);
-		if (r)
-			goto error_free_root;
-
-		r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
-		amdgpu_bo_unreserve(vm->root.base.bo);
-		if (r)
-			goto error_free_root;
-	}
+	list_add_tail(&vm->root.base.vm_status, &vm->evicted);
+	amdgpu_bo_unreserve(vm->root.base.bo);
 	if (pasid) {
 		unsigned long flags;
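The amdgpu_vm_init() hunk above replaces the use_cpu_for_update special case with an unconditional reserve/initialize/unreserve of the root page-directory BO, parking the new VM on the evicted list. A rough sketch of that reservation pattern (the surrounding allocation and error labels are elided; this is an illustration, not the full function):

    /* Sketch: touch BO state only while the BO is reserved. */
    static int example_root_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
    {
            int r = amdgpu_bo_reserve(vm->root.base.bo, true);

            if (r)
                    return r;
            /* Lists hanging off the BO may only be edited under reservation. */
            list_add_tail(&vm->root.base.vm_status, &vm->evicted);
            amdgpu_bo_unreserve(vm->root.base.bo);
            return 0;
    }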
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index b69ceafb7888..ee14d78be2a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -278,9 +278,9 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
 	/* Track retry faults in per-VM fault FIFO. */
 	spin_lock(&adev->vm_manager.pasid_lock);
 	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
-	spin_unlock(&adev->vm_manager.pasid_lock);
-	if (WARN_ON_ONCE(!vm)) {
+	if (!vm) {
 		/* VM not found, process it normally */
+		spin_unlock(&adev->vm_manager.pasid_lock);
 		amdgpu_ih_clear_fault(adev, key);
 		return true;
 	}
@@ -288,9 +288,11 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
 	r = kfifo_put(&vm->faults, key);
 	if (!r) {
 		/* FIFO is full. Ignore it until there is space */
+		spin_unlock(&adev->vm_manager.pasid_lock);
 		amdgpu_ih_clear_fault(adev, key);
 		goto ignore_iv;
 	}
+	spin_unlock(&adev->vm_manager.pasid_lock);
 	/* It's the first fault for this address, process it normally */
 	return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index da2b99c2d95f..1e3e05a11f7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1049,7 +1049,6 @@ static int vi_common_early_init(void *handle)
 			AMD_CG_SUPPORT_GFX_CP_LS |
 			AMD_CG_SUPPORT_GFX_CGTS |
 			AMD_CG_SUPPORT_GFX_CGTS_LS |
-			AMD_CG_SUPPORT_GFX_CGCG |
 			AMD_CG_SUPPORT_GFX_CGLS |
 			AMD_CG_SUPPORT_BIF_LS |
 			AMD_CG_SUPPORT_HDP_MGCG |
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index b3c6e997ccdb..e394799979a6 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -559,15 +559,15 @@ EXPORT_SYMBOL(drm_read);
  *
  * Mask of POLL flags indicating the current status of the file.
  */
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
 {
 	struct drm_file *file_priv = filp->private_data;
-	unsigned int mask = 0;
+	__poll_t mask = 0;
 	poll_wait(filp, &file_priv->event_wait, wait);
 	if (!list_empty(&file_priv->event_list))
-		mask |= POLLIN | POLLRDNORM;
+		mask |= EPOLLIN | EPOLLRDNORM;
 	return mask;
 }
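The drm_file.c hunk is part of the tree-wide conversion from unsigned int/POLL* to the sparse-checked __poll_t/EPOLL* types; the constants have the same values, but the annotated type lets static checkers catch mixed-up masks. A minimal poll handler in the new style (struct example_dev and its data_ready flag are hypothetical):

    #include <linux/poll.h>
    #include <linux/wait.h>

    struct example_dev {
            wait_queue_head_t wq;
            bool data_ready;
    };

    static __poll_t example_poll(struct file *filp, struct poll_table_struct *wait)
    {
            struct example_dev *dev = filp->private_data;
            __poll_t mask = 0;

            poll_wait(filp, &dev->wq, wait);
            if (dev->data_ready)
                    mask |= EPOLLIN | EPOLLRDNORM;  /* readable without blocking */
            return mask;
    }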
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 8745971a7680..3a3bf752e03a 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -185,21 +185,22 @@ static int cdv_backlight_init(struct drm_device *dev)
  * for this and the MID devices.
  */
-static inline u32 CDV_MSG_READ32(uint port, uint offset)
+static inline u32 CDV_MSG_READ32(int domain, uint port, uint offset)
 {
 	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
 	uint32_t ret_val = 0;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
 	pci_write_config_dword(pci_root, 0xD0, mcr);
 	pci_read_config_dword(pci_root, 0xD4, &ret_val);
 	pci_dev_put(pci_root);
 	return ret_val;
 }
-static inline void CDV_MSG_WRITE32(uint port, uint offset, u32 value)
+static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
+				   u32 value)
 {
 	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
 	pci_write_config_dword(pci_root, 0xD4, value);
 	pci_write_config_dword(pci_root, 0xD0, mcr);
 	pci_dev_put(pci_root);
@@ -216,11 +217,12 @@ static void cdv_init_pm(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	u32 pwr_cnt;
+	int domain = pci_domain_nr(dev->pdev->bus);
 	int i;
-	dev_priv->apm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+	dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
 					    PSB_APMBA) & 0xFFFF;
-	dev_priv->ospm_base = CDV_MSG_READ32(PSB_PUNIT_PORT,
+	dev_priv->ospm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT,
 					     PSB_OSPMBA) & 0xFFFF;
 	/* Power status */
@@ -251,7 +253,7 @@ static void cdv_errata(struct drm_device *dev)
 	 * Bonus Launch to work around the issue, by degrading
 	 * performance.
 	 */
-	CDV_MSG_WRITE32(3, 0x30, 0x08027108);
+	CDV_MSG_WRITE32(pci_domain_nr(dev->pdev->bus), 3, 0x30, 0x08027108);
 }
 /**
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 4a295f9ba067..a7fb6de4dd15 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -19,7 +19,9 @@ void gma_get_core_freq(struct drm_device *dev)
 {
 	uint32_t clock;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct pci_dev *pci_root =
+		pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+					    0, 0);
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 1fa163373a47..7171b7475f58 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -32,7 +32,9 @@ static void mid_get_fuse_settings(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct pci_dev *pci_root =
+		pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+					    0, 0);
 	uint32_t fuse_value = 0;
 	uint32_t fuse_value_tmp = 0;
@@ -104,7 +106,9 @@ static void mid_get_fuse_settings(struct drm_device *dev)
 static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
 {
 	uint32_t platform_rev_id = 0;
-	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+	int domain = pci_domain_nr(dev_priv->dev->pdev->bus);
+	struct pci_dev *pci_gfx_root =
+		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
 	if (pci_gfx_root == NULL) {
 		WARN_ON(1);
@@ -281,7 +285,9 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
 	u32 addr;
 	u8 __iomem *vbt_virtual;
 	struct mid_vbt_header vbt_header;
-	struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+	struct pci_dev *pci_gfx_root =
+		pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus),
+					    0, PCI_DEVFN(2, 0));
 	int ret = -1;
 	/* Get the address of the platform config vbt */
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 38d09d4b3ed5..ac32ab5aa002 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -248,7 +248,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 		goto out_err;
 	if (IS_MRST(dev)) {
-		dev_priv->aux_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(3, 0));
+		int domain = pci_domain_nr(dev->pdev->bus);
+
+		dev_priv->aux_pdev =
+			pci_get_domain_bus_and_slot(domain, 0,
+						    PCI_DEVFN(3, 0));
 		if (dev_priv->aux_pdev) {
 			resource_start = pci_resource_start(dev_priv->aux_pdev,
@@ -268,7 +272,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 		}
 		dev_priv->gmbus_reg = dev_priv->aux_reg;
-		dev_priv->lpc_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(31, 0));
+		dev_priv->lpc_pdev =
+			pci_get_domain_bus_and_slot(domain, 0,
+						    PCI_DEVFN(31, 0));
 		if (dev_priv->lpc_pdev) {
 			pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
 				&dev_priv->lpc_gpio_base);
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 4918efc57b7a..e8300f509023 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -780,38 +780,40 @@ extern const struct psb_ops cdv_chip_ops;
 extern int drm_idle_check_interval;
 /* Utilities */
-static inline u32 MRST_MSG_READ32(uint port, uint offset)
+static inline u32 MRST_MSG_READ32(int domain, uint port, uint offset)
 {
 	int mcr = (0xD0<<24) | (port << 16) | (offset << 8);
 	uint32_t ret_val = 0;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
 	pci_write_config_dword(pci_root, 0xD0, mcr);
 	pci_read_config_dword(pci_root, 0xD4, &ret_val);
 	pci_dev_put(pci_root);
 	return ret_val;
 }
-static inline void MRST_MSG_WRITE32(uint port, uint offset, u32 value)
+static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
+				    u32 value)
 {
 	int mcr = (0xE0<<24) | (port << 16) | (offset << 8) | 0xF0;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
 	pci_write_config_dword(pci_root, 0xD4, value);
 	pci_write_config_dword(pci_root, 0xD0, mcr);
 	pci_dev_put(pci_root);
 }
-static inline u32 MDFLD_MSG_READ32(uint port, uint offset)
+static inline u32 MDFLD_MSG_READ32(int domain, uint port, uint offset)
 {
 	int mcr = (0x10<<24) | (port << 16) | (offset << 8);
 	uint32_t ret_val = 0;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
 	pci_write_config_dword(pci_root, 0xD0, mcr);
 	pci_read_config_dword(pci_root, 0xD4, &ret_val);
 	pci_dev_put(pci_root);
 	return ret_val;
 }
-static inline void MDFLD_MSG_WRITE32(uint port, uint offset, u32 value)
+static inline void MDFLD_MSG_WRITE32(int domain, uint port, uint offset,
+				     u32 value)
 {
 	int mcr = (0x11<<24) | (port << 16) | (offset << 8) | 0xF0;
-	struct pci_dev *pci_root = pci_get_bus_and_slot(0, 0);
+	struct pci_dev *pci_root = pci_get_domain_bus_and_slot(domain, 0, 0);
 	pci_write_config_dword(pci_root, 0xD4, value);
 	pci_write_config_dword(pci_root, 0xD0, mcr);
 	pci_dev_put(pci_root);
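Every gma500 hunk above applies the same mechanical conversion: pci_get_bus_and_slot(bus, devfn) hard-codes PCI domain 0, so it is replaced by pci_get_domain_bus_and_slot() with the domain taken from the GPU's own bus. The shape of the conversion, as a sketch (example_get_root is a hypothetical helper):

    #include <linux/pci.h>

    /* Look up the host bridge (00:00.0) in the same domain as pdev. */
    static struct pci_dev *example_get_root(struct pci_dev *pdev)
    {
            int domain = pci_domain_nr(pdev->bus);

            /* Returns a reference; the caller must pci_dev_put() it. */
            return pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
    }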
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 97bfc00d2a82..c62346fdc05d 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -119,16 +119,6 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 	if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
 		return 0;
-	if (map) {
-		vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
-						MEMREMAP_WC);
-		if (!vgpu->gm.aperture_va)
-			return -ENOMEM;
-	} else {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
-	}
-
 	val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
 	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
 		val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -141,11 +131,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 					  aperture_pa >> PAGE_SHIFT,
 					  aperture_sz >> PAGE_SHIFT,
 					  map);
-	if (ret) {
-		memunmap(vgpu->gm.aperture_va);
-		vgpu->gm.aperture_va = NULL;
+	if (ret)
 		return ret;
-	}
 	vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
 	return 0;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2ab584f97dfb..2fb7b34ef561 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -472,7 +472,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
 		ret = PTR_ERR(dmabuf);
 		goto out_free_gem;
 	}
-	obj->base.dma_buf = dmabuf;
 	i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 769c1c24ae75..70494e394d2c 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -521,24 +521,23 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
 			_EL_OFFSET_STATUS_PTR);
-	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
-static void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
 {
-	enum intel_engine_id i;
+	unsigned int tmp;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
+	struct intel_vgpu_submission *s = &vgpu->submission;
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		struct intel_vgpu_submission *s = &vgpu->submission;
-
-		kfree(s->ring_scan_buffer[i]);
-		s->ring_scan_buffer[i] = NULL;
-		s->ring_scan_buffer_size[i] = 0;
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		kfree(s->ring_scan_buffer[engine->id]);
+		s->ring_scan_buffer[engine->id] = NULL;
+		s->ring_scan_buffer_size[engine->id] = 0;
 	}
 }
@@ -553,9 +552,10 @@ static void reset_execlist(struct intel_vgpu *vgpu,
 		init_vgpu_execlist(vgpu, engine->id);
 }
-static int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu,
+			 unsigned long engine_mask)
 {
-	reset_execlist(vgpu, ALL_ENGINES);
+	reset_execlist(vgpu, engine_mask);
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index a529d2bd393c..8d5317d0122d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -997,9 +997,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
 static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 	struct intel_vgpu *vgpu = spt->vgpu;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	struct intel_vgpu_ppgtt_spt *s;
 	struct intel_gvt_gtt_entry se, ge;
-	unsigned long i;
+	unsigned long gfn, i;
 	int ret;
 	trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1007,9 +1009,10 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
 		for_each_present_guest_entry(spt, &ge, i) {
-			ret = gtt_entry_p2m(vgpu, &ge, &se);
-			if (ret)
-				goto fail;
+			gfn = ops->get_pfn(&ge);
+			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
+			    gtt_entry_p2m(vgpu, &ge, &se))
+				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
 			ppgtt_set_shadow_entry(spt, &se, i);
 		}
 		return 0;
@@ -1906,7 +1909,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
 	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
-	unsigned long gma;
+	unsigned long gma, gfn;
 	struct intel_gvt_gtt_entry e, m;
 	int ret;
@@ -1925,6 +1928,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 			bytes);
 	if (ops->test_present(&e)) {
+		gfn = ops->get_pfn(&e);
+
+		/* one PTE update may be issued in multiple writes and the
+		 * first write may not construct a valid gfn
+		 */
+		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+			goto out;
+		}
+
 		ret = gtt_entry_p2m(vgpu, &e, &m);
 		if (ret) {
 			gvt_vgpu_err("fail to translate guest gtt entry\n");
@@ -1939,6 +1952,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
 	}
+out:
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
 	gtt_invalidate(gvt->dev_priv);
 	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7dc7a80213a8..c6197d990818 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -82,7 +82,6 @@ struct intel_gvt_device_info {
 struct intel_vgpu_gm {
 	u64 aperture_sz;
 	u64 hidden_sz;
-	void *aperture_va;
 	struct drm_mm_node low_gm_node;
 	struct drm_mm_node high_gm_node;
 };
@@ -127,7 +126,6 @@ struct intel_vgpu_irq {
 struct intel_vgpu_opregion {
 	bool mapped;
 	void *va;
-	void *va_gopregion;
 	u32 gfn[INTEL_GVT_OPREGION_PAGES];
 };
@@ -152,8 +150,8 @@ enum {
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu);
-	void (*clean)(struct intel_vgpu *vgpu);
+	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
 };
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 92d6468daeee..9be639aa3b55 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
-	struct intel_vgpu_submission *s = &vgpu->submission;
 	u32 data = *(u32 *)p_data;
 	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
 	bool enable_execlist;
@@ -1523,11 +1522,9 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		if (!enable_execlist)
 			return 0;
-		if (s->active)
-			return 0;
-
 		ret = intel_vgpu_select_submission_ops(vgpu,
-				INTEL_VGPU_EXECLIST_SUBMISSION);
+				ENGINE_MASK(ring_id),
+				INTEL_VGPU_EXECLIST_SUBMISSION);
 		if (ret)
 			return ret;
@@ -2843,6 +2840,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
+	MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
 	MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index a1bd82feb827..f8e77e166246 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -58,6 +58,7 @@ struct intel_gvt_mpt {
 	int (*set_opregion)(void *vgpu);
 	int (*get_vfio_device)(void *vgpu);
 	void (*put_vfio_device)(void *vgpu);
+	bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
 };
 extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 45bab5a6290b..021f722e2481 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -651,6 +651,39 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
 	return ret;
 }
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+{
+	return off >= vgpu_aperture_offset(vgpu) &&
+	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
+}
+
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+		void *buf, unsigned long count, bool is_write)
+{
+	void *aperture_va;
+
+	if (!intel_vgpu_in_aperture(vgpu, off) ||
+	    !intel_vgpu_in_aperture(vgpu, off + count)) {
+		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
+		return -EINVAL;
+	}
+
+	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+					ALIGN_DOWN(off, PAGE_SIZE),
+					count + offset_in_page(off));
+	if (!aperture_va)
+		return -EIO;
+
+	if (is_write)
+		memcpy(aperture_va + offset_in_page(off), buf, count);
+	else
+		memcpy(buf, aperture_va + offset_in_page(off), count);
+
+	io_mapping_unmap(aperture_va);
+
+	return 0;
+}
+
 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 		size_t count, loff_t *ppos, bool is_write)
 {
@@ -679,8 +712,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 				buf, count, is_write);
 		break;
 	case VFIO_PCI_BAR2_REGION_INDEX:
-		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
-					buf, count, is_write);
+		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
 		break;
 	case VFIO_PCI_BAR1_REGION_INDEX:
 	case VFIO_PCI_BAR3_REGION_INDEX:
@@ -701,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 	return ret == 0 ? count : ret;
 }
+static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+{
+	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+	struct intel_gvt *gvt = vgpu->gvt;
+	int offset;
+
+	/* Only allow MMIO GGTT entry access */
+	if (index != PCI_BASE_ADDRESS_0)
+		return false;
+
+	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
+		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
+
+	return (offset >= gvt->device_info.gtt_start_offset &&
+		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
+ true : false; +} + static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos) { @@ -710,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf, while (count) { size_t filled; - if (count >= 4 && !(*ppos % 4)) { + /* Only support GGTT entry 8 bytes read */ + if (count >= 8 && !(*ppos % 8) && + gtt_entry(mdev, ppos)) { + u64 val; + + ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ppos, false); + if (ret <= 0) + goto read_err; + + if (copy_to_user(buf, &val, sizeof(val))) + goto read_err; + + filled = 8; + } else if (count >= 4 && !(*ppos % 4)) { u32 val; ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), @@ -770,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev, while (count) { size_t filled; - if (count >= 4 && !(*ppos % 4)) { + /* Only support GGTT entry 8 bytes write */ + if (count >= 8 && !(*ppos % 8) && + gtt_entry(mdev, ppos)) { + u64 val; + + if (copy_from_user(&val, buf, sizeof(val))) + goto write_err; + + ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val), + ppos, true); + if (ret <= 0) + goto write_err; + + filled = 8; + } else if (count >= 4 && !(*ppos % 4)) { u32 val; if (copy_from_user(&val, buf, sizeof(val))) @@ -1019,6 +1098,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, if (!sparse) return -ENOMEM; + sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; + sparse->header.version = 1; sparse->nr_areas = nr_areas; cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP; sparse->areas[0].offset = @@ -1044,7 +1125,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, break; default: { - struct vfio_region_info_cap_type cap_type; + struct vfio_region_info_cap_type cap_type = { + .header.id = VFIO_REGION_INFO_CAP_TYPE, + .header.version = 1 }; if (info.index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) @@ -1061,8 +1144,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, cap_type.subtype = vgpu->vdev.region[i].subtype; ret = vfio_info_add_capability(&caps, - VFIO_REGION_INFO_CAP_TYPE, - &cap_type); + &cap_type.header, + sizeof(cap_type)); if (ret) return ret; } @@ -1072,8 +1155,9 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, switch (cap_type_id) { case VFIO_REGION_INFO_CAP_SPARSE_MMAP: ret = vfio_info_add_capability(&caps, - VFIO_REGION_INFO_CAP_SPARSE_MMAP, - sparse); + &sparse->header, sizeof(*sparse) + + (sparse->nr_areas * + sizeof(*sparse->areas))); kfree(sparse); if (ret) return ret; @@ -1570,6 +1654,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr) return PFN_DOWN(__pa(addr)); } +static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) +{ + struct kvmgt_guest_info *info; + struct kvm *kvm; + + if (!handle_valid(handle)) + return false; + + info = (struct kvmgt_guest_info *)handle; + kvm = info->kvm; + + return kvm_is_visible_gfn(kvm, gfn); + +} + struct intel_gvt_mpt kvmgt_mpt = { .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, @@ -1585,6 +1684,7 @@ struct intel_gvt_mpt kvmgt_mpt = { .set_opregion = kvmgt_set_opregion, .get_vfio_device = kvmgt_get_vfio_device, .put_vfio_device = kvmgt_put_vfio_device, + .is_valid_gfn = kvmgt_is_valid_gfn, }; EXPORT_SYMBOL_GPL(kvmgt_mpt); diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 562b5ad857a4..5c869e3fdf3b 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu 
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 562b5ad857a4..5c869e3fdf3b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 	(reg >= gvt->device_info.gtt_start_offset \
 	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
-static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 aperture_sz = vgpu_aperture_sz(vgpu);
-
-	return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
-}
-
-static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
-		void *pdata, unsigned int size, bool is_read)
-{
-	u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
-	u64 offset = gpa - aperture_gpa;
-
-	if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
-		gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
-			     offset, size);
-		return -EINVAL;
-	}
-
-	if (!vgpu->gm.aperture_va) {
-		gvt_vgpu_err("BAR is not enabled\n");
-		return -ENXIO;
-	}
-
-	if (is_read)
-		memcpy(pdata, vgpu->gm.aperture_va + offset, size);
-	else
-		memcpy(vgpu->gm.aperture_va + offset, pdata, size);
-	return 0;
-}
-
 static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		void *p_data, unsigned int bytes, bool read)
 {
@@ -144,11 +112,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	}
 	mutex_lock(&gvt->lock);
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 	if (WARN_ON(bytes > 8))
@@ -222,11 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 	mutex_lock(&gvt->lock);
-	if (vgpu_gpa_is_aperture(vgpu, pa)) {
-		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
-		goto out;
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 	if (WARN_ON(bytes > 8))
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 74834395dd89..256f1bb522b7 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -80,7 +80,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
 	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
 	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
 	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-	{ /* Terminated */ }
+	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
@@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
 	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
 	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+	{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
 	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
 	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
 	{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
@@ -146,7 +147,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
 	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
 	{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
-	{ /* Terminated */ }
+	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
 static struct {
@@ -167,7 +168,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
 	};
 	int ring_id, i;
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
 		offset.reg = regs[ring_id];
 		for (i = 0; i < 64; i++) {
 			gen9_render_mocs.control_table[ring_id][i] =
@@ -310,8 +311,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
 		switch_mocs(pre, next, ring_id);
-	mmio = dev_priv->gvt->engine_mmio_list;
-	while (i915_mmio_reg_offset((mmio++)->reg)) {
+	for (mmio = dev_priv->gvt->engine_mmio_list;
+	     i915_mmio_reg_valid(mmio->reg); mmio++) {
 		if (mmio->ring_id != ring_id)
 			continue;
 		// save
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index ca8005a6d5fa..81aff4eacbfe 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -339,4 +339,21 @@ static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
 	intel_gvt_host.mpt->put_vfio_device(vgpu);
 }
+/**
+ * intel_gvt_hypervisor_is_valid_gfn - check if a visible gfn
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * true on valid gfn, false on not.
+ */
+static inline bool intel_gvt_hypervisor_is_valid_gfn(
+		struct intel_vgpu *vgpu, unsigned long gfn)
+{
+	if (!intel_gvt_host.mpt->is_valid_gfn)
+		return true;
+
+	return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
+}
+
 #endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 8420d1fc3ddb..fa75a2eead90 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -299,21 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
 {
 	int i, ret = 0;
-	unsigned long pfn;
 	gvt_dbg_core("emulate opregion from kernel\n");
 	switch (intel_gvt_host.hypervisor_type) {
 	case INTEL_GVT_HYPERVISOR_KVM:
-		pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
-		vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
-						INTEL_GVT_OPREGION_SIZE,
-						MEMREMAP_WB);
-		if (!vgpu_opregion(vgpu)->va_gopregion) {
-			gvt_vgpu_err("failed to map guest opregion\n");
-			ret = -EFAULT;
-		}
-		vgpu_opregion(vgpu)->mapped = true;
+		for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+			vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
 		break;
 	case INTEL_GVT_HYPERVISOR_XEN:
 		/**
@@ -352,10 +344,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
 		if (vgpu_opregion(vgpu)->mapped)
 			map_vgpu_opregion(vgpu, false);
 	} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
-		if (vgpu_opregion(vgpu)->mapped) {
-			memunmap(vgpu_opregion(vgpu)->va_gopregion);
-			vgpu_opregion(vgpu)->va_gopregion = NULL;
-		}
+		/* Guest opregion is released by VFIO */
 	}
 	free_pages((unsigned long)vgpu_opregion(vgpu)->va,
 		   get_order(INTEL_GVT_OPREGION_SIZE));
@@ -480,19 +469,40 @@ static bool querying_capabilities(u32 scic)
  */
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 {
-	u32 *scic, *parm;
+	u32 scic, parm;
 	u32 func, subfunc;
+	u64 scic_pa = 0, parm_pa = 0;
+	int ret;
 	switch (intel_gvt_host.hypervisor_type) {
 	case INTEL_GVT_HYPERVISOR_XEN:
-		scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
-		parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+		scic = *((u32 *)vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_SCIC);
+		parm = *((u32 *)vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_PARM);
 		break;
 	case INTEL_GVT_HYPERVISOR_KVM:
-		scic = vgpu_opregion(vgpu)->va_gopregion +
-						INTEL_GVT_OPREGION_SCIC;
-		parm = vgpu_opregion(vgpu)->va_gopregion +
-						INTEL_GVT_OPREGION_PARM;
+		scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+					INTEL_GVT_OPREGION_SCIC;
+		parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+					INTEL_GVT_OPREGION_PARM;
+
+		ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
+						    &scic, sizeof(scic));
+		if (ret) {
+			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+				     ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+
+		ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
+						    &parm, sizeof(parm));
+		if (ret) {
+			gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+				     ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+
+		break;
 	default:
 		gvt_vgpu_err("not supported hypervisor\n");
@@ -510,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 		return 0;
 	}
-	func = GVT_OPREGION_FUNC(*scic);
-	subfunc = GVT_OPREGION_SUBFUNC(*scic);
-	if (!querying_capabilities(*scic)) {
+	func = GVT_OPREGION_FUNC(scic);
+	subfunc = GVT_OPREGION_SUBFUNC(scic);
+	if (!querying_capabilities(scic)) {
 		gvt_vgpu_err("requesting runtime service: func \"%s\","
 				" subfunc \"%s\"\n",
 				opregion_func_name(func),
@@ -521,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
 		 * emulate exit status of function call, '0' means
 		 * "failure, generic, unsupported or unknown cause"
 		 */
-		*scic &= ~OPREGION_SCIC_EXIT_MASK;
-		return 0;
+		scic &= ~OPREGION_SCIC_EXIT_MASK;
+		goto out;
+	}
+
+	scic = 0;
+	parm = 0;
+
+out:
+	switch (intel_gvt_host.hypervisor_type) {
+	case INTEL_GVT_HYPERVISOR_XEN:
+		*((u32 *)vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_SCIC) = scic;
+		*((u32 *)vgpu_opregion(vgpu)->va +
+					INTEL_GVT_OPREGION_PARM) = parm;
+		break;
+	case INTEL_GVT_HYPERVISOR_KVM:
+		ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
+						     &scic, sizeof(scic));
+		if (ret) {
+			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+				     ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+
+		ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
+						     &parm, sizeof(parm));
+		if (ret) {
+			gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+				     ret, scic_pa, sizeof(scic));
+			return ret;
+		}
+
+		break;
+	default:
+		gvt_vgpu_err("not supported hypervisor\n");
+		return -EINVAL;
 	}
-	*scic = 0;
-	*parm = 0;
 	return 0;
 }
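The opregion rework drops the cached va_gopregion mapping; every SCIC/PARM access on the KVM path now goes through the GPA accessors, so the guest pages need not stay mapped in the host. The read-modify-write pattern, reduced to one field (example_update_scic is illustrative):

    /* Sketch: fail an unsupported SWSCI request in guest memory. */
    static int example_update_scic(struct intel_vgpu *vgpu, u64 scic_pa)
    {
            u32 scic;
            int ret;

            ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa, &scic, sizeof(scic));
            if (ret)
                    return ret;
            scic &= ~OPREGION_SCIC_EXIT_MASK;       /* exit status 0 */
            return intel_gvt_hypervisor_write_gpa(vgpu, scic_pa, &scic,
                                                  sizeof(scic));
    }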
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index eea1a2f92099..cc1ce361cd76 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
+	bool active;
 	ktime_t sched_in_time;
 	ktime_t sched_out_time;
@@ -308,8 +309,15 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
 static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
 {
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+
 	kfree(vgpu->sched_data);
 	vgpu->sched_data = NULL;
+
+	/* this vgpu id has been removed */
+	if (idr_is_empty(&gvt->vgpu_idr))
+		hrtimer_cancel(&sched_data->timer);
 }
 static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -325,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
 			sched_data->period), HRTIMER_MODE_ABS);
+	vgpu_data->active = true;
 }
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -332,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
 	list_del_init(&vgpu_data->lru_list);
+	vgpu_data->active = false;
 }
 static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -367,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
 void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
 {
-	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
-	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	if (!vgpu_data->active) {
+		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+	}
 }
 void intel_gvt_kick_schedule(struct intel_gvt *gvt)
@@ -382,6 +395,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
 	int ring_id;
+	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+
+	if (!vgpu_data->active)
+		return;
 	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0056638b0c16..b55b3580ca1d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -991,7 +991,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	intel_vgpu_select_submission_ops(vgpu, 0);
+	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 	i915_gem_context_put(s->shadow_ctx);
 	kmem_cache_destroy(s->workloads);
 }
@@ -1079,6 +1079,7 @@ out_shadow_ctx:
  *
  */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1091,21 +1092,21 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
 	if (WARN_ON(interface >= ARRAY_SIZE(ops)))
 		return -EINVAL;
-	if (s->active) {
-		s->ops->clean(vgpu);
-		s->active = false;
-		gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
-				vgpu->id, s->ops->name);
-	}
+	if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+		return -EINVAL;
+
+	if (s->active)
+		s->ops->clean(vgpu, engine_mask);
 	if (interface == 0) {
 		s->ops = NULL;
 		s->virtual_submission_interface = 0;
-		gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+		s->active = false;
+		gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
 		return 0;
 	}
-	ret = ops[interface]->init(vgpu);
+	ret = ops[interface]->init(vgpu, engine_mask);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3de77dfa7c59..ff175a98b19e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,6 +141,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+				     unsigned long engine_mask,
 				     unsigned int interface);
 extern const struct intel_vgpu_submission_ops
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 7a2511538f34..736bd2bc5127 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio,
 	TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
 		 unsigned int old_val, unsigned int new_val),
-	TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
+	TP_ARGS(old_id, new_id, action, reg, old_val, new_val),
 	TP_STRUCT__entry(
 		__field(int, old_id)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 4688619f6a1c..b87b19d8443c 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -258,6 +258,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	intel_gvt_debugfs_remove_vgpu(vgpu);
 	idr_remove(&gvt->vgpu_idr, vgpu->id);
+	if (idr_is_empty(&gvt->vgpu_idr))
+		intel_gvt_clean_irq(gvt);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_submission(vgpu);
 	intel_vgpu_clean_display(vgpu);
@@ -518,8 +520,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	intel_vgpu_reset_submission(vgpu, resetting_eng);
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
-		intel_vgpu_select_submission_ops(vgpu, 0);
-
+		intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
 		/*fence will not be reset during virtual reset */
 		if (dmlr) {
 			intel_vgpu_reset_gtt(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index ccb5ba043b63..95478db9998b 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1032,7 +1032,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
 	const struct drm_i915_reg_table *table = engine->reg_tables;
 	int count = engine->reg_table_count;
-	do {
+	for (; count > 0; ++table, --count) {
 		if (!table->master || is_master) {
 			const struct drm_i915_reg_descriptor *reg;
@@ -1040,7 +1040,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
 			if (reg != NULL)
 				return reg;
 		}
-	} while (table++, --count);
+	}
 	return NULL;
 }
@@ -1212,6 +1212,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
 				continue;
 			}
+			if (desc->bits[i].offset >= length) {
+				DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+						 *cmd, engine->name);
+				return false;
+			}
+
 			dword = cmd[desc->bits[i].offset] &
 				desc->bits[i].mask;
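The find_reg() rewrite fixes a counted-loop hazard: do { ... } while (table++, --count) executes the body once even when count is 0, dereferencing an engine with no register tables. The for form tests the bound before the first dereference. In general terms (struct entry and entry_matches() are hypothetical):

    /* Sketch: scan a counted table without touching element 0 when count == 0. */
    static const struct entry *example_scan(const struct entry *table, int count)
    {
            for (; count > 0; ++table, --count) {
                    if (entry_matches(table))
                            return table;
            }
            return NULL;
    }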
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6c8da9d20c33..2f5209de0391 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev)
 	intel_modeset_cleanup(dev);
-	/*
-	 * free the memory space allocated for the child device
-	 * config parsed from VBT
-	 */
-	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-		kfree(dev_priv->vbt.child_dev);
-		dev_priv->vbt.child_dev = NULL;
-		dev_priv->vbt.child_dev_num = 0;
-	}
-	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+	intel_bios_cleanup(dev_priv);
 	vga_switcheroo_unregister_client(pdev);
 	vga_client_register(pdev, NULL, NULL, NULL);
@@ -1842,6 +1830,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
 	if (IS_GEN9_LP(dev_priv) ||
 	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
 		intel_power_domains_init_hw(dev_priv, true);
+	else
+		intel_display_set_init_power(dev_priv, true);
 	i915_gem_sanitize(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index caebd5825279..d307429a5ae0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1349,6 +1349,7 @@ struct intel_vbt_data {
 		u32 size;
 		u8 *data;
 		const u8 *sequence[MIPI_SEQ_MAX];
+		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
 	} dsi;
 	int crt_ddc_pin;
@@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
 /* intel_bios.c */
 void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
@@ -3717,7 +3719,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 					    struct intel_display_error_state *error);
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+				    u32 val, int timeout_us);
+#define sandybridge_pcode_write(dev_priv, mbox, val)	\
+	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
+
 int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
 		      u32 reply_mask, u32 reply, int timeout_base_ms);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8bc3283484be..dd89abd2263d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3323,16 +3323,15 @@ i915_gem_retire_work_handler(struct work_struct *work)
 		mutex_unlock(&dev->struct_mutex);
 	}
-	/* Keep the retire handler running until we are finally idle.
+	/*
+	 * Keep the retire handler running until we are finally idle.
 	 * We do not need to do this test under locking as in the worst-case
 	 * we queue the retire worker once too often.
 	 */
-	if (READ_ONCE(dev_priv->gt.awake)) {
-		i915_queue_hangcheck(dev_priv);
+	if (READ_ONCE(dev_priv->gt.awake))
 		queue_delayed_work(dev_priv->wq,
 				   &dev_priv->gt.retire_work,
 				   round_jiffies_up_relative(HZ));
-	}
 }
 static inline bool
@@ -5283,6 +5282,8 @@ err_unlock:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_uc_fini_wq(dev_priv);
+
 	if (ret != -EIO)
 		i915_gem_cleanup_userptr(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 648e7536ff51..0c963fcf31ff 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 	case I915_CONTEXT_PARAM_PRIORITY:
 		{
-			int priority = args->value;
+			s64 priority = args->value;
 			if (args->size)
 				ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c5f393870532..7e403eaa9e0f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
 	struct pagevec *pvec = &vm->free_pages;
+	struct pagevec stash;
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
@@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 	if (likely(pvec->nr))
 		return pvec->pages[--pvec->nr];
-	/* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */
+	/*
+	 * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+	 *
+	 * We have to be careful as page allocation may trigger the shrinker
+	 * (via direct reclaim) which will fill up the WC stash underneath us.
+	 * So we add our WB pages into a temporary pvec on the stack and merge
+	 * them into the WC stash after all the allocations are complete.
+	 */
+	pagevec_init(&stash);
 	do {
 		struct page *page;
@@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 		if (unlikely(!page))
 			break;
-		pvec->pages[pvec->nr++] = page;
-	} while (pagevec_space(pvec));
+		stash.pages[stash.nr++] = page;
+	} while (stash.nr < pagevec_space(pvec));
-	if (unlikely(!pvec->nr))
-		return NULL;
+	if (stash.nr) {
+		int nr = min_t(int, stash.nr, pagevec_space(pvec));
+		struct page **pages = stash.pages + stash.nr - nr;
-	set_pages_array_wc(pvec->pages, pvec->nr);
+		if (nr && !set_pages_array_wc(pages, nr)) {
+			memcpy(pvec->pages + pvec->nr,
+			       pages, sizeof(pages[0]) * nr);
+			pvec->nr += nr;
+			stash.nr -= nr;
+		}
-	return pvec->pages[--pvec->nr];
+		pagevec_release(&stash);
+	}
+
+	return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
 }
 static void vm_free_pages_release(struct i915_address_space *vm,
@@ -1341,15 +1359,18 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
 		int count = gen8_pte_count(start, length);
 		if (pt == vm->scratch_pt) {
+			pd->used_pdes++;
+
 			pt = alloc_pt(vm);
-			if (IS_ERR(pt))
+			if (IS_ERR(pt)) {
+				pd->used_pdes--;
 				goto unwind;
+			}
 			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
 				gen8_initialize_pt(vm, pt);
 			gen8_ppgtt_set_pde(vm, pd, pt, pde);
-			pd->used_pdes++;
 			GEM_BUG_ON(pd->used_pdes > I915_PDES);
 		}
@@ -1373,13 +1394,16 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		if (pd == vm->scratch_pd) {
+			pdp->used_pdpes++;
+
 			pd = alloc_pd(vm);
-			if (IS_ERR(pd))
+			if (IS_ERR(pd)) {
+				pdp->used_pdpes--;
 				goto unwind;
+			}
 			gen8_initialize_pd(vm, pd);
 			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
-			pdp->used_pdpes++;
 			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
 			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
@@ -2287,12 +2311,23 @@ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
 	u32 fault = I915_READ(GEN8_RING_FAULT_REG);
 	if (fault & RING_FAULT_VALID) {
+		u32 fault_data0, fault_data1;
+		u64 fault_addr;
+
+		fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+		fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+			     ((u64)fault_data0 << 12);
+
 		DRM_DEBUG_DRIVER("Unexpected fault\n"
-				 "\tAddr: 0x%08lx\n"
+				 "\tAddr: 0x%08x_%08x\n"
+				 "\tAddress space: %s\n"
 				 "\tEngine ID: %d\n"
 				 "\tSource ID: %d\n"
 				 "\tType: %d\n",
-				 fault & PAGE_MASK,
+				 upper_32_bits(fault_addr),
+				 lower_32_bits(fault_addr),
+				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+				 GEN8_RING_FAULT_ENGINE_ID(fault),
+				 RING_FAULT_SRCID(fault),
+				 RING_FAULT_FAULT_TYPE(fault));
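The enlarged vm_alloc_page() comment is the key to the hunk: alloc_page() can enter direct reclaim, reclaim can invoke the i915 shrinker, and the shrinker may refill vm->free_pages while this function is mid-fill. Gathering into an on-stack pagevec and merging afterwards keeps the shared stash consistent, and also lets the code honour the return value of set_pages_array_wc(). Schematically (example_fill_stash is a hypothetical reduction of the fill phase):

    #include <linux/pagevec.h>
    #include <linux/gfp.h>

    /* Sketch: gather pages privately so reclaim can't race the shared stash. */
    static unsigned int example_fill_stash(struct pagevec *shared, gfp_t gfp,
                                           struct pagevec *stash)
    {
            pagevec_init(stash);
            while (stash->nr < pagevec_space(shared)) {
                    struct page *page = alloc_page(gfp);

                    if (!page)
                            break;
                    stash->pages[stash->nr++] = page;
            }
            /* Caller merges into 'shared' and pagevec_release()s the rest. */
            return stash->nr;
    }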
"GGTT" : "PPGTT", GEN8_RING_FAULT_ENGINE_ID(fault), RING_FAULT_SRCID(fault), RING_FAULT_FAULT_TYPE(fault)); diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index d575109f7a7f..e09d18df8b7f 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -276,6 +276,8 @@ static void mark_busy(struct drm_i915_private *i915) intel_engines_unpark(i915); + i915_queue_hangcheck(i915); + queue_delayed_work(i915->wq, &i915->gt.retire_work, round_jiffies_up_relative(HZ)); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 9029ed04879c..0e158f9287c4 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -363,13 +363,13 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE); - if (freed < sc->nr_to_scan) + if (sc->nr_scanned < sc->nr_to_scan) freed += i915_gem_shrink(i915, sc->nr_to_scan - sc->nr_scanned, &sc->nr_scanned, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); - if (freed < sc->nr_to_scan && current_is_kswapd()) { + if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { intel_runtime_pm_get(i915); freed += i915_gem_shrink(i915, sc->nr_to_scan - sc->nr_scanned, diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c index 42ff06fe54a3..792facdb6702 100644 --- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c +++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c @@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv) { - strncpy(dev_priv->perf.oa.test_config.uuid, + strlcpy(dev_priv->perf.oa.test_config.uuid, "577e8e2c-3fa0-4875-8743-3538d585e3b0", - UUID_STRING_LEN); + sizeof(dev_priv->perf.oa.test_config.uuid)); dev_priv->perf.oa.test_config.id = 1; dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c index ff0ac3627cc4..ba9140c87cc0 100644 --- a/drivers/gpu/drm/i915/i915_oa_cnl.c +++ b/drivers/gpu/drm/i915/i915_oa_cnl.c @@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv) { - strncpy(dev_priv->perf.oa.test_config.uuid, + strlcpy(dev_priv->perf.oa.test_config.uuid, "db41edd4-d8e7-4730-ad11-b9a2d6833503", - UUID_STRING_LEN); + sizeof(dev_priv->perf.oa.test_config.uuid)); dev_priv->perf.oa.test_config.id = 1; dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa; diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 36d48422b475..1c30c688f23a 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -74,19 +74,19 @@ GEN_DEFAULT_PAGE_SIZES, \ CURSOR_OFFSETS -static const struct intel_device_info intel_i830_info __initconst = { +static const struct intel_device_info intel_i830_info = { GEN2_FEATURES, .platform = INTEL_I830, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, /* legal, last one wins */ }; -static const struct intel_device_info intel_i845g_info __initconst = { +static const struct intel_device_info intel_i845g_info = { GEN2_FEATURES, .platform = INTEL_I845G, }; -static const struct intel_device_info intel_i85x_info __initconst = { +static const struct intel_device_info intel_i85x_info = { GEN2_FEATURES, 
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 36d48422b475..1c30c688f23a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -74,19 +74,19 @@
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
-static const struct intel_device_info intel_i830_info __initconst = {
+static const struct intel_device_info intel_i830_info = {
 	GEN2_FEATURES,
 	.platform = INTEL_I830,
 	.is_mobile = 1, .cursor_needs_physical = 1,
 	.num_pipes = 2, /* legal, last one wins */
 };
-static const struct intel_device_info intel_i845g_info __initconst = {
+static const struct intel_device_info intel_i845g_info = {
 	GEN2_FEATURES,
 	.platform = INTEL_I845G,
 };
-static const struct intel_device_info intel_i85x_info __initconst = {
+static const struct intel_device_info intel_i85x_info = {
 	GEN2_FEATURES,
 	.platform = INTEL_I85X, .is_mobile = 1,
 	.num_pipes = 2, /* legal, last one wins */
@@ -94,7 +94,7 @@ static const struct intel_device_info intel_i85x_info __initconst = {
 	.has_fbc = 1,
 };
-static const struct intel_device_info intel_i865g_info __initconst = {
+static const struct intel_device_info intel_i865g_info = {
 	GEN2_FEATURES,
 	.platform = INTEL_I865G,
 };
@@ -108,7 +108,7 @@ static const struct intel_device_info intel_i865g_info __initconst = {
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
-static const struct intel_device_info intel_i915g_info __initconst = {
+static const struct intel_device_info intel_i915g_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_I915G, .cursor_needs_physical = 1,
 	.has_overlay = 1, .overlay_needs_physical = 1,
@@ -116,7 +116,7 @@ static const struct intel_device_info intel_i915g_info __initconst = {
 	.unfenced_needs_alignment = 1,
 };
-static const struct intel_device_info intel_i915gm_info __initconst = {
+static const struct intel_device_info intel_i915gm_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_I915GM,
 	.is_mobile = 1,
@@ -128,7 +128,7 @@ static const struct intel_device_info intel_i915gm_info __initconst = {
 	.unfenced_needs_alignment = 1,
 };
-static const struct intel_device_info intel_i945g_info __initconst = {
+static const struct intel_device_info intel_i945g_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_I945G,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -137,7 +137,7 @@ static const struct intel_device_info intel_i945g_info __initconst = {
 	.unfenced_needs_alignment = 1,
 };
-static const struct intel_device_info intel_i945gm_info __initconst = {
+static const struct intel_device_info intel_i945gm_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_I945GM, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -148,14 +148,14 @@ static const struct intel_device_info intel_i945gm_info __initconst = {
 	.unfenced_needs_alignment = 1,
 };
-static const struct intel_device_info intel_g33_info __initconst = {
+static const struct intel_device_info intel_g33_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_G33,
 	.has_hotplug = 1,
 	.has_overlay = 1,
 };
-static const struct intel_device_info intel_pineview_info __initconst = {
+static const struct intel_device_info intel_pineview_info = {
 	GEN3_FEATURES,
 	.platform = INTEL_PINEVIEW, .is_mobile = 1,
 	.has_hotplug = 1,
@@ -172,7 +172,7 @@ static const struct intel_device_info intel_pineview_info __initconst = {
 	GEN_DEFAULT_PAGE_SIZES, \
 	CURSOR_OFFSETS
-static const struct intel_device_info intel_i965g_info __initconst = {
+static const struct intel_device_info intel_i965g_info = {
 	GEN4_FEATURES,
 	.platform = INTEL_I965G,
 	.has_overlay = 1,
@@ -180,7 +180,7 @@ static const struct intel_device_info intel_i965g_info __initconst = {
 	.has_snoop = false,
 };
-static const struct intel_device_info intel_i965gm_info __initconst = {
+static const struct intel_device_info intel_i965gm_info = {
 	GEN4_FEATURES,
 	.platform = INTEL_I965GM,
 	.is_mobile = 1, .has_fbc = 1,
@@ -190,13 +190,13 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
 	.has_snoop = false,
 };
-static const struct intel_device_info intel_g45_info __initconst = {
+static const struct intel_device_info intel_g45_info = {
 	GEN4_FEATURES,
 	.platform = INTEL_G45,
 	.ring_mask = RENDER_RING | BSD_RING,
 };
-static const struct intel_device_info intel_gm45_info __initconst = {
+static const struct intel_device_info intel_gm45_info = {
 	GEN4_FEATURES,
 	.platform = INTEL_GM45,
 	.is_mobile = 1, .has_fbc = 1,
__initconst = { GEN_DEFAULT_PAGE_SIZES, \ CURSOR_OFFSETS -static const struct intel_device_info intel_ironlake_d_info __initconst = { +static const struct intel_device_info intel_ironlake_d_info = { GEN5_FEATURES, .platform = INTEL_IRONLAKE, }; -static const struct intel_device_info intel_ironlake_m_info __initconst = { +static const struct intel_device_info intel_ironlake_m_info = { GEN5_FEATURES, .platform = INTEL_IRONLAKE, .is_mobile = 1, .has_fbc = 1, @@ -243,12 +243,12 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = { GEN6_FEATURES, \ .platform = INTEL_SANDYBRIDGE -static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = { +static const struct intel_device_info intel_sandybridge_d_gt1_info = { SNB_D_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = { +static const struct intel_device_info intel_sandybridge_d_gt2_info = { SNB_D_PLATFORM, .gt = 2, }; @@ -259,12 +259,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = .is_mobile = 1 -static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = { +static const struct intel_device_info intel_sandybridge_m_gt1_info = { SNB_M_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = { +static const struct intel_device_info intel_sandybridge_m_gt2_info = { SNB_M_PLATFORM, .gt = 2, }; @@ -288,12 +288,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = .platform = INTEL_IVYBRIDGE, \ .has_l3_dpf = 1 -static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = { +static const struct intel_device_info intel_ivybridge_d_gt1_info = { IVB_D_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = { +static const struct intel_device_info intel_ivybridge_d_gt2_info = { IVB_D_PLATFORM, .gt = 2, }; @@ -304,17 +304,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = { .is_mobile = 1, \ .has_l3_dpf = 1 -static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = { +static const struct intel_device_info intel_ivybridge_m_gt1_info = { IVB_M_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = { +static const struct intel_device_info intel_ivybridge_m_gt2_info = { IVB_M_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_ivybridge_q_info __initconst = { +static const struct intel_device_info intel_ivybridge_q_info = { GEN7_FEATURES, .platform = INTEL_IVYBRIDGE, .gt = 2, @@ -322,7 +322,7 @@ static const struct intel_device_info intel_ivybridge_q_info __initconst = { .has_l3_dpf = 1, }; -static const struct intel_device_info intel_valleyview_info __initconst = { +static const struct intel_device_info intel_valleyview_info = { .platform = INTEL_VALLEYVIEW, .gen = 7, .is_lp = 1, @@ -358,17 +358,17 @@ static const struct intel_device_info intel_valleyview_info __initconst = { .platform = INTEL_HASWELL, \ .has_l3_dpf = 1 -static const struct intel_device_info intel_haswell_gt1_info __initconst = { +static const struct intel_device_info intel_haswell_gt1_info = { HSW_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_haswell_gt2_info __initconst = { +static const struct intel_device_info intel_haswell_gt2_info = { HSW_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_haswell_gt3_info __initconst = { +static const 
struct intel_device_info intel_haswell_gt3_info = { HSW_PLATFORM, .gt = 3, }; @@ -388,17 +388,17 @@ static const struct intel_device_info intel_haswell_gt3_info __initconst = { .gen = 8, \ .platform = INTEL_BROADWELL -static const struct intel_device_info intel_broadwell_gt1_info __initconst = { +static const struct intel_device_info intel_broadwell_gt1_info = { BDW_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_broadwell_gt2_info __initconst = { +static const struct intel_device_info intel_broadwell_gt2_info = { BDW_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_broadwell_rsvd_info __initconst = { +static const struct intel_device_info intel_broadwell_rsvd_info = { BDW_PLATFORM, .gt = 3, /* According to the device ID those devices are GT3, they were @@ -406,13 +406,13 @@ static const struct intel_device_info intel_broadwell_rsvd_info __initconst = { */ }; -static const struct intel_device_info intel_broadwell_gt3_info __initconst = { +static const struct intel_device_info intel_broadwell_gt3_info = { BDW_PLATFORM, .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; -static const struct intel_device_info intel_cherryview_info __initconst = { +static const struct intel_device_info intel_cherryview_info = { .gen = 8, .num_pipes = 3, .has_hotplug = 1, .is_lp = 1, @@ -455,12 +455,12 @@ static const struct intel_device_info intel_cherryview_info __initconst = { .gen = 9, \ .platform = INTEL_SKYLAKE -static const struct intel_device_info intel_skylake_gt1_info __initconst = { +static const struct intel_device_info intel_skylake_gt1_info = { SKL_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_skylake_gt2_info __initconst = { +static const struct intel_device_info intel_skylake_gt2_info = { SKL_PLATFORM, .gt = 2, }; @@ -470,12 +470,12 @@ static const struct intel_device_info intel_skylake_gt2_info __initconst = { .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING -static const struct intel_device_info intel_skylake_gt3_info __initconst = { +static const struct intel_device_info intel_skylake_gt3_info = { SKL_GT3_PLUS_PLATFORM, .gt = 3, }; -static const struct intel_device_info intel_skylake_gt4_info __initconst = { +static const struct intel_device_info intel_skylake_gt4_info = { SKL_GT3_PLUS_PLATFORM, .gt = 4, }; @@ -511,13 +511,13 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = { IVB_CURSOR_OFFSETS, \ BDW_COLORS -static const struct intel_device_info intel_broxton_info __initconst = { +static const struct intel_device_info intel_broxton_info = { GEN9_LP_FEATURES, .platform = INTEL_BROXTON, .ddb_size = 512, }; -static const struct intel_device_info intel_geminilake_info __initconst = { +static const struct intel_device_info intel_geminilake_info = { GEN9_LP_FEATURES, .platform = INTEL_GEMINILAKE, .ddb_size = 1024, @@ -529,17 +529,17 @@ static const struct intel_device_info intel_geminilake_info __initconst = { .gen = 9, \ .platform = INTEL_KABYLAKE -static const struct intel_device_info intel_kabylake_gt1_info __initconst = { +static const struct intel_device_info intel_kabylake_gt1_info = { KBL_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_kabylake_gt2_info __initconst = { +static const struct intel_device_info intel_kabylake_gt2_info = { KBL_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_kabylake_gt3_info __initconst = { +static const struct intel_device_info intel_kabylake_gt3_info = { KBL_PLATFORM, .gt = 3, 
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, @@ -550,17 +550,17 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = { .gen = 9, \ .platform = INTEL_COFFEELAKE -static const struct intel_device_info intel_coffeelake_gt1_info __initconst = { +static const struct intel_device_info intel_coffeelake_gt1_info = { CFL_PLATFORM, .gt = 1, }; -static const struct intel_device_info intel_coffeelake_gt2_info __initconst = { +static const struct intel_device_info intel_coffeelake_gt2_info = { CFL_PLATFORM, .gt = 2, }; -static const struct intel_device_info intel_coffeelake_gt3_info __initconst = { +static const struct intel_device_info intel_coffeelake_gt3_info = { CFL_PLATFORM, .gt = 3, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, @@ -571,7 +571,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info __initconst = { .ddb_size = 1024, \ GLK_COLORS -static const struct intel_device_info intel_cannonlake_gt2_info __initconst = { +static const struct intel_device_info intel_cannonlake_gt2_info = { GEN10_FEATURES, .is_alpha_support = 1, .platform = INTEL_CANNONLAKE, diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index f0cfdece14ae..0be50e43507d 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -244,7 +244,7 @@ * The two separate pointers let us decouple read()s from tail pointer aging. * * The tail pointers are checked and updated at a limited rate within a hrtimer - * callback (the same callback that is used for delivering POLLIN events) + * callback (the same callback that is used for delivering EPOLLIN events) * * Initially the tails are marked invalid with %INVALID_TAIL_PTR which * indicates that an updated tail pointer is needed. @@ -2292,13 +2292,13 @@ static ssize_t i915_perf_read(struct file *file, mutex_unlock(&dev_priv->perf.lock); } - /* We allow the poll checking to sometimes report false positive POLLIN + /* We allow the poll checking to sometimes report false positive EPOLLIN * events where we might actually report EAGAIN on read() if there's * not really any data available. In this situation though we don't - * want to enter a busy loop between poll() reporting a POLLIN event + * want to enter a busy loop between poll() reporting an EPOLLIN event * and read() returning -EAGAIN. Clearing the oa.pollin state here * effectively ensures we back off until the next hrtimer callback - * before reporting another POLLIN event. + * before reporting another EPOLLIN event. */ if (ret >= 0 || ret == -EAGAIN) { /* Maybe make ->pollin per-stream state if we support multiple @@ -2342,12 +2342,12 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) * * Returns: any poll events that are ready without sleeping */ -static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv, +static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv, struct i915_perf_stream *stream, struct file *file, poll_table *wait) { - unsigned int events = 0; + __poll_t events = 0; stream->ops->poll_wait(stream, file, wait); @@ -2358,7 +2358,7 @@ static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv, * samples to read.
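 *
 * [Editorial aside, not part of the patch: EPOLLIN and friends are the
 * __poll_t-typed event flags, so sparse can flag accidental mixing with
 * plain integers. A minimal sketch of a new-style ->poll() method, with
 * hypothetical names "my_wq" (a wait_queue_head_t) and "my_data_ready":
 *
 *	static __poll_t my_poll(struct file *file, poll_table *wait)
 *	{
 *		poll_wait(file, &my_wq, wait);
 *		return my_data_ready ? EPOLLIN | EPOLLRDNORM : 0;
 *	}
 *
 * which matches the __poll_t conversions of i915_perf_poll() and
 * i915_perf_poll_locked() in this hunk.]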
*/ if (dev_priv->perf.oa.pollin) - events |= POLLIN; + events |= EPOLLIN; return events; } @@ -2376,11 +2376,11 @@ static unsigned int i915_perf_poll_locked(struct drm_i915_private *dev_priv, * * Returns: any poll events that are ready without sleeping */ -static unsigned int i915_perf_poll(struct file *file, poll_table *wait) +static __poll_t i915_perf_poll(struct file *file, poll_table *wait) { struct i915_perf_stream *stream = file->private_data; struct drm_i915_private *dev_priv = stream->dev_priv; - int ret; + __poll_t ret; mutex_lock(&dev_priv->perf.lock); ret = i915_perf_poll_locked(dev_priv, stream, file, wait); diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 55a8a1e29424..0e9b98c32b62 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915) return sum; } -static void i915_pmu_event_destroy(struct perf_event *event) +static void engine_event_destroy(struct perf_event *event) { - WARN_ON(event->parent); + struct drm_i915_private *i915 = + container_of(event->pmu, typeof(*i915), pmu.base); + struct intel_engine_cs *engine; + + engine = intel_engine_lookup_user(i915, + engine_event_class(event), + engine_event_instance(event)); + if (WARN_ON_ONCE(!engine)) + return; + + if (engine_event_sample(event) == I915_SAMPLE_BUSY && + intel_engine_supports_stats(engine)) + intel_disable_engine_stats(engine); } -static int engine_event_init(struct perf_event *event) +static void i915_pmu_event_destroy(struct perf_event *event) { - struct drm_i915_private *i915 = - container_of(event->pmu, typeof(*i915), pmu.base); + WARN_ON(event->parent); - if (!intel_engine_lookup_user(i915, engine_event_class(event), - engine_event_instance(event))) - return -ENODEV; + if (is_engine_event(event)) + engine_event_destroy(event); +} - switch (engine_event_sample(event)) { +static int +engine_event_status(struct intel_engine_cs *engine, + enum drm_i915_pmu_engine_sample sample) +{ + switch (sample) { case I915_SAMPLE_BUSY: case I915_SAMPLE_WAIT: break; case I915_SAMPLE_SEMA: - if (INTEL_GEN(i915) < 6) + if (INTEL_GEN(engine->i915) < 6) return -ENODEV; break; default: @@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event) return 0; } +static int engine_event_init(struct perf_event *event) +{ + struct drm_i915_private *i915 = + container_of(event->pmu, typeof(*i915), pmu.base); + struct intel_engine_cs *engine; + u8 sample; + int ret; + + engine = intel_engine_lookup_user(i915, engine_event_class(event), + engine_event_instance(event)); + if (!engine) + return -ENODEV; + + sample = engine_event_sample(event); + ret = engine_event_status(engine, sample); + if (ret) + return ret; + + if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) + ret = intel_enable_engine_stats(engine); + + return ret; +} + static int i915_pmu_event_init(struct perf_event *event) { struct drm_i915_private *i915 = @@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event) return 0; } -static u64 __i915_pmu_event_read(struct perf_event *event) +static u64 __get_rc6(struct drm_i915_private *i915) +{ + u64 val; + + val = intel_rc6_residency_ns(i915, + IS_VALLEYVIEW(i915) ? 
+ VLV_GT_RENDER_RC6 : + GEN6_GT_GFX_RC6); + + if (HAS_RC6p(i915)) + val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); + + if (HAS_RC6pp(i915)) + val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); + + return val; +} + +static u64 get_rc6(struct drm_i915_private *i915, bool locked) +{ +#if IS_ENABLED(CONFIG_PM) + unsigned long flags; + u64 val; + + if (intel_runtime_pm_get_if_in_use(i915)) { + val = __get_rc6(i915); + intel_runtime_pm_put(i915); + + /* + * If we are coming back from being runtime suspended we must + * be careful not to report a larger value than returned + * previously. + */ + + if (!locked) + spin_lock_irqsave(&i915->pmu.lock, flags); + + if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; + i915->pmu.sample[__I915_SAMPLE_RC6].cur = val; + } else { + val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + } + + if (!locked) + spin_unlock_irqrestore(&i915->pmu.lock, flags); + } else { + struct pci_dev *pdev = i915->drm.pdev; + struct device *kdev = &pdev->dev; + unsigned long flags2; + + /* + * We are runtime suspended. + * + * Report the delta from when the device was suspended to now, + * on top of the last known real value, as the approximated RC6 + * counter value. + */ + if (!locked) + spin_lock_irqsave(&i915->pmu.lock, flags); + + spin_lock_irqsave(&kdev->power.lock, flags2); + + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + i915->pmu.suspended_jiffies_last = + kdev->power.suspended_jiffies; + + val = kdev->power.suspended_jiffies - + i915->pmu.suspended_jiffies_last; + val += jiffies - kdev->power.accounting_timestamp; + + spin_unlock_irqrestore(&kdev->power.lock, flags2); + + val = jiffies_to_nsecs(val); + val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + + if (!locked) + spin_unlock_irqrestore(&i915->pmu.lock, flags); + } + + return val; +#else + return __get_rc6(i915); +#endif +} + +static u64 __i915_pmu_event_read(struct perf_event *event, bool locked) { struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); @@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event) if (WARN_ON_ONCE(!engine)) { /* Do nothing */ } else if (sample == I915_SAMPLE_BUSY && - engine->pmu.busy_stats) { + intel_engine_supports_stats(engine)) { val = ktime_to_ns(intel_engine_get_busy_time(engine)); } else { val = engine->pmu.sample[sample].cur; @@ -408,18 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event) val = count_interrupts(i915); break; case I915_PMU_RC6_RESIDENCY: - intel_runtime_pm_get(i915); - val = intel_rc6_residency_ns(i915, - IS_VALLEYVIEW(i915) ? 
- VLV_GT_RENDER_RC6 : - GEN6_GT_GFX_RC6); - if (HAS_RC6p(i915)) - val += intel_rc6_residency_ns(i915, - GEN6_GT_GFX_RC6p); - if (HAS_RC6pp(i915)) - val += intel_rc6_residency_ns(i915, - GEN6_GT_GFX_RC6pp); - intel_runtime_pm_put(i915); + val = get_rc6(i915, locked); break; } } @@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event) again: prev = local64_read(&hwc->prev_count); - new = __i915_pmu_event_read(event); + new = __i915_pmu_event_read(event, false); if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) goto again; @@ -442,12 +557,6 @@ again: local64_add(new - prev, &event->count); } -static bool engine_needs_busy_stats(struct intel_engine_cs *engine) -{ - return intel_engine_supports_stats(engine) && - (engine->pmu.enable & BIT(I915_SAMPLE_BUSY)); -} - static void i915_pmu_enable(struct perf_event *event) { struct drm_i915_private *i915 = @@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event) GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); - if (engine->pmu.enable_count[sample]++ == 0) { - /* - * Enable engine busy stats tracking if needed or - * alternatively cancel the scheduled disable. - * - * If the delayed disable was pending, cancel it and - * in this case do not enable since it already is. - */ - if (engine_needs_busy_stats(engine) && - !engine->pmu.busy_stats) { - engine->pmu.busy_stats = true; - if (!cancel_delayed_work(&engine->pmu.disable_busy_stats)) - intel_enable_engine_stats(engine); - } - } + engine->pmu.enable_count[sample]++; } /* @@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event) * for all listeners. Even when the event was already enabled and has * an existing non-zero value. */ - local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); + local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true)); spin_unlock_irqrestore(&i915->pmu.lock, flags); } -static void __disable_busy_stats(struct work_struct *work) -{ - struct intel_engine_cs *engine = - container_of(work, typeof(*engine), pmu.disable_busy_stats.work); - - intel_disable_engine_stats(engine); -} - static void i915_pmu_disable(struct perf_event *event) { struct drm_i915_private *i915 = @@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event) * Decrement the reference count and clear the enabled * bitmask when the last listener on an event goes away. */ - if (--engine->pmu.enable_count[sample] == 0) { + if (--engine->pmu.enable_count[sample] == 0) engine->pmu.enable &= ~BIT(sample); - if (!engine_needs_busy_stats(engine) && - engine->pmu.busy_stats) { - engine->pmu.busy_stats = false; - /* - * We request a delayed disable to handle the - * rapid on/off cycles on events, which can - * happen when tools like perf stat start, in a - * nicer way. - * - * In addition, this also helps with busy stats - * accuracy with background CPU offline/online - * migration events. 
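 *
 * [Editorial aside, not part of the patch: the delayed-disable scheme
 * removed here is replaced by tying busy-stats enablement directly to
 * the perf event lifetime, roughly:
 *
 *	engine_event_init():    if (sample == I915_SAMPLE_BUSY &&
 *	                            intel_engine_supports_stats(engine))
 *	                                intel_enable_engine_stats(engine);
 *	engine_event_destroy(): the matching intel_disable_engine_stats();
 *
 * with rapid enable/disable cycles absorbed by the reference count kept
 * inside intel_enable_engine_stats() itself (engine->stats.enabled).]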
- */ - queue_delayed_work(system_wq, - &engine->pmu.disable_busy_stats, - round_jiffies_up_relative(HZ)); - } - } } GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); @@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915) void i915_pmu_register(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; int ret; if (INTEL_GEN(i915) <= 2) { @@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915) hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); i915->pmu.timer.function = i915_sample; - for_each_engine(engine, i915, id) - INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats, - __disable_busy_stats); - ret = perf_pmu_register(&i915->pmu.base, "i915", -1); if (ret) goto err; @@ -843,9 +906,6 @@ err: void i915_pmu_unregister(struct drm_i915_private *i915) { - struct intel_engine_cs *engine; - enum intel_engine_id id; - if (!i915->pmu.base.event_init) return; @@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915) hrtimer_cancel(&i915->pmu.timer); - for_each_engine(engine, i915, id) { - GEM_BUG_ON(engine->pmu.busy_stats); - flush_delayed_work(&engine->pmu.disable_busy_stats); - } - i915_pmu_unregister_cpuhp_state(i915); perf_pmu_unregister(&i915->pmu.base); diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 40c154d13565..bb62df15afa4 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -27,6 +27,8 @@ enum { __I915_SAMPLE_FREQ_ACT = 0, __I915_SAMPLE_FREQ_REQ, + __I915_SAMPLE_RC6, + __I915_SAMPLE_RC6_ESTIMATED, __I915_NUM_PMU_SAMPLERS }; @@ -94,6 +96,10 @@ struct i915_pmu { * struct intel_engine_cs. */ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; + /** + * @suspended_jiffies_last: Cached suspend time from PM core. 
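 *
 * [Editorial aside, not part of the patch: while the device is runtime
 * suspended, get_rc6() approximates residency from PM-core bookkeeping:
 *
 *	estimate = last_real_rc6 +
 *		   jiffies_to_nsecs((suspended_jiffies -
 *				     suspended_jiffies_last) +
 *				    (jiffies - accounting_timestamp));
 *
 * After resume, a fresh hardware read replaces the estimate only once it
 * catches up (val >= estimate), so the counter reported to perf never
 * goes backwards.]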
+ */ + unsigned long suspended_jiffies_last; }; #ifdef CONFIG_PERF_EVENTS diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 505c605eff98..a2108e35c599 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2489,6 +2489,8 @@ enum i915_power_well_id { #define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10) #define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14) +#define FAULT_VA_HIGH_BITS (0xf << 0) +#define FAULT_GTT_SEL (1 << 4) #define FPGA_DBG _MMIO(0x42300) #define FPGA_DBG_RM_NOCLAIM (1<<31) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index c74a20b80182..b33d2158c234 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -445,13 +445,13 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, return ret ?: count; } -static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL); -static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL); -static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store); -static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); -static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); +static DEVICE_ATTR_RO(gt_act_freq_mhz); +static DEVICE_ATTR_RO(gt_cur_freq_mhz); +static DEVICE_ATTR_RW(gt_boost_freq_mhz); +static DEVICE_ATTR_RW(gt_max_freq_mhz); +static DEVICE_ATTR_RW(gt_min_freq_mhz); -static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL); +static DEVICE_ATTR_RO(vlv_rpe_freq_mhz); static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index f1502a0188eb..522d54fecb53 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -779,7 +779,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, { struct intel_encoder *encoder; - if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes)) + if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) return NULL; /* MST */ diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 51108ffc28d1..b49a2df44430 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total) return 0; } +/* + * Get len of pre-fixed deassert fragment from a v1 init OTP sequence, + * skip all delay + gpio operands and stop at the first DSI packet op. + */ +static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv) +{ + const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; + int index, len; + + if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1)) + return 0; + + /* index = 1 to skip sequence byte */ + for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) { + switch (data[index]) { + case MIPI_SEQ_ELEM_SEND_PKT: + return index == 1 ? 0 : index; + case MIPI_SEQ_ELEM_DELAY: + len = 5; /* 1 byte for operand + uint32 */ + break; + case MIPI_SEQ_ELEM_GPIO: + len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */ + break; + default: + return 0; + } + } + + return 0; +} + +/* + * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence. 
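 *
 * [Editorial aside, not part of the patch: a v1 sequence, as walked by
 * get_init_otp_deassert_fragment_len() above, is laid out as
 *
 *	byte 0:      sequence id (e.g. MIPI_SEQ_INIT_OTP)
 *	bytes 1..n:  elements <op><operands>, where MIPI_SEQ_ELEM_DELAY
 *	             is 5 bytes (op + u32) and MIPI_SEQ_ELEM_GPIO is
 *	             3 bytes (op + gpio_nr + value)
 *	last byte:   MIPI_SEQ_ELEM_END
 *
 * so the deassert fragment is everything up to the first
 * MIPI_SEQ_ELEM_SEND_PKT element.]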
+ * The deassert must be done before calling intel_dsi_device_ready, so for + * these devices we split the init OTP sequence into a deassert sequence and + * the actual init OTP part. + */ +static void fixup_mipi_sequences(struct drm_i915_private *dev_priv) +{ + u8 *init_otp; + int len; + + /* Limit this to VLV for now. */ + if (!IS_VALLEYVIEW(dev_priv)) + return; + + /* Limit this to v1 vid-mode sequences */ + if (dev_priv->vbt.dsi.config->is_cmd_mode || + dev_priv->vbt.dsi.seq_version != 1) + return; + + /* Only do this if there are otp and assert seqs and no deassert seq */ + if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] || + !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] || + dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) + return; + + /* The deassert-sequence ends at the first DSI packet */ + len = get_init_otp_deassert_fragment_len(dev_priv); + if (!len) + return; + + DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n"); + + /* Copy the fragment, update seq byte and terminate it */ + init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; + dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL); + if (!dev_priv->vbt.dsi.deassert_seq) + return; + dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET; + dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END; + /* Use the copy for deassert */ + dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] = + dev_priv->vbt.dsi.deassert_seq; + /* Replace the last byte of the fragment with init OTP seq byte */ + init_otp[len - 1] = MIPI_SEQ_INIT_OTP; + /* And make MIPI_SEQ_INIT_OTP point to it */ + dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1; +} + static void parse_mipi_sequence(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) @@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv, dev_priv->vbt.dsi.size = seq_size; dev_priv->vbt.dsi.seq_version = sequence->version; + fixup_mipi_sequences(dev_priv); + DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n"); return; @@ -1107,6 +1189,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv, } static const u8 cnp_ddc_pin_map[] = { + [0] = 0, /* N/A */ [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT, [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT, [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */ @@ -1115,9 +1198,14 @@ static const u8 cnp_ddc_pin_map[] = { static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) { - if (HAS_PCH_CNP(dev_priv) && - vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) - return cnp_ddc_pin_map[vbt_pin]; + if (HAS_PCH_CNP(dev_priv)) { + if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) { + return cnp_ddc_pin_map[vbt_pin]; + } else { + DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin); + return 0; + } + } return vbt_pin; } @@ -1323,11 +1411,13 @@ parse_general_definitions(struct drm_i915_private *dev_priv, expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE; } else if (bdb->version == 195) { expected_size = 37; - } else if (bdb->version <= 197) { + } else if (bdb->version <= 215) { expected_size = 38; + } else if (bdb->version <= 216) { + expected_size = 39; } else { - expected_size = 38; - BUILD_BUG_ON(sizeof(*child) < 38); + expected_size = sizeof(*child); + BUILD_BUG_ON(sizeof(*child) < 39); DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n", bdb->version, expected_size); } @@ -1581,6 +1671,29 @@ out: } /** + * intel_bios_cleanup - Free any resources allocated by
intel_bios_init() + * @dev_priv: i915 device instance + */ +void intel_bios_cleanup(struct drm_i915_private *dev_priv) +{ + kfree(dev_priv->vbt.child_dev); + dev_priv->vbt.child_dev = NULL; + dev_priv->vbt.child_dev_num = 0; + kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); + dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; + kfree(dev_priv->vbt.lfp_lvds_vbt_mode); + dev_priv->vbt.lfp_lvds_vbt_mode = NULL; + kfree(dev_priv->vbt.dsi.data); + dev_priv->vbt.dsi.data = NULL; + kfree(dev_priv->vbt.dsi.pps); + dev_priv->vbt.dsi.pps = NULL; + kfree(dev_priv->vbt.dsi.config); + dev_priv->vbt.dsi.config = NULL; + kfree(dev_priv->vbt.dsi.deassert_seq); + dev_priv->vbt.dsi.deassert_seq = NULL; +} + +/** * intel_bios_is_tv_present - is integrated TV present in VBT * @dev_priv: i915 device instance * diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 58c624f982d9..f54ddda9fdad 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -149,17 +149,6 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t) return; mod_timer(&b->fake_irq, jiffies + 1); - - /* Ensure that even if the GPU hangs, we get woken up. - * - * However, note that if no one is waiting, we never notice - * a gpu hang. Eventually, we will have to wait for a resource - * held by the GPU and so trigger a hangcheck. In the most - * pathological case, this will be upon memory starvation! To - * prevent this, we also queue the hangcheck from the retire - * worker. - */ - i915_queue_hangcheck(engine->i915); } static void irq_enable(struct intel_engine_cs *engine) @@ -605,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine, spin_unlock_irq(&b->rb_lock); } -static bool signal_valid(const struct drm_i915_gem_request *request) -{ - return intel_wait_check_request(&request->signaling.wait, request); -} - static bool signal_complete(const struct drm_i915_gem_request *request) { if (!request) return false; - /* If another process served as the bottom-half it may have already - * signalled that this wait is already completed. - */ - if (intel_wait_complete(&request->signaling.wait)) - return signal_valid(request); - - /* Carefully check if the request is complete, giving time for the + /* + * Carefully check if the request is complete, giving time for the * seqno to be visible or if the GPU hung. 
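 *
 * [Editorial aside, not part of the patch: the signaler loop below pairs
 * this with an explicit already-signaled test,
 *
 *	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
 *		dma_fence_signal(fence);
 *
 * dma_fence_signal() tolerates repeated calls, so the test only avoids
 * taking the fence lock and the bottom-half dance when another path has
 * already signaled the request.]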
*/ - if (__i915_request_irq_complete(request)) - return true; - - return false; + return __i915_request_irq_complete(request); } static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) @@ -670,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg) request = i915_gem_request_get_rcu(request); rcu_read_unlock(); if (signal_complete(request)) { - local_bh_disable(); - dma_fence_signal(&request->fence); - local_bh_enable(); /* kick start the tasklets */ + if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, + &request->fence.flags)) { + local_bh_disable(); + dma_fence_signal(&request->fence); + GEM_BUG_ON(!i915_gem_request_completed(request)); + local_bh_enable(); /* kick start the tasklets */ + } spin_lock_irq(&b->rb_lock); diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index d77e2bec1e29..1704c8897afd 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -1370,10 +1370,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, break; } - /* Inform power controller of upcoming frequency change */ + /* + * Inform power controller of upcoming frequency change. BSpec + * requires us to wait up to 150usec, but that leads to timeouts; + * the 2ms used here is based on experiment. + */ mutex_lock(&dev_priv->pcu_lock); - ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - 0x80000000); + ret = sandybridge_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + 0x80000000, 2000); mutex_unlock(&dev_priv->pcu_lock); if (ret) { @@ -1404,8 +1409,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, I915_WRITE(CDCLK_CTL, val); mutex_lock(&dev_priv->pcu_lock); - ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_state->voltage_level); + /* + * The timeout isn't specified, the 2ms used here is based on + * experiment. + * FIXME: Waiting for the request completion could be delayed until + * the next PCODE request based on BSpec. + */ + ret = sandybridge_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + cdclk_state->voltage_level, 2000); mutex_unlock(&dev_priv->pcu_lock); if (ret) { @@ -1940,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) min_cdclk = max(2 * 96000, min_cdclk); + /* + * On Valleyview some DSI panels lose (v|h)sync when the clock is lower + * than 320000KHz. 
+ */ + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) && + IS_VALLEYVIEW(dev_priv)) + min_cdclk = max(320000, min_cdclk); + if (min_cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n", min_cdclk, dev_priv->max_cdclk_freq); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 0cd355978ab4..f288bcc7be22 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5661,8 +5661,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc, if (!crtc_state->base.active) return 0; - mask = BIT(POWER_DOMAIN_PIPE(pipe)); - mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); + mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe)); + mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder)); if (crtc_state->pch_pfit.enabled || crtc_state->pch_pfit.force_thru) mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); @@ -5674,7 +5674,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc, } if (HAS_DDI(dev_priv) && crtc_state->has_audio) - mask |= BIT(POWER_DOMAIN_AUDIO); + mask |= BIT_ULL(POWER_DOMAIN_AUDIO); if (crtc_state->shared_dpll) mask |= BIT_ULL(POWER_DOMAIN_PLLS); diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 05907fa8a553..cf8fef8b6f58 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -328,14 +328,22 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) return; failure_handling: - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", - intel_connector->base.base.id, - intel_connector->base.name, - intel_dp->link_rate, intel_dp->lane_count); - if (!intel_dp_get_link_train_fallback_values(intel_dp, - intel_dp->link_rate, - intel_dp->lane_count)) - /* Schedule a Hotplug Uevent to userspace to start modeset */ - schedule_work(&intel_connector->modeset_retry_work); + /* Don't fall back and prune modes if it's eDP */ + if (!intel_dp_is_edp(intel_dp)) { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", + intel_connector->base.base.id, + intel_connector->base.name, + intel_dp->link_rate, intel_dp->lane_count); + if (!intel_dp_get_link_train_fallback_values(intel_dp, + intel_dp->link_rate, + intel_dp->lane_count)) + /* Schedule a Hotplug Uevent to userspace to start modeset */ + schedule_work(&intel_connector->modeset_retry_work); + } else { + DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d", + intel_connector->base.base.id, + intel_connector->base.name, + intel_dp->link_rate, intel_dp->lane_count); + } return; } diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 6bb51a502b8b..fa960cfd2764 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine) struct drm_i915_private *dev_priv = engine->i915; bool idle = true; - intel_runtime_pm_get(dev_priv); + /* If the whole device is asleep, the engine must be idle */ + if (!intel_runtime_pm_get_if_in_use(dev_priv)) + return true; /* First check that no commands are left in the ring */ if ((I915_READ_HEAD(engine) & HEAD_ADDR) != @@ -1943,24 +1945,42 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance) */ int intel_enable_engine_stats(struct intel_engine_cs *engine) { + struct intel_engine_execlists *execlists = 
&engine->execlists; unsigned long flags; + int err = 0; if (!intel_engine_supports_stats(engine)) return -ENODEV; + tasklet_disable(&execlists->tasklet); spin_lock_irqsave(&engine->stats.lock, flags); - if (engine->stats.enabled == ~0) - goto busy; - if (engine->stats.enabled++ == 0) + + if (unlikely(engine->stats.enabled == ~0)) { + err = -EBUSY; + goto unlock; + } + + if (engine->stats.enabled++ == 0) { + const struct execlist_port *port = execlists->port; + unsigned int num_ports = execlists_num_ports(execlists); + engine->stats.enabled_at = ktime_get(); - spin_unlock_irqrestore(&engine->stats.lock, flags); - return 0; + /* XXX submission method oblivious? */ + while (num_ports-- && port_isset(port)) { + engine->stats.active++; + port++; + } -busy: + if (engine->stats.active) + engine->stats.start = engine->stats.enabled_at; + } + +unlock: spin_unlock_irqrestore(&engine->stats.lock, flags); + tasklet_enable(&execlists->tasklet); - return -EBUSY; + return err; } static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c index cbc51c960425..3b0932942857 100644 --- a/drivers/gpu/drm/i915/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/intel_guc_fw.c @@ -39,9 +39,6 @@ #define KBL_FW_MAJOR 9 #define KBL_FW_MINOR 39 -#define GLK_FW_MAJOR 10 -#define GLK_FW_MINOR 56 - #define GUC_FW_PATH(platform, major, minor) \ "i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin" @@ -54,8 +51,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE); #define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR) MODULE_FIRMWARE(I915_KBL_GUC_UCODE); -#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR) - static void guc_fw_select(struct intel_uc_fw *guc_fw) { struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw); @@ -82,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw) guc_fw->path = I915_KBL_GUC_UCODE; guc_fw->major_ver_wanted = KBL_FW_MAJOR; guc_fw->minor_ver_wanted = KBL_FW_MINOR; - } else if (IS_GEMINILAKE(dev_priv)) { - guc_fw->path = I915_GLK_GUC_UCODE; - guc_fw->major_ver_wanted = GLK_FW_MAJOR; - guc_fw->minor_ver_wanted = GLK_FW_MINOR; } else { DRM_WARN("%s: No firmware known for this platform!\n", intel_uc_fw_type_repr(guc_fw->type)); diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 31f01d64c021..348a4f7ffb67 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -411,7 +411,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) struct intel_engine_cs *engine; enum intel_engine_id id; unsigned int hung = 0, stuck = 0; - int busy_count = 0; if (!i915_modparams.enable_hangcheck) return; @@ -429,7 +428,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work) intel_uncore_arm_unclaimed_mmio_detection(dev_priv); for_each_engine(engine, dev_priv, id) { - const bool busy = intel_engine_has_waiter(engine); struct intel_engine_hangcheck hc; semaphore_clear_deadlocks(dev_priv); @@ -443,16 +441,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (hc.action != ENGINE_DEAD) stuck |= intel_engine_flag(engine); } - - busy_count += busy; } if (hung) hangcheck_declare_hang(dev_priv, hung, stuck); /* Reset timer in case GPU hangs without another request being added */ - if (busy_count) - i915_queue_hangcheck(dev_priv); + i915_queue_hangcheck(dev_priv); } void intel_engine_init_hangcheck(struct intel_engine_cs 
*engine) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index bced7b954d93..179d0ad3889d 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1595,12 +1595,20 @@ intel_hdmi_set_edid(struct drm_connector *connector) struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct edid *edid; bool connected = false; + struct i2c_adapter *i2c; intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); - edid = drm_get_edid(connector, - intel_gmbus_get_adapter(dev_priv, - intel_hdmi->ddc_bus)); + i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus); + + edid = drm_get_edid(connector, i2c); + + if (!edid && !intel_gmbus_is_forced_bit(i2c)) { + DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n"); + intel_gmbus_force_bit(i2c, true); + edid = drm_get_edid(connector, i2c); + intel_gmbus_force_bit(i2c, false); + } intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c index 974be3defa70..8ed05182f944 100644 --- a/drivers/gpu/drm/i915/intel_huc.c +++ b/drivers/gpu/drm/i915/intel_huc.c @@ -54,10 +54,6 @@ #define KBL_HUC_FW_MINOR 00 #define KBL_BLD_NUM 1810 -#define GLK_HUC_FW_MAJOR 02 -#define GLK_HUC_FW_MINOR 00 -#define GLK_BLD_NUM 1748 - #define HUC_FW_PATH(platform, major, minor, bld_num) \ "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \ __stringify(minor) "_" __stringify(bld_num) ".bin" @@ -74,9 +70,6 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE); KBL_HUC_FW_MINOR, KBL_BLD_NUM) MODULE_FIRMWARE(I915_KBL_HUC_UCODE); -#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \ - GLK_HUC_FW_MINOR, GLK_BLD_NUM) - static void huc_fw_select(struct intel_uc_fw *huc_fw) { struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw); @@ -103,10 +96,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw) huc_fw->path = I915_KBL_HUC_UCODE; huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR; huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR; - } else if (IS_GEMINILAKE(dev_priv)) { - huc_fw->path = I915_GLK_HUC_UCODE; - huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR; - huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR; } else { DRM_WARN("%s: No firmware known for this platform!\n", intel_uc_fw_type_repr(huc_fw->type)); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 1db79a860b96..1a6e699e19e0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -9149,8 +9149,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val return 0; } -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, - u32 mbox, u32 val) +int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, + u32 mbox, u32 val, int timeout_us) { int status; @@ -9173,7 +9173,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, if (__intel_wait_for_register_fw(dev_priv, GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0, - 500, 0, NULL)) { + timeout_us, 0, NULL)) { DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n", val, mbox, __builtin_return_address(0)); return -ETIMEDOUT; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index c5ff203e42d6..a0e7a6c2a57c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -366,20 +366,6 @@ struct intel_engine_cs { */ #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) struct 
i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; - /** - * @busy_stats: Has enablement of engine stats tracking been - * requested. - */ - bool busy_stats; - /** - * @disable_busy_stats: Work item for busy stats disabling. - * - * Same as with @enable_busy_stats action, with the difference - * that we delay it in case there are rapid enable-disable - * actions, which can happen during tool startup (like perf - * stat). - */ - struct delayed_work disable_busy_stats; } pmu; /* diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c index 907deac6e3fa..d82ca0f438f5 100644 --- a/drivers/gpu/drm/i915/intel_uc.c +++ b/drivers/gpu/drm/i915/intel_uc.c @@ -209,8 +209,6 @@ void intel_uc_fini_wq(struct drm_i915_private *dev_priv) if (!USES_GUC(dev_priv)) return; - GEM_BUG_ON(!HAS_GUC(dev_priv)); - intel_guc_fini_wq(&dev_priv->guc); } diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index e3d7745a9151..98dff6058d3c 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -412,6 +412,8 @@ struct child_device_config { u16 dp_gpio_pin_num; /* 195 */ u8 dp_iboost_level:4; /* 196 */ u8 hdmi_iboost_level:4; /* 196 */ + u8 dp_max_link_rate:2; /* 216 CNL+ */ + u8 dp_max_link_rate_reserved:6; /* 216 */ } __packed; struct bdb_general_definitions { diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c index fe15aa64086f..71fe60e5f01f 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c @@ -698,7 +698,7 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw, val &= div_mask(width); return divider_recalc_rate(hw, parent_rate, val, NULL, - postdiv->flags); + postdiv->flags, width); } static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw, diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c index 90075b676256..c79160c37f84 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/arb.c +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c @@ -213,8 +213,10 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { uint32_t type; + int domain = pci_domain_nr(dev->pdev->bus); - pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type); + pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1), + 0x7c, &type); sim_data.memory_type = (type >> 12) & 1; sim_data.memory_width = 64; diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index b98599002831..0c9bdf023f5b 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -216,12 +216,15 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) { struct nvkm_pll_vals pllvals; int ret; + int domain; + + domain = pci_domain_nr(dev->pdev->bus); if (plltype == PLL_MEMORY && (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) { uint32_t mpllP; - - pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); + pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3), + 0x6c, &mpllP); mpllP = (mpllP >> 8) & 0xf; if (!mpllP) mpllP = 4; @@ -232,7 +235,8 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) { uint32_t clock; - pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); + 
pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5), + 0x4c, &clock); return clock / 1000; } diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h index 0760b93e9d1f..baab93398e54 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h @@ -121,6 +121,7 @@ int nv41_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); int nv44_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); int nv50_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); int g84_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); +int mcp77_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); int gf100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); int gk104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); int gk20a_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 41e7f2927443..80fa68d54bd3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1453,11 +1453,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) args.nv50.ro = 0; args.nv50.kind = mem->kind; args.nv50.comp = mem->comp; + argc = sizeof(args.nv50); break; case NVIF_CLASS_MEM_GF100: args.gf100.version = 0; args.gf100.ro = 0; args.gf100.kind = mem->kind; + argc = sizeof(args.gf100); break; default: WARN_ON(1); @@ -1465,7 +1467,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) } ret = nvif_object_map_handle(&mem->mem.object, - &argc, argc, + &args, argc, &handle, &length); if (ret != 1) return ret ? ret : -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 56fe261b6268..3e293029e3a6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -524,7 +524,8 @@ nouveau_get_hdmi_dev(struct nouveau_drm *drm) } /* subfunction one is a hdmi audio device? 
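 *
 * [Editorial aside, not part of the patch: pci_get_bus_and_slot() hard-codes
 * PCI domain 0, so the recurring conversion in this series is
 *
 *	int domain = pci_domain_nr(pdev->bus);
 *	struct pci_dev *dev = pci_get_domain_bus_and_slot(domain, bus, devfn);
 *
 * which behaves identically on single-domain systems but also works when
 * the GPU sits in a non-zero PCI domain.]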
*/ - drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number, + drm->hdmi_device = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), + (unsigned int)pdev->bus->number, PCI_DEVFN(PCI_SLOT(pdev->devfn), 1)); if (!drm->hdmi_device) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 073534ab6911..05cd674326a6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -1252,7 +1252,7 @@ nvaa_chipset = { .i2c = g94_i2c_new, .imem = nv50_instmem_new, .mc = g98_mc_new, - .mmu = g84_mmu_new, + .mmu = mcp77_mmu_new, .mxm = nv50_mxm_new, .pci = g94_pci_new, .therm = g84_therm_new, @@ -1284,7 +1284,7 @@ nvac_chipset = { .i2c = g94_i2c_new, .imem = nv50_instmem_new, .mc = g98_mc_new, - .mmu = g84_mmu_new, + .mmu = mcp77_mmu_new, .mxm = nv50_mxm_new, .pci = g94_pci_new, .therm = g84_therm_new, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c index 9646adec57cb..243f0a5c8a62 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c @@ -73,7 +73,8 @@ static int nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_bar *bar = nvkm_bar(subdev); - bar->func->bar1.fini(bar); + if (bar->func->bar1.fini) + bar->func->bar1.fini(bar); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c index b10077d38839..35878fb538f2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gk20a.c @@ -26,7 +26,6 @@ gk20a_bar_func = { .dtor = gf100_bar_dtor, .oneinit = gf100_bar_oneinit, .bar1.init = gf100_bar_bar1_init, - .bar1.fini = gf100_bar_bar1_fini, .bar1.wait = gf100_bar_bar1_wait, .bar1.vmm = gf100_bar_bar1_vmm, .flush = g84_bar_flush, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c index 4c07d10bb976..18241c6ba5fa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv1a.c @@ -28,8 +28,16 @@ nv1a_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) { struct pci_dev *bridge; u32 mem, mib; + int domain = 0; + struct pci_dev *pdev = NULL; - bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1)); + if (dev_is_pci(fb->subdev.device->dev)) + pdev = to_pci_dev(fb->subdev.device->dev); + + if (pdev) + domain = pci_domain_nr(pdev->bus); + + bridge = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 1)); if (!bridge) { nvkm_error(&fb->subdev, "no bridge device\n"); return -ENODEV; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild index 352a65f9371c..67ee983bb026 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild @@ -4,6 +4,7 @@ nvkm-y += nvkm/subdev/mmu/nv41.o nvkm-y += nvkm/subdev/mmu/nv44.o nvkm-y += nvkm/subdev/mmu/nv50.o nvkm-y += nvkm/subdev/mmu/g84.o +nvkm-y += nvkm/subdev/mmu/mcp77.o nvkm-y += nvkm/subdev/mmu/gf100.o nvkm-y += nvkm/subdev/mmu/gk104.o nvkm-y += nvkm/subdev/mmu/gk20a.o @@ -22,6 +23,7 @@ nvkm-y += nvkm/subdev/mmu/vmmnv04.o nvkm-y += nvkm/subdev/mmu/vmmnv41.o nvkm-y += nvkm/subdev/mmu/vmmnv44.o nvkm-y += nvkm/subdev/mmu/vmmnv50.o +nvkm-y += nvkm/subdev/mmu/vmmmcp77.o nvkm-y += nvkm/subdev/mmu/vmmgf100.o nvkm-y += nvkm/subdev/mmu/vmmgk104.o nvkm-y += nvkm/subdev/mmu/vmmgk20a.o 
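[Editorial aside, not part of the patch: the two new nouveau files below follow nvkm's usual recipe for a chipset-specific implementation: a Kbuild entry (above), a const function table plus a thin constructor (mcp77.c and vmmmcp77.c), and a per-chipset table switch (the .mmu = mcp77_mmu_new hunks in engine/device/base.c earlier in this diff). A minimal, self-contained userspace C sketch of that const-table dispatch style, with hypothetical names, is:

#include <stddef.h>
#include <stdio.h>

/* stand-in for nvkm_mmu_func: behaviour lives in a const table */
struct mmu_func {
	const char *name;
	int dma_bits;
};

static const struct mmu_func g84_mmu   = { "g84",   40 };
static const struct mmu_func mcp77_mmu = { "mcp77", 40 };

/* stand-in for the per-chipset tables in engine/device/base.c */
struct chipset {
	unsigned int id;
	const struct mmu_func *mmu;
};

static const struct chipset chipsets[] = {
	{ 0xa0, &g84_mmu },
	{ 0xaa, &mcp77_mmu },	/* nvaa now selects the MCP77 variant */
	{ 0xac, &mcp77_mmu },	/* nvac likewise */
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(chipsets) / sizeof(chipsets[0]); i++)
		printf("nv%02x -> %s MMU (%d-bit DMA)\n", chipsets[i].id,
		       chipsets[i].mmu->name, chipsets[i].mmu->dma_bits);
	return 0;
}

End of editorial aside.]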
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c new file mode 100644 index 000000000000..0527b50730d9 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mcp77.c @@ -0,0 +1,41 @@ +/* + * Copyright 2017 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "mem.h" +#include "vmm.h" + +#include <nvif/class.h> + +static const struct nvkm_mmu_func +mcp77_mmu = { + .dma_bits = 40, + .mmu = {{ -1, -1, NVIF_CLASS_MMU_NV50}}, + .mem = {{ -1, 0, NVIF_CLASS_MEM_NV50}, nv50_mem_new, nv50_mem_map }, + .vmm = {{ -1, -1, NVIF_CLASS_VMM_NV50}, mcp77_vmm_new, false, 0x0200 }, + .kind = nv50_mmu_kind, + .kind_sys = true, +}; + +int +mcp77_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu) +{ + return nvkm_mmu_new_(&mcp77_mmu, device, index, pmmu); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h index 6d8f61ea467a..da06e64d8a7d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h @@ -95,6 +95,9 @@ struct nvkm_vmm_desc { const struct nvkm_vmm_desc_func *func; }; +extern const struct nvkm_vmm_desc nv50_vmm_desc_12[]; +extern const struct nvkm_vmm_desc nv50_vmm_desc_16[]; + extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[]; extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[]; extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[]; @@ -169,6 +172,11 @@ int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32, const char *, struct nvkm_vmm **); int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *); +int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *); +void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *); +int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *); +void nv50_vmm_flush(struct nvkm_vmm *, int); + int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *, struct nvkm_mmu *, u64, u64, void *, u32, struct lock_class_key *, const char *, struct nvkm_vmm **); @@ -200,6 +208,8 @@ int nv44_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, struct lock_class_key *, const char *, struct nvkm_vmm **); int nv50_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, struct lock_class_key *, const char *, struct nvkm_vmm **); +int mcp77_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, + struct lock_class_key *, const char *, struct nvkm_vmm **); int 
g84_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, struct lock_class_key *, const char *, struct nvkm_vmm **); int gf100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c new file mode 100644 index 000000000000..e63d984cbfd4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmmcp77.c @@ -0,0 +1,45 @@ +/* + * Copyright 2017 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "vmm.h" + +static const struct nvkm_vmm_func +mcp77_vmm = { + .join = nv50_vmm_join, + .part = nv50_vmm_part, + .valid = nv50_vmm_valid, + .flush = nv50_vmm_flush, + .page_block = 1 << 29, + .page = { + { 16, &nv50_vmm_desc_16[0], NVKM_VMM_PAGE_xVxx }, + { 12, &nv50_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx }, + {} + } +}; + +int +mcp77_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc, + struct lock_class_key *key, const char *name, + struct nvkm_vmm **pvmm) +{ + return nv04_vmm_new_(&mcp77_vmm, mmu, 0, addr, size, + argv, argc, key, name, pvmm); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c index 863a2edd9861..64f75d906202 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c @@ -32,7 +32,7 @@ static inline void nv50_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) { - u64 next = addr | map->type, data; + u64 next = addr + map->type, data; u32 pten; int log2blk; @@ -69,7 +69,7 @@ nv50_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt, VMM_SPAM(vmm, "DMAA %08x %08x PTE(s)", ptei, ptes); nvkm_kmap(pt->memory); while (ptes--) { - const u64 data = *map->dma++ | map->type; + const u64 data = *map->dma++ + map->type; VMM_WO064(pt, vmm, ptei++ * 8, data); map->type += map->ctag; } @@ -163,21 +163,21 @@ nv50_vmm_pgd = { .pde = nv50_vmm_pgd_pde, }; -static const struct nvkm_vmm_desc +const struct nvkm_vmm_desc nv50_vmm_desc_12[] = { { PGT, 17, 8, 0x1000, &nv50_vmm_pgt }, { PGD, 11, 0, 0x0000, &nv50_vmm_pgd }, {} }; -static const struct nvkm_vmm_desc +const struct nvkm_vmm_desc nv50_vmm_desc_16[] = { { PGT, 13, 8, 0x1000, &nv50_vmm_pgt }, { PGD, 11, 0, 0x0000, &nv50_vmm_pgd }, {} }; -static void +void nv50_vmm_flush(struct nvkm_vmm *vmm, int level) { struct nvkm_subdev *subdev = &vmm->mmu->subdev; @@ -223,7 +223,7 @@ 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index deb96de54b00..ee2431a7804e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -71,6 +71,10 @@ nvkm_pci_intr(int irq, void *arg)
         struct nvkm_pci *pci = arg;
         struct nvkm_device *device = pci->subdev.device;
         bool handled = false;
+
+        if (pci->irq < 0)
+                return IRQ_HANDLED;
+
         nvkm_mc_intr_unarm(device);
         if (pci->msi)
                 pci->func->msi_rearm(pci);
@@ -84,11 +88,6 @@ nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
 {
         struct nvkm_pci *pci = nvkm_pci(subdev);
 
-        if (pci->irq >= 0) {
-                free_irq(pci->irq, pci);
-                pci->irq = -1;
-        }
-
         if (pci->agp.bridge)
                 nvkm_agp_fini(pci);
 
@@ -108,8 +107,20 @@ static int
 nvkm_pci_oneinit(struct nvkm_subdev *subdev)
 {
         struct nvkm_pci *pci = nvkm_pci(subdev);
-        if (pci_is_pcie(pci->pdev))
-                return nvkm_pcie_oneinit(pci);
+        struct pci_dev *pdev = pci->pdev;
+        int ret;
+
+        if (pci_is_pcie(pci->pdev)) {
+                ret = nvkm_pcie_oneinit(pci);
+                if (ret)
+                        return ret;
+        }
+
+        ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
+        if (ret)
+                return ret;
+
+        pci->irq = pdev->irq;
         return 0;
 }
 
@@ -117,7 +128,6 @@ static int
 nvkm_pci_init(struct nvkm_subdev *subdev)
 {
         struct nvkm_pci *pci = nvkm_pci(subdev);
-        struct pci_dev *pdev = pci->pdev;
         int ret;
 
         if (pci->agp.bridge) {
@@ -131,28 +141,34 @@ nvkm_pci_init(struct nvkm_subdev *subdev)
         if (pci->func->init)
                 pci->func->init(pci);
 
-        ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
-        if (ret)
-                return ret;
-
-        pci->irq = pdev->irq;
-
         /* Ensure MSI interrupts are armed, for the case where there are
          * already interrupts pending (for whatever reason) at load time.
          */
         if (pci->msi)
                 pci->func->msi_rearm(pci);
 
-        return ret;
+        return 0;
 }
 
 static void *
 nvkm_pci_dtor(struct nvkm_subdev *subdev)
 {
         struct nvkm_pci *pci = nvkm_pci(subdev);
+
         nvkm_agp_dtor(pci);
+
+        if (pci->irq >= 0) {
+                /* free_irq() will call the handler, we use pci->irq == -1
+                 * to signal that it's been torn down and should be a noop.
+                 */
+                int irq = pci->irq;
+                pci->irq = -1;
+                free_irq(irq, pci);
+        }
+
         if (pci->msi)
                 pci_disable_msi(pci->pdev);
+
         return nvkm_pci(subdev);
 }
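The pci/base.c rework changes the IRQ lifecycle: request_irq() moves from init (re-run on every resume) to oneinit (run once per device lifetime), and the matching free_irq() moves from fini to the destructor. The handler must therefore tolerate being invoked at any point in between -- including from free_irq() itself, which with CONFIG_DEBUG_SHIRQ deliberately fires shared-line handlers at free time -- hence the pci->irq sentinel. The pattern in isolation, as a kernel-style sketch (struct and function names invented, not the actual nvkm code):

        #include <linux/interrupt.h>

        struct demo { int irq; };

        static irqreturn_t demo_intr(int irq, void *arg)
        {
                struct demo *demo = arg;

                if (demo->irq < 0)      /* torn down: harmless no-op */
                        return IRQ_HANDLED;
                /* ... real interrupt servicing ... */
                return IRQ_HANDLED;
        }

        static void demo_dtor(struct demo *demo)
        {
                if (demo->irq >= 0) {
                        int irq = demo->irq;

                        demo->irq = -1; /* neuter the handler first */
                        free_irq(irq, demo);
                }
        }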
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 8fdc56c1c953..b9bfa806d346 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -982,25 +982,14 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
         xbuf_size = count * sizeof(*x);
         ybuf_size = count * sizeof(*y);
-        x = kmalloc(xbuf_size, GFP_KERNEL);
-        if (x == NULL)
-                return -ENOMEM;
-        y = kmalloc(ybuf_size, GFP_KERNEL);
-        if (y == NULL) {
-                kfree(x);
-                return -ENOMEM;
-        }
-        if (copy_from_user(x, depth->x, xbuf_size)) {
-                kfree(x);
-                kfree(y);
-                return -EFAULT;
-        }
-        if (copy_from_user(y, depth->y, xbuf_size)) {
+        x = memdup_user(depth->x, xbuf_size);
+        if (IS_ERR(x))
+                return PTR_ERR(x);
+        y = memdup_user(depth->y, ybuf_size);
+        if (IS_ERR(y)) {
                 kfree(x);
-                kfree(y);
-                return -EFAULT;
+                return PTR_ERR(y);
         }
-
         buffer_size = depth->n * sizeof(u32);
         buffer = memdup_user(depth->buffer, buffer_size);
         if (IS_ERR(buffer)) {
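Two things happen at once in the r128 hunk: the open-coded kmalloc()/copy_from_user()/kfree() unwind chains collapse into memdup_user(), and a latent bug goes with them -- the deleted code copied depth->y using xbuf_size rather than ybuf_size. The resulting pattern, shown as a minimal kernel-style helper (the function name and __user pointer are invented for illustration):

        #include <linux/err.h>
        #include <linux/slab.h>
        #include <linux/string.h>

        static int demo_copy_args(const u32 __user *uptr, unsigned int count)
        {
                /* Allocation, copy-in, and error unwinding in one step. */
                u32 *tmp = memdup_user(uptr, count * sizeof(*tmp));

                if (IS_ERR(tmp))
                        return PTR_ERR(tmp);
                /* ... use tmp ... */
                kfree(tmp);
                return 0;
        }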
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index d34d1cf33895..95f4db70dd22 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
                 /* calc dclk divider with current vco freq */
                 dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
                                                          pd_min, pd_even);
-                if (vclk_div > pd_max)
+                if (dclk_div > pd_max)
                         break; /* vco is too big, it has to stop */
 
                 /* calc score with current vco freq */
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 84911bdc27d1..e67f4ea28c0e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -25,6 +25,7 @@
 #include <linux/clk.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of_graph.h>
+#include <linux/pinctrl/devinfo.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/reset.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index dc332ea56f6c..3ecffa52c814 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -102,10 +102,13 @@ static int sun4i_tmds_determine_rate(struct clk_hw *hw,
                                 goto out;
                         }
 
-                        if (abs(rate - rounded / i) <
-                            abs(rate - best_parent / best_div)) {
+                        if (!best_parent ||
+                            abs(rate - rounded / i / j) <
+                            abs(rate - best_parent / best_half /
+                                best_div)) {
                                 best_parent = rounded;
-                                best_div = i;
+                                best_half = i;
+                                best_div = j;
                         }
                 }
         }
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 893003fc76a1..2fef09a56d16 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1727,7 +1727,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
         kref_get(&bo->list_kref);
 
         if (!list_empty(&bo->ddestroy)) {
-                ret = ttm_bo_cleanup_refs(bo, false, false, true);
+                ret = ttm_bo_cleanup_refs(bo, false, false, locked);
                 kref_put(&bo->list_kref, ttm_bo_release_list);
                 return ret;
         }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 08a3c324242e..60fcef1593dd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -316,7 +316,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
 
 static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                                  unsigned long offset,
-                                 void *buf, int len, int write)
+                                 uint8_t *buf, int len, int write)
 {
         unsigned long page = offset >> PAGE_SHIFT;
         unsigned long bytes_left = len;
@@ -345,6 +345,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                 ttm_bo_kunmap(&map);
 
                 page++;
+                buf += bytes;
                 bytes_left -= bytes;
                 offset = 0;
         } while (bytes_left);
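The ttm_bo_vm.c change fixes a classic missing-cursor bug: the helper walks the object one page per iteration but never advanced buf, so every chunk was copied to or from the start of the caller's buffer. Retyping buf from void * to uint8_t * is what makes the added buf += bytes well-defined C (arithmetic on void * is only a GCC extension). The loop shape as a runnable sketch:

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        /* Chunked copy in the shape of ttm_bo_vm_access_kmap(). */
        static void copy_chunked(uint8_t *dst, const uint8_t *src,
                                 size_t len, size_t chunk)
        {
                while (len) {
                        size_t bytes = len < chunk ? len : chunk;

                        memcpy(dst, src, bytes);
                        src += bytes;
                        dst += bytes; /* the cursor advance the fix adds */
                        len -= bytes;
                }
        }

        int main(void)
        {
                uint8_t out[8] = { 0 };

                copy_chunked(out, (const uint8_t *)"abcdefg", 8, 3);
                printf("%s\n", out); /* "abcdefg", not a mangled tail */
                return 0;
        }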
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 638540943c61..c94cce96544c 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -146,7 +146,7 @@ vc4_save_hang_state(struct drm_device *dev)
         struct vc4_exec_info *exec[2];
         struct vc4_bo *bo;
         unsigned long irqflags;
-        unsigned int i, j, unref_list_count, prev_idx;
+        unsigned int i, j, k, unref_list_count;
 
         kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
         if (!kernel_state)
@@ -182,7 +182,7 @@ vc4_save_hang_state(struct drm_device *dev)
                 return;
         }
 
-        prev_idx = 0;
+        k = 0;
         for (i = 0; i < 2; i++) {
                 if (!exec[i])
                         continue;
@@ -197,7 +197,7 @@ vc4_save_hang_state(struct drm_device *dev)
                         WARN_ON(!refcount_read(&bo->usecnt));
                         refcount_inc(&bo->usecnt);
                         drm_gem_object_get(&exec[i]->bo[j]->base);
-                        kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+                        kernel_state->bo[k++] = &exec[i]->bo[j]->base;
                 }
 
                 list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -205,12 +205,12 @@ vc4_save_hang_state(struct drm_device *dev)
                          * because they are naturally unpurgeable.
                          */
                         drm_gem_object_get(&bo->base.base);
-                        kernel_state->bo[j + prev_idx] = &bo->base.base;
-                        j++;
+                        kernel_state->bo[k++] = &bo->base.base;
                 }
-                prev_idx = j + 1;
         }
 
+        WARN_ON_ONCE(k != state->bo_count);
+
         if (exec[0])
                 state->start_bin = exec[0]->ct0ca;
         if (exec[1])
@@ -436,6 +436,19 @@ vc4_flush_caches(struct drm_device *dev)
                  VC4_SET_FIELD(0xf, V3D_SLCACTL_ICC));
 }
 
+static void
+vc4_flush_texture_caches(struct drm_device *dev)
+{
+        struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+        V3D_WRITE(V3D_L2CACTL,
+                  V3D_L2CACTL_L2CCLR);
+
+        V3D_WRITE(V3D_SLCACTL,
+                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T1CC) |
+                  VC4_SET_FIELD(0xf, V3D_SLCACTL_T0CC));
+}
+
 /* Sets the registers for the next job to be actually be executed in
  * the hardware.
  *
@@ -474,6 +487,14 @@ vc4_submit_next_render_job(struct drm_device *dev)
         if (!exec)
                 return;
 
+        /* A previous RCL may have written to one of our textures, and
+         * our full cache flush at bin time may have occurred before
+         * that RCL completed. Flush the texture cache now, but not
+         * the instructions or uniforms (since we don't write those
+         * from an RCL).
+         */
+        vc4_flush_texture_caches(dev);
+
         submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 7e5f30e234b1..d08753e8fd94 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -713,7 +713,7 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
-extern unsigned int vmw_fops_poll(struct file *filp,
+extern __poll_t vmw_fops_poll(struct file *filp,
                               struct poll_table_struct *wait);
 extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
                              size_t count, loff_t *offset);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 01be355525e4..67f844678ac8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -412,7 +412,7 @@ out_clips:
  * Wrapper around the drm_poll function that makes sure the device is
  * processing the fifo if drm_poll decides to wait.
  */
-unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
+__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
 {
         struct drm_file *file_priv = filp->private_data;
         struct vmw_private *dev_priv =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87a443013cbf..ead61015cd79 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1849,7 +1849,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
  */
 int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
 {
-        return -ENOSYS;
+        return -EINVAL;
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index b8a09807c5de..3824595fece1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -266,8 +266,8 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
         .set_property = vmw_du_connector_set_property,
         .destroy = vmw_ldu_connector_destroy,
         .reset = vmw_du_connector_reset,
-        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+        .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+        .atomic_destroy_state = vmw_du_connector_destroy_state,
         .atomic_set_property = vmw_du_connector_atomic_set_property,
         .atomic_get_property = vmw_du_connector_atomic_get_property,
 };
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index bc5f6026573d..63a4cd794b73 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -420,8 +420,8 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
         .set_property = vmw_du_connector_set_property,
         .destroy = vmw_sou_connector_destroy,
         .reset = vmw_du_connector_reset,
-        .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
-        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+        .atomic_duplicate_state = vmw_du_connector_duplicate_state,
+        .atomic_destroy_state = vmw_du_connector_destroy_state,
         .atomic_set_property = vmw_du_connector_atomic_set_property,
         .atomic_get_property = vmw_du_connector_atomic_get_property,
 };
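A closing note on the vmw_fops_poll() hunks above: __poll_t is the sparse-annotated bitwise type for poll masks (the EPOLL* constants carry it), so the conversion is binary-identical but lets static checkers catch poll masks mixed with plain integers. A minimal sketch of a poll hook in the new style -- demo_waitq and demo_data_ready are invented stand-ins, not vmwgfx code:

        #include <linux/poll.h>
        #include <linux/wait.h>

        static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
        static bool demo_data_ready;

        static __poll_t demo_poll(struct file *filp,
                                  struct poll_table_struct *wait)
        {
                __poll_t mask = 0;

                poll_wait(filp, &demo_waitq, wait);
                if (demo_data_ready)
                        mask |= EPOLLIN | EPOLLRDNORM;
                return mask;
        }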