Diffstat (limited to 'drivers/gpu/drm/i915')
56 files changed, 2448 insertions, 1658 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 051eab33e4c7..20a5d0455e19 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -2,9 +2,7 @@ config DRM_I915 tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics" depends on DRM depends on X86 && PCI - depends on (AGP || AGP=n) select INTEL_GTT - select AGP_INTEL if AGP select INTERVAL_TREE # we need shmfs for the swappable backing store, and in particular # the shmem_readpage() which depends upon tmpfs @@ -47,3 +45,14 @@ config DRM_I915_PRELIMINARY_HW_SUPPORT option changes the default for that module option. If in doubt, say "N". + +config DRM_I915_USERPTR + bool "Always enable userptr support" + depends on DRM_I915 + select MMU_NOTIFIER + default y + help + This option selects CONFIG_MMU_NOTIFIER if it isn't already + selected to enabled full userptr support. + + If in doubt, say "Y". diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index c5db23511184..a0f1bd711b53 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -117,9 +117,8 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) u64 size = 0; struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) { - if (i915_is_ggtt(vma->vm) && - drm_mm_node_allocated(&vma->node)) + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (vma->is_ggtt && drm_mm_node_allocated(&vma->node)) size += vma->node.size; } @@ -155,7 +154,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); if (obj->base.name) seq_printf(m, " (name: %d)", obj->base.name); - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry(vma, &obj->vma_list, obj_link) { if (vma->pin_count > 0) pin_count++; } @@ -164,14 +163,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (display)"); if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry(vma, &obj->vma_list, obj_link) { seq_printf(m, " (%sgtt offset: %08llx, size: %08llx", - i915_is_ggtt(vma->vm) ? "g" : "pp", + vma->is_ggtt ? 
"g" : "pp", vma->node.start, vma->node.size); - if (i915_is_ggtt(vma->vm)) - seq_printf(m, ", type: %u)", vma->ggtt_view.type); - else - seq_puts(m, ")"); + if (vma->is_ggtt) + seq_printf(m, ", type: %u", vma->ggtt_view.type); + seq_puts(m, ")"); } if (obj->stolen) seq_printf(m, " (stolen: %08llx)", obj->stolen->start); @@ -230,7 +228,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(vma, head, mm_list) { + list_for_each_entry(vma, head, vm_link) { seq_printf(m, " "); describe_obj(m, vma->obj); seq_printf(m, "\n"); @@ -342,13 +340,13 @@ static int per_file_stats(int id, void *ptr, void *data) stats->shared += obj->base.size; if (USES_FULL_PPGTT(obj->base.dev)) { - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry(vma, &obj->vma_list, obj_link) { struct i915_hw_ppgtt *ppgtt; if (!drm_mm_node_allocated(&vma->node)) continue; - if (i915_is_ggtt(vma->vm)) { + if (vma->is_ggtt) { stats->global += obj->base.size; continue; } @@ -454,12 +452,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data) count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_vmas(&vm->active_list, mm_list); + count_vmas(&vm->active_list, vm_link); seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_vmas(&vm->inactive_list, mm_list); + count_vmas(&vm->inactive_list, vm_link); seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", count, mappable_count, size, mappable_size); @@ -825,8 +823,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } for_each_pipe(dev_priv, pipe) { - if (!intel_display_power_is_enabled(dev_priv, - POWER_DOMAIN_PIPE(pipe))) { + enum intel_display_power_domain power_domain; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, + power_domain)) { seq_printf(m, "Pipe %c power disabled\n", pipe_name(pipe)); continue; @@ -840,6 +841,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "Pipe %c IER:\t%08x\n", pipe_name(pipe), I915_READ(GEN8_DE_PIPE_IER(pipe))); + + intel_display_power_put(dev_priv, power_domain); } seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", @@ -2463,9 +2466,9 @@ static void i915_guc_client_info(struct seq_file *m, for_each_ring(ring, dev_priv, i) { seq_printf(m, "\tSubmissions: %llu %s\n", - client->submissions[i], + client->submissions[ring->guc_id], ring->name); - tot += client->submissions[i]; + tot += client->submissions[ring->guc_id]; } seq_printf(m, "\tTotal: %llu\n", tot); } @@ -2502,10 +2505,10 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "\nGuC submissions:\n"); for_each_ring(ring, dev_priv, i) { - seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n", - ring->name, guc.submissions[i], - guc.last_seqno[i], guc.last_seqno[i]); - total += guc.submissions[i]; + seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", + ring->name, guc.submissions[ring->guc_id], + guc.last_seqno[ring->guc_id]); + total += guc.submissions[ring->guc_id]; } seq_printf(m, "\t%s: %llu\n", "Total", total); @@ -2583,6 +2586,10 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) enabled = true; } } + + seq_printf(m, "Main link in standby mode: %s\n", + yesno(dev_priv->psr.link_standby)); + seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); if 
(!HAS_DDI(dev)) @@ -3221,9 +3228,11 @@ static int i915_wa_registers(struct seq_file *m, void *unused) { int i; int ret; + struct intel_engine_cs *ring; struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_workarounds *workarounds = &dev_priv->workarounds; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -3231,15 +3240,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count); - for (i = 0; i < dev_priv->workarounds.count; ++i) { + seq_printf(m, "Workarounds applied: %d\n", workarounds->count); + for_each_ring(ring, dev_priv, i) + seq_printf(m, "HW whitelist count for %s: %d\n", + ring->name, workarounds->hw_whitelist_count[i]); + for (i = 0; i < workarounds->count; ++i) { i915_reg_t addr; u32 mask, value, read; bool ok; - addr = dev_priv->workarounds.reg[i].addr; - mask = dev_priv->workarounds.reg[i].mask; - value = dev_priv->workarounds.reg[i].value; + addr = workarounds->reg[i].addr; + mask = workarounds->reg[i].mask; + value = workarounds->reg[i].value; read = I915_READ(addr); ok = (value & mask) == (read & mask); seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n", @@ -3995,6 +4007,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); + enum intel_display_power_domain power_domain; u32 val = 0; /* shut up gcc */ int ret; @@ -4005,7 +4018,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, if (pipe_crc->source && source) return -EINVAL; - if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) { + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) { DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n"); return -EIO; } @@ -4022,7 +4036,7 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val); if (ret != 0) - return ret; + goto out; /* none -> real source transition */ if (source) { @@ -4034,8 +4048,10 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR, sizeof(pipe_crc->entries[0]), GFP_KERNEL); - if (!entries) - return -ENOMEM; + if (!entries) { + ret = -ENOMEM; + goto out; + } /* * When IPS gets enabled, the pipe CRC changes. Since IPS gets @@ -4091,7 +4107,12 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, hsw_enable_ips(crtc); } - return 0; + ret = 0; + +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } /* diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1c3d2544fec4..1c6d227aae7c 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -391,20 +391,13 @@ static int i915_load_modeset_init(struct drm_device *dev) if (ret) goto cleanup_vga_client; - /* Initialise stolen first so that we may reserve preallocated - * objects for the BIOS to KMS transition. 
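
The interrupt-info and pipe-CRC hunks above replace the unlocked intel_display_power_is_enabled() check with a get_if_enabled()/put() pair, so the power domain stays referenced for the whole register sequence instead of racing with a power-down. A minimal sketch of the pattern, using only the functions named in the diff (the wrapper function itself is hypothetical):

    static int read_pipe_regs(struct drm_i915_private *dev_priv, enum pipe pipe)
    {
            enum intel_display_power_domain power_domain = POWER_DOMAIN_PIPE(pipe);

            /* Take a wakeref only if the domain is already powered; this
             * closes the window where the pipe could be powered down
             * between the check and the register reads. */
            if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                    return -ENODEV;

            /* ... I915_READ() sequence for the pipe goes here ... */

            intel_display_power_put(dev_priv, power_domain);
            return 0;
    }
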
- */ - ret = i915_gem_init_stolen(dev); - if (ret) - goto cleanup_vga_switcheroo; - intel_power_domains_init_hw(dev_priv, false); intel_csr_ucode_init(dev_priv); ret = intel_irq_install(dev_priv); if (ret) - goto cleanup_gem_stolen; + goto cleanup_csr; intel_setup_gmbus(dev); @@ -458,9 +451,8 @@ cleanup_irq: intel_guc_ucode_fini(dev); drm_irq_uninstall(dev); intel_teardown_gmbus(dev); -cleanup_gem_stolen: - i915_gem_cleanup_stolen(dev); -cleanup_vga_switcheroo: +cleanup_csr: + intel_csr_ucode_fini(dev_priv); vga_switcheroo_unregister_client(dev->pdev); cleanup_vga_client: vga_client_register(dev->pdev, NULL, NULL, NULL); @@ -816,7 +808,41 @@ static void intel_device_info_runtime_init(struct drm_device *dev) !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { DRM_INFO("Display fused off, disabling\n"); info->num_pipes = 0; + } else if (fuse_strap & IVB_PIPE_C_DISABLE) { + DRM_INFO("PipeC fused off\n"); + info->num_pipes -= 1; } + } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) { + u32 dfsm = I915_READ(SKL_DFSM); + u8 disabled_mask = 0; + bool invalid; + int num_bits; + + if (dfsm & SKL_DFSM_PIPE_A_DISABLE) + disabled_mask |= BIT(PIPE_A); + if (dfsm & SKL_DFSM_PIPE_B_DISABLE) + disabled_mask |= BIT(PIPE_B); + if (dfsm & SKL_DFSM_PIPE_C_DISABLE) + disabled_mask |= BIT(PIPE_C); + + num_bits = hweight8(disabled_mask); + + switch (disabled_mask) { + case BIT(PIPE_A): + case BIT(PIPE_B): + case BIT(PIPE_A) | BIT(PIPE_B): + case BIT(PIPE_A) | BIT(PIPE_C): + invalid = true; + break; + default: + invalid = false; + } + + if (num_bits > info->num_pipes || invalid) + DRM_ERROR("invalid pipe fuse configuration: 0x%x\n", + disabled_mask); + else + info->num_pipes -= num_bits; } /* Initialize slice/subslice/EU info */ @@ -855,6 +881,94 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv) } } +static int i915_workqueues_init(struct drm_i915_private *dev_priv) +{ + /* + * The i915 workqueue is primarily used for batched retirement of + * requests (and thus managing bo) once the task has been completed + * by the GPU. i915_gem_retire_requests() is called directly when we + * need high-priority retirement, such as waiting for an explicit + * bo. + * + * It is also used for periodic low-priority events, such as + * idle-timers and recording error state. + * + * All tasks on the workqueue are expected to acquire the dev mutex + * so there is no point in running more than one instance of the + * workqueue at any time. Use an ordered one. + */ + dev_priv->wq = alloc_ordered_workqueue("i915", 0); + if (dev_priv->wq == NULL) + goto out_err; + + dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); + if (dev_priv->hotplug.dp_wq == NULL) + goto out_free_wq; + + dev_priv->gpu_error.hangcheck_wq = + alloc_ordered_workqueue("i915-hangcheck", 0); + if (dev_priv->gpu_error.hangcheck_wq == NULL) + goto out_free_dp_wq; + + return 0; + +out_free_dp_wq: + destroy_workqueue(dev_priv->hotplug.dp_wq); +out_free_wq: + destroy_workqueue(dev_priv->wq); +out_err: + DRM_ERROR("Failed to allocate workqueues.\n"); + + return -ENOMEM; +} + +static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) +{ + destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); + destroy_workqueue(dev_priv->hotplug.dp_wq); + destroy_workqueue(dev_priv->wq); +} + +static int i915_mmio_setup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + int mmio_bar; + int mmio_size; + + mmio_bar = IS_GEN2(dev) ? 1 : 0; + /* + * Before gen4, the registers and the GTT are behind different BARs. 
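
The SKL_DFSM decoding above accepts only fuse masks that disable pipes from the last one down. A standalone sketch of that validity rule, reusing the BIT()/PIPE_* names from the hunk (the helper function is hypothetical):

    static bool pipe_fuse_mask_valid(u8 disabled_mask)
    {
            /* Pipes must be fused off from the end: C, then B+C, then
             * A+B+C. Any mask that disables an earlier pipe while a
             * later one stays enabled is rejected. */
            switch (disabled_mask) {
            case BIT(PIPE_A):
            case BIT(PIPE_B):
            case BIT(PIPE_A) | BIT(PIPE_B):
            case BIT(PIPE_A) | BIT(PIPE_C):
                    return false;
            default:
                    return true;
            }
    }
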
+ * However, from gen4 onwards, the registers and the GTT are shared + * in the same BAR, so we want to restrict this ioremap from + * clobbering the GTT which we want ioremap_wc instead. Fortunately, + * the register BAR remains the same size for all the earlier + * generations up to Ironlake. + */ + if (INTEL_INFO(dev)->gen < 5) + mmio_size = 512 * 1024; + else + mmio_size = 2 * 1024 * 1024; + dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); + if (dev_priv->regs == NULL) { + DRM_ERROR("failed to map registers\n"); + + return -EIO; + } + + /* Try to make sure MCHBAR is enabled before poking at it */ + intel_setup_mchbar(dev); + + return 0; +} + +static void i915_mmio_cleanup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + intel_teardown_mchbar(dev); + pci_iounmap(dev->pdev, dev_priv->regs); +} + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -870,7 +984,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) { struct drm_i915_private *dev_priv; struct intel_device_info *info, *device_info; - int ret = 0, mmio_bar, mmio_size; + int ret = 0; uint32_t aperture_size; info = (struct intel_device_info *) flags; @@ -897,6 +1011,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) mutex_init(&dev_priv->modeset_restore_lock); mutex_init(&dev_priv->av_mutex); + ret = i915_workqueues_init(dev_priv); + if (ret < 0) + goto out_free_priv; + intel_pm_setup(dev); intel_runtime_pm_get(dev_priv); @@ -915,28 +1033,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (i915_get_bridge_dev(dev)) { ret = -EIO; - goto free_priv; + goto out_runtime_pm_put; } - mmio_bar = IS_GEN2(dev) ? 1 : 0; - /* Before gen4, the registers and the GTT are behind different BARs. - * However, from gen4 onwards, the registers and the GTT are shared - * in the same BAR, so we want to restrict this ioremap from - * clobbering the GTT which we want ioremap_wc instead. Fortunately, - * the register BAR remains the same size for all the earlier - * generations up to Ironlake. - */ - if (info->gen < 5) - mmio_size = 512*1024; - else - mmio_size = 2*1024*1024; - - dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); - if (!dev_priv->regs) { - DRM_ERROR("failed to map registers\n"); - ret = -EIO; + ret = i915_mmio_setup(dev); + if (ret < 0) goto put_bridge; - } /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev); @@ -945,7 +1047,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) ret = i915_gem_gtt_init(dev); if (ret) - goto out_freecsr; + goto out_uncore_fini; /* WARNING: Apparently we must kick fbdev drivers before vgacon, * otherwise the vga fbdev driver falls over. */ @@ -991,49 +1093,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, aperture_size); - /* The i915 workqueue is primarily used for batched retirement of - * requests (and thus managing bo) once the task has been completed - * by the GPU. i915_gem_retire_requests() is called directly when we - * need high-priority retirement, such as waiting for an explicit - * bo. - * - * It is also used for periodic low-priority events, such as - * idle-timers and recording error state. - * - * All tasks on the workqueue are expected to acquire the dev mutex - * so there is no point in running more than one instance of the - * workqueue at any time. Use an ordered one. 
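
The new i915_mmio_setup() above deliberately caps the ioremap at the register range. A sketch of why the size argument to pci_iomap() matters here (function name hypothetical; BAR 0 is the gen4+ case from the hunk):

    static void __iomem *map_gen4_regs(struct pci_dev *pdev)
    {
            /* Map only the first 2 MiB of BAR 0: the registers. The
             * rest of the shared gen4+ BAR holds the GTT, which is
             * mapped separately with ioremap_wc() and must not be
             * covered by this uncached mapping. */
            return pci_iomap(pdev, 0, 2 * 1024 * 1024);
    }
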
- */ - dev_priv->wq = alloc_ordered_workqueue("i915", 0); - if (dev_priv->wq == NULL) { - DRM_ERROR("Failed to create our workqueue.\n"); - ret = -ENOMEM; - goto out_mtrrfree; - } - - dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); - if (dev_priv->hotplug.dp_wq == NULL) { - DRM_ERROR("Failed to create our dp workqueue.\n"); - ret = -ENOMEM; - goto out_freewq; - } - - dev_priv->gpu_error.hangcheck_wq = - alloc_ordered_workqueue("i915-hangcheck", 0); - if (dev_priv->gpu_error.hangcheck_wq == NULL) { - DRM_ERROR("Failed to create our hangcheck workqueue.\n"); - ret = -ENOMEM; - goto out_freedpwq; - } - intel_irq_init(dev_priv); intel_uncore_sanitize(dev); - /* Try to make sure MCHBAR is enabled before poking at it */ - intel_setup_mchbar(dev); intel_opregion_setup(dev); - i915_gem_load(dev); + i915_gem_load_init(dev); + i915_gem_shrinker_init(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there @@ -1046,8 +1112,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) * be lost or delayed, but we use them anyways to avoid * stuck interrupts on some machines. */ - if (!IS_I945G(dev) && !IS_I945GM(dev)) - pci_enable_msi(dev->pdev); + if (!IS_I945G(dev) && !IS_I945GM(dev)) { + if (pci_enable_msi(dev->pdev) < 0) + DRM_DEBUG_DRIVER("can't enable MSI"); + } intel_device_info_runtime_init(dev); @@ -1097,38 +1165,29 @@ out_power_well: intel_power_domains_fini(dev_priv); drm_vblank_cleanup(dev); out_gem_unload: - WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); - unregister_shrinker(&dev_priv->mm.shrinker); + i915_gem_shrinker_cleanup(dev_priv); if (dev->pdev->msi_enabled) pci_disable_msi(dev->pdev); intel_teardown_mchbar(dev); pm_qos_remove_request(&dev_priv->pm_qos); - destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); -out_freedpwq: - destroy_workqueue(dev_priv->hotplug.dp_wq); -out_freewq: - destroy_workqueue(dev_priv->wq); -out_mtrrfree: arch_phys_wc_del(dev_priv->gtt.mtrr); io_mapping_free(dev_priv->gtt.mappable); out_gtt: i915_global_gtt_cleanup(dev); -out_freecsr: - intel_csr_ucode_fini(dev_priv); +out_uncore_fini: intel_uncore_fini(dev); - pci_iounmap(dev->pdev, dev_priv->regs); + i915_mmio_cleanup(dev); put_bridge: pci_dev_put(dev_priv->bridge_dev); -free_priv: - kmem_cache_destroy(dev_priv->requests); - kmem_cache_destroy(dev_priv->vmas); - kmem_cache_destroy(dev_priv->objects); - + i915_gem_load_cleanup(dev); +out_runtime_pm_put: intel_runtime_pm_put(dev_priv); - + i915_workqueues_cleanup(dev_priv); +out_free_priv: kfree(dev_priv); + return ret; } @@ -1153,8 +1212,7 @@ int i915_driver_unload(struct drm_device *dev) i915_teardown_sysfs(dev); - WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); - unregister_shrinker(&dev_priv->mm.shrinker); + i915_gem_shrinker_cleanup(dev_priv); io_mapping_free(dev_priv->gtt.mappable); arch_phys_wc_del(dev_priv->gtt.mtrr); @@ -1182,6 +1240,8 @@ int i915_driver_unload(struct drm_device *dev) vga_switcheroo_unregister_client(dev->pdev); vga_client_register(dev->pdev, NULL, NULL, NULL); + intel_csr_ucode_fini(dev_priv); + /* Free error state after interrupts are fully disabled. 
*/ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); i915_destroy_error_state(dev); @@ -1200,27 +1260,17 @@ int i915_driver_unload(struct drm_device *dev) i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); intel_fbc_cleanup_cfb(dev_priv); - i915_gem_cleanup_stolen(dev); - intel_csr_ucode_fini(dev_priv); - - intel_teardown_mchbar(dev); - - destroy_workqueue(dev_priv->hotplug.dp_wq); - destroy_workqueue(dev_priv->wq); - destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); pm_qos_remove_request(&dev_priv->pm_qos); i915_global_gtt_cleanup(dev); intel_uncore_fini(dev); - if (dev_priv->regs != NULL) - pci_iounmap(dev->pdev, dev_priv->regs); + i915_mmio_cleanup(dev); - kmem_cache_destroy(dev_priv->requests); - kmem_cache_destroy(dev_priv->vmas); - kmem_cache_destroy(dev_priv->objects); + i915_gem_load_cleanup(dev); pci_dev_put(dev_priv->bridge_dev); + i915_workqueues_cleanup(dev_priv); kfree(dev_priv); return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 44912ecebc1a..20e82008b8b6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -603,13 +603,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_suspend_gt_powersave(dev); - /* - * Disable CRTCs directly since we want to preserve sw state - * for _thaw. Also, power gate the CRTC power wells. - */ - drm_modeset_lock_all(dev); intel_display_suspend(dev); - drm_modeset_unlock_all(dev); intel_dp_mst_suspend(dev); @@ -764,9 +758,7 @@ static int i915_drm_resume(struct drm_device *dev) dev_priv->display.hpd_irq_setup(dev); spin_unlock_irq(&dev_priv->irq_lock); - drm_modeset_lock_all(dev); intel_display_resume(dev); - drm_modeset_unlock_all(dev); intel_dp_mst_resume(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index afb0beee9975..10480939159c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -59,7 +59,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20160124" +#define DRIVER_DATE "20160229" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -261,6 +261,9 @@ struct i915_hotplug { #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) +#define for_each_pipe_masked(__dev_priv, __p, __mask) \ + for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \ + for_each_if ((__mask) & (1 << (__p))) #define for_each_plane(__dev_priv, __pipe, __p) \ for ((__p) = 0; \ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ @@ -746,6 +749,7 @@ struct intel_csr { uint32_t mmio_count; i915_reg_t mmioaddr[8]; uint32_t mmiodata[8]; + uint32_t dc_state; }; #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ @@ -900,16 +904,15 @@ enum fb_op_origin { ORIGIN_DIRTYFB, }; -struct i915_fbc { +struct intel_fbc { /* This is always the inner lock when overlapping with struct_mutex and * it's the outer lock when overlapping with stolen_lock. 
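
The new for_each_pipe_masked() iterator added to i915_drv.h above combines the pipe loop with a mask filter. A usage sketch (the function and mask value are arbitrary examples):

    static void log_masked_pipes(struct drm_i915_private *dev_priv, u8 mask)
    {
            enum pipe pipe;

            /* Visits only the pipes whose bit is set in 'mask'. */
            for_each_pipe_masked(dev_priv, pipe, mask)
                    DRM_DEBUG_KMS("pipe %c selected\n", pipe_name(pipe));
    }
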
*/ struct mutex lock; unsigned threshold; - unsigned int fb_id; unsigned int possible_framebuffer_bits; unsigned int busy_bits; + unsigned int visible_pipes_mask; struct intel_crtc *crtc; - int y; struct drm_mm_node compressed_fb; struct drm_mm_node *compressed_llb; @@ -919,18 +922,52 @@ struct i915_fbc { bool enabled; bool active; + struct intel_fbc_state_cache { + struct { + unsigned int mode_flags; + uint32_t hsw_bdw_pixel_rate; + } crtc; + + struct { + unsigned int rotation; + int src_w; + int src_h; + bool visible; + } plane; + + struct { + u64 ilk_ggtt_offset; + uint32_t pixel_format; + unsigned int stride; + int fence_reg; + unsigned int tiling_mode; + } fb; + } state_cache; + + struct intel_fbc_reg_params { + struct { + enum pipe pipe; + enum plane plane; + unsigned int fence_y_offset; + } crtc; + + struct { + u64 ggtt_offset; + uint32_t pixel_format; + unsigned int stride; + int fence_reg; + } fb; + + int cfb_size; + } params; + struct intel_fbc_work { bool scheduled; + u32 scheduled_vblank; struct work_struct work; - struct drm_framebuffer *fb; - unsigned long enable_jiffies; } work; const char *no_fbc_reason; - - bool (*is_active)(struct drm_i915_private *dev_priv); - void (*activate)(struct intel_crtc *crtc); - void (*deactivate)(struct drm_i915_private *dev_priv); }; /** @@ -970,6 +1007,7 @@ struct i915_psr { unsigned busy_frontbuffer_bits; bool psr2_support; bool aux_frame_sync; + bool link_standby; }; enum intel_pch { @@ -1657,11 +1695,18 @@ struct i915_wa_reg { u32 mask; }; -#define I915_MAX_WA_REGS 16 +/* + * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only + * allowing it for RCS as we don't foresee any requirement of having + * a whitelist for other engines. When it is really required for + * other engines then the limit need to be increased. + */ +#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS) struct i915_workarounds { struct i915_wa_reg reg[I915_MAX_WA_REGS]; u32 count; + u32 hw_whitelist_count[I915_NUM_RINGS]; }; struct i915_virtual_gpu { @@ -1758,7 +1803,7 @@ struct drm_i915_private { u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; - struct i915_fbc fbc; + struct intel_fbc fbc; struct i915_drrs drrs; struct intel_opregion opregion; struct intel_vbt_data vbt; @@ -1807,6 +1852,7 @@ struct drm_i915_private { enum modeset_restore modeset_restore; struct mutex modeset_restore_lock; + struct drm_atomic_state *modeset_restore_state; struct list_head vm_list; /* Global list of all address spaces */ struct i915_gtt gtt; /* VM representing the global address space */ @@ -1993,6 +2039,9 @@ enum hdmi_force_audio { #define I915_GTT_OFFSET_NONE ((u32)-1) struct drm_i915_gem_object_ops { + unsigned int flags; +#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1 + /* Interface between the GEM object and its backing storage. 
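
The new ops->flags field introduced above lets callers test for shmem-style backing via I915_GEM_OBJECT_HAS_STRUCT_PAGE instead of poking at obj->base.filp, as the i915_gem.c hunks later in this diff do. A sketch of a backend declaring it (the get_pages/put_pages helpers are hypothetical):

    static const struct drm_i915_gem_object_ops example_shmem_ops = {
            .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
            .get_pages = example_get_pages,
            .put_pages = example_put_pages,
    };
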
* get_pages() is called once prior to the use of the associated set * of pages before to binding them into the GTT, and put_pages() is @@ -2008,6 +2057,7 @@ struct drm_i915_gem_object_ops { */ int (*get_pages)(struct drm_i915_gem_object *); void (*put_pages)(struct drm_i915_gem_object *); + int (*dmabuf_export)(struct drm_i915_gem_object *); void (*release)(struct drm_i915_gem_object *); }; @@ -2841,7 +2891,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void i915_gem_load(struct drm_device *dev); +void i915_gem_load_init(struct drm_device *dev); +void i915_gem_load_cleanup(struct drm_device *dev); void *i915_gem_object_alloc(struct drm_device *dev); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, @@ -3105,18 +3156,11 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); /* Some GGTT VM helpers */ #define i915_obj_to_ggtt(obj) \ (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) -static inline bool i915_is_ggtt(struct i915_address_space *vm) -{ - struct i915_address_space *ggtt = - &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base; - return vm == ggtt; -} static inline struct i915_hw_ppgtt * i915_vm_to_ppgtt(struct i915_address_space *vm) { WARN_ON(i915_is_ggtt(vm)); - return container_of(vm, struct i915_hw_ppgtt, base); } @@ -3254,6 +3298,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, #define I915_SHRINK_ACTIVE 0x8 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); +void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); /* i915_gem_tiling.c */ @@ -3424,16 +3469,14 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr); void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val); u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); -u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); -void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); +u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg); +void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val); u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg); void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); -u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); -void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg); void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val); u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 371bbb28c471..3d31d3ac589e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -138,10 +138,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, pinned = 0; 
mutex_lock(&dev->struct_mutex); - list_for_each_entry(vma, &ggtt->base.active_list, mm_list) + list_for_each_entry(vma, &ggtt->base.active_list, vm_link) if (vma->pin_count) pinned += vma->node.size; - list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list) + list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) if (vma->pin_count) pinned += vma->node.size; mutex_unlock(&dev->struct_mutex); @@ -272,7 +272,7 @@ drop_pages(struct drm_i915_gem_object *obj) int ret; drm_gem_object_reference(&obj->base); - list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) + list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) if (i915_vma_unbind(vma)) break; @@ -489,7 +489,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, *needs_clflush = 0; - if (!obj->base.filp) + if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) return -EINVAL; if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { @@ -2416,7 +2416,7 @@ void i915_vma_move_to_active(struct i915_vma *vma, list_move_tail(&obj->ring_list[ring->id], &ring->active_list); i915_gem_request_assign(&obj->last_read_req[ring->id], req); - list_move_tail(&vma->mm_list, &vma->vm->active_list); + list_move_tail(&vma->vm_link, &vma->vm->active_list); } static void @@ -2454,9 +2454,9 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring) list_move_tail(&obj->global_list, &to_i915(obj->base.dev)->mm.bound_list); - list_for_each_entry(vma, &obj->vma_list, vma_link) { - if (!list_empty(&vma->mm_list)) - list_move_tail(&vma->mm_list, &vma->vm->inactive_list); + list_for_each_entry(vma, &obj->vma_list, obj_link) { + if (!list_empty(&vma->vm_link)) + list_move_tail(&vma->vm_link, &vma->vm->inactive_list); } i915_gem_request_assign(&obj->last_fenced_req, NULL); @@ -2680,7 +2680,7 @@ void i915_gem_request_free(struct kref *req_ref) if (ctx) { if (i915.enable_execlists && ctx != req->i915->kernel_context) - intel_lr_context_unpin(req); + intel_lr_context_unpin(ctx, req->ring); i915_gem_context_unreference(ctx); } @@ -2970,11 +2970,9 @@ i915_gem_retire_requests(struct drm_device *dev) i915_gem_retire_requests_ring(ring); idle &= list_empty(&ring->request_list); if (i915.enable_execlists) { - unsigned long flags; - - spin_lock_irqsave(&ring->execlist_lock, flags); + spin_lock_irq(&ring->execlist_lock); idle &= list_empty(&ring->execlist_queue); - spin_unlock_irqrestore(&ring->execlist_lock, flags); + spin_unlock_irq(&ring->execlist_lock); intel_execlists_retire_requests(ring); } @@ -3319,7 +3317,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait) struct drm_i915_private *dev_priv = obj->base.dev->dev_private; int ret; - if (list_empty(&vma->vma_link)) + if (list_empty(&vma->obj_link)) return 0; if (!drm_mm_node_allocated(&vma->node)) { @@ -3338,8 +3336,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait) return ret; } - if (i915_is_ggtt(vma->vm) && - vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { + if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { i915_gem_object_finish_gtt(obj); /* release the fence reg _after_ flushing */ @@ -3353,8 +3350,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait) vma->vm->unbind_vma(vma); vma->bound = 0; - list_del_init(&vma->mm_list); - if (i915_is_ggtt(vma->vm)) { + list_del_init(&vma->vm_link); + if (vma->is_ggtt) { if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { obj->map_and_fenceable = false; } else if (vma->ggtt_view.pages) { @@ -3611,7 +3608,7 @@ search_free: goto err_remove_node; 
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); - list_add_tail(&vma->mm_list, &vm->inactive_list); + list_add_tail(&vma->vm_link, &vm->inactive_list); return vma; @@ -3776,7 +3773,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) /* And bump the LRU for this access */ vma = i915_gem_obj_to_ggtt(obj); if (vma && drm_mm_node_allocated(&vma->node) && !obj->active) - list_move_tail(&vma->mm_list, + list_move_tail(&vma->vm_link, &to_i915(obj->base.dev)->gtt.base.inactive_list); return 0; @@ -3811,7 +3808,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, * catch the issue of the CS prefetch crossing page boundaries and * reading an invalid PTE on older architectures. */ - list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { + list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) { if (!drm_mm_node_allocated(&vma->node)) continue; @@ -3874,7 +3871,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, */ } - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry(vma, &obj->vma_list, obj_link) { if (!drm_mm_node_allocated(&vma->node)) continue; @@ -3884,7 +3881,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, } } - list_for_each_entry(vma, &obj->vma_list, vma_link) + list_for_each_entry(vma, &obj->vma_list, obj_link) vma->node.color = cache_level; obj->cache_level = cache_level; @@ -4465,6 +4462,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, } static const struct drm_i915_gem_object_ops i915_gem_object_ops = { + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE, .get_pages = i915_gem_object_get_pages_gtt, .put_pages = i915_gem_object_put_pages_gtt, }; @@ -4557,7 +4555,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) trace_i915_gem_object_destroy(obj); - list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { + list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) { int ret; vma->pin_count = 0; @@ -4614,7 +4612,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm) { struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry(vma, &obj->vma_list, obj_link) { if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL && vma->vm == vm) return vma; @@ -4631,7 +4629,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, if (WARN_ONCE(!view, "no view specified")) return ERR_PTR(-EINVAL); - list_for_each_entry(vma, &obj->vma_list, vma_link) + list_for_each_entry(vma, &obj->vma_list, obj_link) if (vma->vm == ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma; @@ -4640,19 +4638,16 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, void i915_gem_vma_destroy(struct i915_vma *vma) { - struct i915_address_space *vm = NULL; WARN_ON(vma->node.allocated); /* Keep the vma as a placeholder in the execbuffer reservation lists */ if (!list_empty(&vma->exec_list)) return; - vm = vma->vm; - - if (!i915_is_ggtt(vm)) - i915_ppgtt_put(i915_vm_to_ppgtt(vm)); + if (!vma->is_ggtt) + i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); - list_del(&vma->vma_link); + list_del(&vma->obj_link); kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma); } @@ -5034,7 +5029,7 @@ init_ring_lists(struct intel_engine_cs *ring) } void -i915_gem_load(struct drm_device *dev) +i915_gem_load_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int i; @@ -5100,11 +5095,18 @@ 
i915_gem_load(struct drm_device *dev) dev_priv->mm.interruptible = true; - i915_gem_shrinker_init(dev_priv); - mutex_init(&dev_priv->fb_tracking.lock); } +void i915_gem_load_cleanup(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + + kmem_cache_destroy(dev_priv->requests); + kmem_cache_destroy(dev_priv->vmas); + kmem_cache_destroy(dev_priv->objects); +} + void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -5195,8 +5197,8 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - list_for_each_entry(vma, &o->vma_list, vma_link) { - if (i915_is_ggtt(vma->vm) && + list_for_each_entry(vma, &o->vma_list, obj_link) { + if (vma->is_ggtt && vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) continue; if (vma->vm == vm) @@ -5214,7 +5216,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct i915_vma *vma; - list_for_each_entry(vma, &o->vma_list, vma_link) + list_for_each_entry(vma, &o->vma_list, obj_link) if (vma->vm == ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma->node.start; @@ -5228,8 +5230,8 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o, { struct i915_vma *vma; - list_for_each_entry(vma, &o->vma_list, vma_link) { - if (i915_is_ggtt(vma->vm) && + list_for_each_entry(vma, &o->vma_list, obj_link) { + if (vma->is_ggtt && vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) continue; if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) @@ -5245,7 +5247,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct i915_vma *vma; - list_for_each_entry(vma, &o->vma_list, vma_link) + list_for_each_entry(vma, &o->vma_list, obj_link) if (vma->vm == ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view) && drm_mm_node_allocated(&vma->node)) @@ -5258,7 +5260,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) { struct i915_vma *vma; - list_for_each_entry(vma, &o->vma_list, vma_link) + list_for_each_entry(vma, &o->vma_list, obj_link) if (drm_mm_node_allocated(&vma->node)) return true; @@ -5275,8 +5277,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, BUG_ON(list_empty(&o->vma_list)); - list_for_each_entry(vma, &o->vma_list, vma_link) { - if (i915_is_ggtt(vma->vm) && + list_for_each_entry(vma, &o->vma_list, obj_link) { + if (vma->is_ggtt && vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) continue; if (vma->vm == vm) @@ -5288,7 +5290,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) + list_for_each_entry(vma, &obj->vma_list, obj_link) if (vma->pin_count > 0) return true; @@ -5302,7 +5304,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n) struct page *page; /* Only default objects have per-page dirty tracking */ - if (WARN_ON(obj->ops != &i915_gem_object_ops)) + if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) return NULL; page = i915_gem_object_get_page(obj, n); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 6a4f64b03db6..5dd84e148bba 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -142,7 +142,7 @@ static void i915_gem_context_clean(struct 
intel_context *ctx) return; list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, - mm_list) { + vm_link) { if (WARN_ON(__i915_vma_unbind_no_wait(vma))) break; } @@ -321,6 +321,18 @@ err_destroy: return ERR_PTR(ret); } +static void i915_gem_context_unpin(struct intel_context *ctx, + struct intel_engine_cs *engine) +{ + if (i915.enable_execlists) { + intel_lr_context_unpin(ctx, engine); + } else { + if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state) + i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); + i915_gem_context_unreference(ctx); + } +} + void i915_gem_context_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -329,22 +341,15 @@ void i915_gem_context_reset(struct drm_device *dev) if (i915.enable_execlists) { struct intel_context *ctx; - list_for_each_entry(ctx, &dev_priv->context_list, link) { + list_for_each_entry(ctx, &dev_priv->context_list, link) intel_lr_context_reset(dev, ctx); - } - - return; } for (i = 0; i < I915_NUM_RINGS; i++) { struct intel_engine_cs *ring = &dev_priv->ring[i]; - struct intel_context *lctx = ring->last_context; - - if (lctx) { - if (lctx->legacy_hw_ctx.rcs_state && i == RCS) - i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state); - i915_gem_context_unreference(lctx); + if (ring->last_context) { + i915_gem_context_unpin(ring->last_context, ring); ring->last_context = NULL; } } @@ -417,13 +422,6 @@ void i915_gem_context_fini(struct drm_device *dev) * to offset the do_switch part, so that i915_gem_context_unreference() * can then free the base object correctly. */ WARN_ON(!dev_priv->ring[RCS].last_context); - if (dev_priv->ring[RCS].last_context == dctx) { - /* Fake switch to NULL context */ - WARN_ON(dctx->legacy_hw_ctx.rcs_state->active); - i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); - i915_gem_context_unreference(dctx); - dev_priv->ring[RCS].last_context = NULL; - } i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); } @@ -432,7 +430,7 @@ void i915_gem_context_fini(struct drm_device *dev) struct intel_engine_cs *ring = &dev_priv->ring[i]; if (ring->last_context) { - i915_gem_context_unreference(ring->last_context); + i915_gem_context_unpin(ring->last_context, ring); ring->last_context = NULL; } } @@ -857,6 +855,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, if (!contexts_enabled(dev)) return -ENODEV; + if (args->pad != 0) + return -EINVAL; + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -880,6 +881,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, struct intel_context *ctx; int ret; + if (args->pad != 0) + return -EINVAL; + if (args->ctx_id == DEFAULT_CONTEXT_HANDLE) return -ENOENT; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 07c6e4d320c9..ea1f8d1bd228 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -116,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, search_again: /* First see if there is a large enough contiguous idle region... */ - list_for_each_entry(vma, &vm->inactive_list, mm_list) { + list_for_each_entry(vma, &vm->inactive_list, vm_link) { if (mark_free(vma, &unwind_list)) goto found; } @@ -125,7 +125,7 @@ search_again: goto none; /* Now merge in the soon-to-be-expired objects... 
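
The new i915_gem_context_unpin() helper above folds the execlists and legacy last-context teardown into one path, so reset and fini cannot diverge. Its call pattern, as used by both callers in the diff:

    for (i = 0; i < I915_NUM_RINGS; i++) {
            struct intel_engine_cs *ring = &dev_priv->ring[i];

            if (ring->last_context) {
                    i915_gem_context_unpin(ring->last_context, ring);
                    ring->last_context = NULL;
            }
    }
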
*/ - list_for_each_entry(vma, &vm->active_list, mm_list) { + list_for_each_entry(vma, &vm->active_list, vm_link) { if (mark_free(vma, &unwind_list)) goto found; } @@ -270,7 +270,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) WARN_ON(!list_empty(&vm->active_list)); } - list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) + list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link) if (vma->pin_count == 0) WARN_ON(i915_vma_unbind(vma)); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2dc08ce1079a..1328bc5021b4 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -668,7 +668,7 @@ need_reloc_mappable(struct i915_vma *vma) if (entry->relocation_count == 0) return false; - if (!i915_is_ggtt(vma->vm)) + if (!vma->is_ggtt) return false; /* See also use_cpu_reloc() */ @@ -687,8 +687,7 @@ eb_vma_misplaced(struct i915_vma *vma) struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; struct drm_i915_gem_object *obj = vma->obj; - WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && - !i915_is_ggtt(vma->vm)); + WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt); if (entry->alignment && vma->node.start & (entry->alignment - 1)) @@ -1401,6 +1400,7 @@ eb_select_ring(struct drm_i915_private *dev_priv, bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file); } else if (bsd_idx >= I915_EXEC_BSD_RING1 && bsd_idx <= I915_EXEC_BSD_RING2) { + bsd_idx >>= I915_EXEC_BSD_SHIFT; bsd_idx--; } else { DRM_DEBUG("execbuf with unknown bsd ring: %u\n", @@ -1654,7 +1654,7 @@ err: * must be freed again. If it was submitted then it is being tracked * on the active request list and no clean up is required here. */ - if (ret && req) + if (ret && !IS_ERR_OR_NULL(req)) i915_gem_request_cancel(req); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7377b6725c33..49e4f26b79d8 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2132,6 +2132,25 @@ static void i915_address_space_init(struct i915_address_space *vm, list_add_tail(&vm->global_link, &dev_priv->vm_list); } +static void gtt_write_workarounds(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + /* This function is for gtt related workarounds. This function is + * called on driver load and after a GPU reset, so you can place + * workarounds here even if they get overwritten by GPU reset. + */ + /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */ + if (IS_BROADWELL(dev)) + I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); + else if (IS_CHERRYVIEW(dev)) + I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); + else if (IS_SKYLAKE(dev)) + I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); + else if (IS_BROXTON(dev)) + I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); +} + int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2148,6 +2167,8 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) int i915_ppgtt_init_hw(struct drm_device *dev) { + gtt_write_workarounds(dev); + /* In the case of execlists, PPGTT is enabled by the context descriptor * and the PDPs are contained within the context itself. We don't * need to do anything here. 
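
The eb_select_ring() fix above adds the missing right shift when decoding the BSD ring selector from the execbuffer2 flags. Spelled out as a standalone decode, using the uapi constants the hunk relies on:

    unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

    if (bsd_idx >= I915_EXEC_BSD_RING1 && bsd_idx <= I915_EXEC_BSD_RING2) {
            /* The selector occupies bits above I915_EXEC_BSD_SHIFT, so
             * shift it down before converting the 1-based ring number
             * into a 0-based index. */
            bsd_idx >>= I915_EXEC_BSD_SHIFT;
            bsd_idx--;
    }
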
*/ @@ -2737,7 +2758,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, } vma->bound |= GLOBAL_BIND; __i915_vma_set_map_and_fenceable(vma); - list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list); + list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list); } /* Clear any non-preallocated blocks */ @@ -2809,6 +2830,8 @@ void i915_global_gtt_cleanup(struct drm_device *dev) ppgtt->base.cleanup(&ppgtt->base); } + i915_gem_cleanup_stolen(dev); + if (drm_mm_initialized(&vm->mm)) { if (intel_vgpu_active(dev)) intel_vgt_deballoon(); @@ -3175,12 +3198,21 @@ int i915_gem_gtt_init(struct drm_device *dev) } gtt->base.dev = dev; + gtt->base.is_ggtt = true; ret = gtt->gtt_probe(dev, >t->base.total, >t->stolen_size, >t->mappable_base, >t->mappable_end); if (ret) return ret; + /* + * Initialise stolen early so that we may reserve preallocated + * objects for the BIOS to KMS transition. + */ + ret = i915_gem_init_stolen(dev); + if (ret) + goto out_gtt_cleanup; + /* GMADR is the PCI mmio aperture into the global GTT. */ DRM_INFO("Memory usable by graphics device = %lluM\n", gtt->base.total >> 20); @@ -3200,6 +3232,11 @@ int i915_gem_gtt_init(struct drm_device *dev) DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); return 0; + +out_gtt_cleanup: + gtt->base.cleanup(&dev_priv->gtt.base); + + return ret; } void i915_gem_restore_gtt_mappings(struct drm_device *dev) @@ -3222,7 +3259,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) vm = &dev_priv->gtt.base; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { flush = false; - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry(vma, &obj->vma_list, obj_link) { if (vma->vm != vm) continue; @@ -3278,19 +3315,20 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj, if (vma == NULL) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&vma->vma_link); - INIT_LIST_HEAD(&vma->mm_list); + INIT_LIST_HEAD(&vma->vm_link); + INIT_LIST_HEAD(&vma->obj_link); INIT_LIST_HEAD(&vma->exec_list); vma->vm = vm; vma->obj = obj; + vma->is_ggtt = i915_is_ggtt(vm); if (i915_is_ggtt(vm)) vma->ggtt_view = *ggtt_view; - - list_add_tail(&vma->vma_link, &obj->vma_list); - if (!i915_is_ggtt(vm)) + else i915_ppgtt_get(i915_vm_to_ppgtt(vm)); + list_add_tail(&vma->obj_link, &obj->vma_list); + return vma; } @@ -3333,6 +3371,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, static struct scatterlist * rotate_pages(const dma_addr_t *in, unsigned int offset, unsigned int width, unsigned int height, + unsigned int stride, struct sg_table *st, struct scatterlist *sg) { unsigned int column, row; @@ -3344,7 +3383,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, } for (column = 0; column < width; column++) { - src_idx = width * (height - 1) + column; + src_idx = stride * (height - 1) + column; for (row = 0; row < height; row++) { st->nents++; /* We don't need the pages, but need to initialize @@ -3355,7 +3394,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, sg_dma_address(sg) = in[offset + src_idx]; sg_dma_len(sg) = PAGE_SIZE; sg = sg_next(sg); - src_idx -= width; + src_idx -= stride; } } @@ -3363,10 +3402,9 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, } static struct sg_table * -intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, +intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { - struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info; unsigned int size_pages = rot_info->size >> PAGE_SHIFT; unsigned int 
size_pages_uv; struct sg_page_iter sg_iter; @@ -3408,6 +3446,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, /* Rotate the pages. */ sg = rotate_pages(page_addr_list, 0, rot_info->width_pages, rot_info->height_pages, + rot_info->width_pages, st, NULL); /* Append the UV plane if NV12. */ @@ -3423,6 +3462,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, rotate_pages(page_addr_list, uv_start_page, rot_info->width_pages_uv, rot_info->height_pages_uv, + rot_info->width_pages_uv, st, sg); } @@ -3504,7 +3544,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma) vma->ggtt_view.pages = vma->obj->pages; else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) vma->ggtt_view.pages = - intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj); + intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj); else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL) vma->ggtt_view.pages = intel_partial_pages(&vma->ggtt_view, vma->obj); @@ -3560,13 +3600,9 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, return 0; if (vma->bound == 0 && vma->vm->allocate_va_range) { - trace_i915_va_alloc(vma->vm, - vma->node.start, - vma->node.size, - VM_TO_TRACE_NAME(vma->vm)); - /* XXX: i915_vma_pin() will fix this +- hack */ vma->pin_count++; + trace_i915_va_alloc(vma); ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->node.size); @@ -3598,7 +3634,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, if (view->type == I915_GGTT_VIEW_NORMAL) { return obj->base.size; } else if (view->type == I915_GGTT_VIEW_ROTATED) { - return view->params.rotation_info.size; + return view->params.rotated.size; } else if (view->type == I915_GGTT_VIEW_PARTIAL) { return view->params.partial.size << PAGE_SHIFT; } else { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index e5737963ab79..8774f1ba46e7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -155,7 +155,7 @@ struct i915_ggtt_view { u64 offset; unsigned int size; } partial; - struct intel_rotation_info rotation_info; + struct intel_rotation_info rotated; } params; struct sg_table *pages; @@ -183,6 +183,7 @@ struct i915_vma { #define GLOBAL_BIND (1<<0) #define LOCAL_BIND (1<<1) unsigned int bound : 4; + bool is_ggtt : 1; /** * Support different GGTT views into the same object. @@ -194,9 +195,9 @@ struct i915_vma { struct i915_ggtt_view ggtt_view; /** This object's place on the active/inactive lists */ - struct list_head mm_list; + struct list_head vm_link; - struct list_head vma_link; /* Link in the object's VMA list */ + struct list_head obj_link; /* Link in the object's VMA list */ /** This vma's place in the batchbuffer or on the eviction list */ struct list_head exec_list; @@ -275,6 +276,8 @@ struct i915_address_space { u64 start; /* Start offset always 0 for dri2 */ u64 total; /* size addr space maps (ex. 2GB for ggtt) */ + bool is_ggtt; + struct i915_page_scratch *scratch_page; struct i915_page_table *scratch_pt; struct i915_page_directory *scratch_pd; @@ -330,6 +333,8 @@ struct i915_address_space { u32 flags); }; +#define i915_is_ggtt(V) ((V)->is_ggtt) + /* The Graphics Translation Table is the way in which GEN hardware translates a * Graphics Virtual Address into a Physical Address. 
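
rotate_pages() above takes the row pitch as an explicit stride instead of reusing the tile width; both call sites in this commit still pass the width, but the walk itself now steps by the pitch. A worked example with hypothetical sizes:

    /* Bottom-to-top walk of column 0 in a 2-row tile whose surface
     * pitch is 8 pages: start at stride * (height - 1) and subtract
     * the pitch, not the tile width, for each row. */
    unsigned int height = 2, stride = 8, column = 0;
    unsigned int src_idx = stride * (height - 1) + column;  /* 8 */
    unsigned int row;

    for (row = 0; row < height; row++) {
            /* pages visited: 8, then 0 */
            src_idx -= stride;
    }
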
In addition to the normal * collateral associated with any va->pa translations GEN hardware also has a @@ -342,6 +347,8 @@ struct i915_gtt { size_t stolen_size; /* Total size of stolen memory */ size_t stolen_usable_size; /* Total size minus BIOS reserved */ + size_t stolen_reserved_base; + size_t stolen_reserved_size; u64 mappable_end; /* End offset that we can CPU map */ struct io_mapping *mappable; /* Mapping to our CPU mappable region */ phys_addr_t mappable_base; /* PA of our GMADR */ @@ -416,7 +423,7 @@ static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift) static inline uint32_t i915_pte_count(uint64_t addr, size_t length, uint32_t pde_shift) { - const uint64_t mask = ~((1 << pde_shift) - 1); + const uint64_t mask = ~((1ULL << pde_shift) - 1); uint64_t end; WARN_ON(length == 0); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 16da9c1422cc..d3c473ffb90a 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -52,7 +52,7 @@ static int num_vma_bound(struct drm_i915_gem_object *obj) struct i915_vma *vma; int count = 0; - list_for_each_entry(vma, &obj->vma_list, vma_link) { + list_for_each_entry(vma, &obj->vma_list, obj_link) { if (drm_mm_node_allocated(&vma->node)) count++; if (vma->pin_count) @@ -176,7 +176,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, /* For the unbound phase, this should be a no-op! */ list_for_each_entry_safe(vma, v, - &obj->vma_list, vma_link) + &obj->vma_list, obj_link) if (i915_vma_unbind(vma)) break; @@ -367,8 +367,20 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan; dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count; dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS; - register_shrinker(&dev_priv->mm.shrinker); + WARN_ON(register_shrinker(&dev_priv->mm.shrinker)); dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; - register_oom_notifier(&dev_priv->mm.oom_notifier); + WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier)); +} + +/** + * i915_gem_shrinker_cleanup - Clean up i915 shrinker + * @dev_priv: i915 device + * + * This function unregisters the i915 shrinker and OOM handler. + */ +void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv) +{ + WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); + unregister_shrinker(&dev_priv->mm.shrinker); } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index c384dc9c8a63..2e6e9fb6f80d 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -458,6 +458,9 @@ int i915_gem_init_stolen(struct drm_device *dev) return 0; } + dev_priv->gtt.stolen_reserved_base = reserved_base; + dev_priv->gtt.stolen_reserved_size = reserved_size; + /* It is possible for the reserved area to end before the end of stolen * memory, so just consider the start. 
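
The i915_pte_count() change above widens the shifted literal; without the ULL suffix the mask is computed in plain int and breaks once the shift reaches the width of int. A minimal illustration:

    uint32_t shift = 32;
    uint64_t bad  = ~((1   << shift) - 1);  /* undefined: int overflow */
    uint64_t good = ~((1ULL << shift) - 1); /* 0xffffffff00000000 as intended */
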
*/ reserved_total = stolen_top - reserved_base; @@ -635,6 +638,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (!drm_mm_initialized(&dev_priv->mm.stolen)) return NULL; + lockdep_assert_held(&dev->struct_mutex); + DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n", stolen_offset, gtt_offset, size); @@ -692,7 +697,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, vma->bound |= GLOBAL_BIND; __i915_vma_set_map_and_fenceable(vma); - list_add_tail(&vma->mm_list, &ggtt->inactive_list); + list_add_tail(&vma->vm_link, &ggtt->inactive_list); } list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 19fb0bddc1cd..4b09c840d493 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -49,21 +49,18 @@ struct i915_mmu_notifier { struct hlist_node node; struct mmu_notifier mn; struct rb_root objects; - struct list_head linear; - bool has_linear; }; struct i915_mmu_object { struct i915_mmu_notifier *mn; + struct drm_i915_gem_object *obj; struct interval_tree_node it; struct list_head link; - struct drm_i915_gem_object *obj; struct work_struct work; - bool active; - bool is_linear; + bool attached; }; -static void __cancel_userptr__worker(struct work_struct *work) +static void cancel_userptr(struct work_struct *work) { struct i915_mmu_object *mo = container_of(work, typeof(*mo), work); struct drm_i915_gem_object *obj = mo->obj; @@ -81,7 +78,7 @@ static void __cancel_userptr__worker(struct work_struct *work) was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = false; - list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) { + list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) { int ret = i915_vma_unbind(vma); WARN_ON(ret && ret != -EIO); } @@ -94,24 +91,22 @@ static void __cancel_userptr__worker(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -static unsigned long cancel_userptr(struct i915_mmu_object *mo) +static void add_object(struct i915_mmu_object *mo) { - unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size; - - /* The mmu_object is released late when destroying the - * GEM object so it is entirely possible to gain a - * reference on an object in the process of being freed - * since our serialisation is via the spinlock and not - * the struct_mutex - and consequently use it after it - * is freed and then double free it. 
- */ - if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) { - schedule_work(&mo->work); - /* only schedule one work packet to avoid the refleak */ - mo->active = false; - } + if (mo->attached) + return; + + interval_tree_insert(&mo->it, &mo->mn->objects); + mo->attached = true; +} - return end; +static void del_object(struct i915_mmu_object *mo) +{ + if (!mo->attached) + return; + + interval_tree_remove(&mo->it, &mo->mn->objects); + mo->attached = false; } static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, @@ -122,28 +117,36 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); struct i915_mmu_object *mo; + struct interval_tree_node *it; + LIST_HEAD(cancelled); + + if (RB_EMPTY_ROOT(&mn->objects)) + return; /* interval ranges are inclusive, but invalidate range is exclusive */ end--; spin_lock(&mn->lock); - if (mn->has_linear) { - list_for_each_entry(mo, &mn->linear, link) { - if (mo->it.last < start || mo->it.start > end) - continue; - - cancel_userptr(mo); - } - } else { - struct interval_tree_node *it; + it = interval_tree_iter_first(&mn->objects, start, end); + while (it) { + /* The mmu_object is released late when destroying the + * GEM object so it is entirely possible to gain a + * reference on an object in the process of being freed + * since our serialisation is via the spinlock and not + * the struct_mutex - and consequently use it after it + * is freed and then double free it. To prevent that + * use-after-free we only acquire a reference on the + * object if it is not in the process of being destroyed. + */ + mo = container_of(it, struct i915_mmu_object, it); + if (kref_get_unless_zero(&mo->obj->base.refcount)) + schedule_work(&mo->work); - it = interval_tree_iter_first(&mn->objects, start, end); - while (it) { - mo = container_of(it, struct i915_mmu_object, it); - start = cancel_userptr(mo); - it = interval_tree_iter_next(it, start, end); - } + list_add(&mo->link, &cancelled); + it = interval_tree_iter_next(it, start, end); } + list_for_each_entry(mo, &cancelled, link) + del_object(mo); spin_unlock(&mn->lock); } @@ -164,8 +167,6 @@ i915_mmu_notifier_create(struct mm_struct *mm) spin_lock_init(&mn->lock); mn->mn.ops = &i915_gem_userptr_notifier; mn->objects = RB_ROOT; - INIT_LIST_HEAD(&mn->linear); - mn->has_linear = false; /* Protected by mmap_sem (write-lock) */ ret = __mmu_notifier_register(&mn->mn, mm); @@ -177,85 +178,6 @@ i915_mmu_notifier_create(struct mm_struct *mm) return mn; } -static int -i915_mmu_notifier_add(struct drm_device *dev, - struct i915_mmu_notifier *mn, - struct i915_mmu_object *mo) -{ - struct interval_tree_node *it; - int ret = 0; - - /* By this point we have already done a lot of expensive setup that - * we do not want to repeat just because the caller (e.g. X) has a - * signal pending (and partly because of that expensive setup, X - * using an interrupt timer is likely to get stuck in an EINTR loop). - */ - mutex_lock(&dev->struct_mutex); - - /* Make sure we drop the final active reference (and thereby - * remove the objects from the interval tree) before we do - * the check for overlapping objects. 
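Two details of the rewritten invalidate handler deserve spelling out. First, the interval tree uses inclusive bounds, hence the end-- before the lookup. Second, interval_tree_iter_next() takes the current node as its cursor, so nodes must not be removed while the walk is in flight; that is why hits are collected on a local list and only detached after the loop. A sketch of the safe shape (simplified, no locking shown):

#include <linux/rbtree.h>
#include <linux/interval_tree.h>

/* Visit every node overlapping [start, last] -- both ends inclusive.
 * Removal is deferred because iter_next() needs the current node. */
static void visit_range(struct rb_root *root,
                        unsigned long start, unsigned long last)
{
        struct interval_tree_node *it;

        for (it = interval_tree_iter_first(root, start, last);
             it != NULL;
             it = interval_tree_iter_next(it, start, last)) {
                /* mark/collect 'it' here; call interval_tree_remove()
                 * only after the walk completes */
        }
}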
- */ - i915_gem_retire_requests(dev); - - spin_lock(&mn->lock); - it = interval_tree_iter_first(&mn->objects, - mo->it.start, mo->it.last); - if (it) { - struct drm_i915_gem_object *obj; - - /* We only need to check the first object in the range as it - * either has cancelled gup work queued and we need to - * return back to the user to give time for the gup-workers - * to flush their object references upon which the object will - * be removed from the interval-tree, or the the range is - * still in use by another client and the overlap is invalid. - * - * If we do have an overlap, we cannot use the interval tree - * for fast range invalidation. - */ - - obj = container_of(it, struct i915_mmu_object, it)->obj; - if (!obj->userptr.workers) - mn->has_linear = mo->is_linear = true; - else - ret = -EAGAIN; - } else - interval_tree_insert(&mo->it, &mn->objects); - - if (ret == 0) - list_add(&mo->link, &mn->linear); - - spin_unlock(&mn->lock); - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn) -{ - struct i915_mmu_object *mo; - - list_for_each_entry(mo, &mn->linear, link) - if (mo->is_linear) - return true; - - return false; -} - -static void -i915_mmu_notifier_del(struct i915_mmu_notifier *mn, - struct i915_mmu_object *mo) -{ - spin_lock(&mn->lock); - list_del(&mo->link); - if (mo->is_linear) - mn->has_linear = i915_mmu_notifier_has_linear(mn); - else - interval_tree_remove(&mo->it, &mn->objects); - spin_unlock(&mn->lock); -} - static void i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) { @@ -265,7 +187,9 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) if (mo == NULL) return; - i915_mmu_notifier_del(mo->mn, mo); + spin_lock(&mo->mn->lock); + del_object(mo); + spin_unlock(&mo->mn->lock); kfree(mo); obj->userptr.mmu_object = NULL; @@ -299,7 +223,6 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, { struct i915_mmu_notifier *mn; struct i915_mmu_object *mo; - int ret; if (flags & I915_USERPTR_UNSYNCHRONIZED) return capable(CAP_SYS_ADMIN) ? 0 : -EPERM; @@ -316,16 +239,10 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, return -ENOMEM; mo->mn = mn; - mo->it.start = obj->userptr.ptr; - mo->it.last = mo->it.start + obj->base.size - 1; mo->obj = obj; - INIT_WORK(&mo->work, __cancel_userptr__worker); - - ret = i915_mmu_notifier_add(obj->base.dev, mn, mo); - if (ret) { - kfree(mo); - return ret; - } + mo->it.start = obj->userptr.ptr; + mo->it.last = obj->userptr.ptr + obj->base.size - 1; + INIT_WORK(&mo->work, cancel_userptr); obj->userptr.mmu_object = mo; return 0; @@ -552,8 +469,10 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, /* In order to serialise get_pages with an outstanding * cancel_userptr, we must drop the struct_mutex and try again. 
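With the linear-list fallback gone, the release path collapses to a lock/detach/unlock/free sequence; del_object() is guarded by mo->attached, so it is safe whether or not the object ever made it into the tree. Roughly the shape of the new i915_gem_userptr_release__mmu_notifier():

static void release_mmu_object(struct i915_mmu_object *mo)
{
        spin_lock(&mo->mn->lock);
        del_object(mo);                 /* no-op if never attached */
        spin_unlock(&mo->mn->lock);

        kfree(mo);                      /* freed outside the lock */
}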
*/ - if (!value || !work_pending(&obj->userptr.mmu_object->work)) - obj->userptr.mmu_object->active = value; + if (!value) + del_object(obj->userptr.mmu_object); + else if (!work_pending(&obj->userptr.mmu_object->work)) + add_object(obj->userptr.mmu_object); else ret = -EAGAIN; spin_unlock(&obj->userptr.mmu_object->mn->lock); @@ -789,9 +708,10 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) } static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = { - .dmabuf_export = i915_gem_userptr_dmabuf_export, + .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE, .get_pages = i915_gem_userptr_get_pages, .put_pages = i915_gem_userptr_put_pages, + .dmabuf_export = i915_gem_userptr_dmabuf_export, .release = i915_gem_userptr_release, }; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 7eeb24427785..831895b8cb75 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -365,6 +365,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, err_printf(m, "Reset count: %u\n", error->reset_count); err_printf(m, "Suspend count: %u\n", error->suspend_count); err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device); + err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision); + err_printf(m, "PCI Subsystem: %04x:%04x\n", + dev->pdev->subsystem_vendor, + dev->pdev->subsystem_device); err_printf(m, "IOMMU enabled?: %d\n", error->iommu); if (HAS_CSR(dev)) { @@ -732,7 +736,7 @@ static u32 capture_active_bo(struct drm_i915_error_buffer *err, struct i915_vma *vma; int i = 0; - list_for_each_entry(vma, head, mm_list) { + list_for_each_entry(vma, head, vm_link) { capture_bo(err++, vma); if (++i == count) break; @@ -755,7 +759,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, if (err == last) break; - list_for_each_entry(vma, &obj->vma_list, vma_link) + list_for_each_entry(vma, &obj->vma_list, obj_link) if (vma->vm == vm && vma->pin_count > 0) capture_bo(err++, vma); } @@ -1123,12 +1127,12 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv, int i; i = 0; - list_for_each_entry(vma, &vm->active_list, mm_list) + list_for_each_entry(vma, &vm->active_list, vm_link) i++; error->active_bo_count[ndx] = i; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - list_for_each_entry(vma, &obj->vma_list, vma_link) + list_for_each_entry(vma, &obj->vma_list, obj_link) if (vma->vm == vm && vma->pin_count > 0) i++; } diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 51ae5c1f806d..d7543efc8a5e 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -376,6 +376,8 @@ static void guc_init_proc_desc(struct intel_guc *guc, static void guc_init_ctx_desc(struct intel_guc *guc, struct i915_guc_client *client) { + struct drm_i915_private *dev_priv = guc_to_i915(guc); + struct intel_engine_cs *ring; struct intel_context *ctx = client->owner; struct guc_context_desc desc; struct sg_table *sg; @@ -388,10 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.priority = client->priority; desc.db_id = client->doorbell_id; - for (i = 0; i < I915_NUM_RINGS; i++) { - struct guc_execlist_context *lrc = &desc.lrc[i]; - struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; - struct intel_engine_cs *ring; + for_each_ring(ring, dev_priv, i) { + struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id]; struct drm_i915_gem_object *obj; uint64_t ctx_desc; @@ -406,7 +406,6 
@@ static void guc_init_ctx_desc(struct intel_guc *guc, if (!obj) break; /* XXX: continue? */ - ring = ringbuf->ring; ctx_desc = intel_lr_context_descriptor(ctx, ring); lrc->context_desc = (u32)ctx_desc; @@ -414,16 +413,16 @@ static void guc_init_ctx_desc(struct intel_guc *guc, lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) + LRC_STATE_PN * PAGE_SIZE; lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | - (ring->id << GUC_ELC_ENGINE_OFFSET); + (ring->guc_id << GUC_ELC_ENGINE_OFFSET); - obj = ringbuf->obj; + obj = ctx->engine[i].ringbuf->obj; lrc->ring_begin = i915_gem_obj_ggtt_offset(obj); lrc->ring_end = lrc->ring_begin + obj->base.size - 1; lrc->ring_next_free_location = lrc->ring_begin; lrc->ring_current_tail_pointer_value = 0; - desc.engines_used |= (1 << ring->id); + desc.engines_used |= (1 << ring->guc_id); } WARN_ON(desc.engines_used == 0); @@ -510,7 +509,6 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc) static int guc_add_workqueue_item(struct i915_guc_client *gc, struct drm_i915_gem_request *rq) { - enum intel_ring_id ring_id = rq->ring->id; struct guc_wq_item *wqi; void *base; u32 tail, wq_len, wq_off, space; @@ -544,7 +542,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1; wqi->header = WQ_TYPE_INORDER | (wq_len << WQ_LEN_SHIFT) | - (ring_id << WQ_TARGET_SHIFT) | + (rq->ring->guc_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT; /* The GuC wants only the low-order word of the context descriptor */ @@ -560,29 +558,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, return 0; } -#define CTX_RING_BUFFER_START 0x08 - -/* Update the ringbuffer pointer in a saved context image */ -static void lr_context_update(struct drm_i915_gem_request *rq) -{ - enum intel_ring_id ring_id = rq->ring->id; - struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state; - struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; - struct page *page; - uint32_t *reg_state; - - BUG_ON(!ctx_obj); - WARN_ON(!i915_gem_obj_is_pinned(ctx_obj)); - WARN_ON(!i915_gem_obj_is_pinned(rb_obj)); - - page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); - reg_state = kmap_atomic(page); - - reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj); - - kunmap_atomic(reg_state); -} - /** * i915_guc_submit() - Submit commands through GuC * @client: the guc client where commands will go through @@ -594,18 +569,14 @@ int i915_guc_submit(struct i915_guc_client *client, struct drm_i915_gem_request *rq) { struct intel_guc *guc = client->guc; - enum intel_ring_id ring_id = rq->ring->id; + unsigned int engine_id = rq->ring->guc_id; int q_ret, b_ret; - /* Need this because of the deferred pin ctx and ring */ - /* Shall we move this right after ring is pinned? 
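The recurring ring->id to ring->guc_id substitution in this file is easy to misread: the driver's engine index and the index the GuC firmware understands are different namespaces, and the submission/seqno arrays must be indexed by the same one everywhere, or writers and readers disagree. An illustrative reduction (made-up ids, not the real mapping):

#include <stdint.h>

enum drv_engine { DRV_RCS, DRV_VCS, DRV_BCS, DRV_NUM };

/* hypothetical firmware ids -- the point is only that they can differ
 * from the driver's enum values */
static const unsigned int fw_id[DRV_NUM] = {
        [DRV_RCS] = 0,
        [DRV_VCS] = 2,
        [DRV_BCS] = 1,
};

static uint64_t submissions[DRV_NUM];

static void account_submission(enum drv_engine e)
{
        /* index by the firmware id, matching what the debugfs stat
         * dumpers earlier in this patch now do as well */
        submissions[fw_id[e]]++;
}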
*/ - lr_context_update(rq); - q_ret = guc_add_workqueue_item(client, rq); if (q_ret == 0) b_ret = guc_ring_doorbell(client); - client->submissions[ring_id] += 1; + client->submissions[engine_id] += 1; if (q_ret) { client->q_fail += 1; client->retcode = q_ret; @@ -615,8 +586,8 @@ int i915_guc_submit(struct i915_guc_client *client, } else { client->retcode = 0; } - guc->submissions[ring_id] += 1; - guc->last_seqno[ring_id] = rq->seqno; + guc->submissions[engine_id] += 1; + guc->last_seqno[engine_id] = rq->seqno; return q_ret; } @@ -848,7 +819,7 @@ static void init_guc_policies(struct guc_policies *policies) policies->max_num_work_items = POLICY_MAX_NUM_WI; for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) { - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) { policy = &policies->policy[p][i]; policy->execution_quantum = 1000000; @@ -900,7 +871,7 @@ static void guc_create_ads(struct intel_guc *guc) ads->golden_context_lrca = ring->status_page.gfx_addr; for_each_ring(ring, dev_priv, i) - ads->eng_state_size[i] = intel_lr_context_size(ring); + ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring); /* GuC scheduling policies */ policies = (void *)ads + sizeof(struct guc_ads); @@ -912,12 +883,12 @@ static void guc_create_ads(struct intel_guc *guc) /* MMIO reg state */ reg_state = (void *)policies + sizeof(struct guc_policies); - for (i = 0; i < I915_NUM_RINGS; i++) { - reg_state->mmio_white_list[i].mmio_start = - dev_priv->ring[i].mmio_base + GUC_MMIO_WHITE_LIST_START; + for_each_ring(ring, dev_priv, i) { + reg_state->mmio_white_list[ring->guc_id].mmio_start = + ring->mmio_base + GUC_MMIO_WHITE_LIST_START; /* Nothing to be saved or restored for now. */ - reg_state->mmio_white_list[i].count = 0; + reg_state->mmio_white_list[ring->guc_id].count = 0; } ads->reg_state_addr = ads->scheduler_policies + diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 25a89373df63..d1a46ef5ab3f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1651,6 +1651,12 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) int pipe; spin_lock(&dev_priv->irq_lock); + + if (!dev_priv->display_irqs_enabled) { + spin_unlock(&dev_priv->irq_lock); + return; + } + for_each_pipe(dev_priv, pipe) { i915_reg_t reg; u32 mask, iir_bit = 0; @@ -3343,21 +3349,28 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, unsigned int pipe_mask) { uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; + enum pipe pipe; + + spin_lock_irq(&dev_priv->irq_lock); + for_each_pipe_masked(dev_priv, pipe, pipe_mask) + GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, + dev_priv->de_irq_mask[pipe], + ~dev_priv->de_irq_mask[pipe] | extra_ier); + spin_unlock_irq(&dev_priv->irq_lock); +} + +void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, + unsigned int pipe_mask) +{ + enum pipe pipe; spin_lock_irq(&dev_priv->irq_lock); - if (pipe_mask & 1 << PIPE_A) - GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, - dev_priv->de_irq_mask[PIPE_A], - ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); - if (pipe_mask & 1 << PIPE_B) - GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, - dev_priv->de_irq_mask[PIPE_B], - ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); - if (pipe_mask & 1 << PIPE_C) - GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, - dev_priv->de_irq_mask[PIPE_C], - ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); + for_each_pipe_masked(dev_priv, pipe, pipe_mask) + GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); spin_unlock_irq(&dev_priv->irq_lock); + 
+ /* make sure we're done processing display irqs */ + synchronize_irq(dev_priv->dev->irq); } static void cherryview_irq_preinstall(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 8d90c256520a..278c9c40c2e0 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -38,7 +38,7 @@ struct i915_params i915 __read_mostly = { .enable_execlists = -1, .enable_hangcheck = true, .enable_ppgtt = -1, - .enable_psr = 0, + .enable_psr = -1, .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), .disable_power_well = -1, .enable_ips = 1, @@ -49,7 +49,6 @@ struct i915_params i915 __read_mostly = { .invert_brightness = 0, .disable_display = 0, .enable_cmd_parser = 1, - .disable_vtd_wa = 0, .use_mmio_flip = 0, .mmio_debug = 0, .verbose_state_checks = 1, @@ -92,7 +91,7 @@ MODULE_PARM_DESC(enable_fbc, "Enable frame buffer compression for power savings " "(default: -1 (use per-chip default))"); -module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600); +module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400); MODULE_PARM_DESC(lvds_channel_mode, "Specify LVDS channel mode " "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); @@ -102,7 +101,7 @@ MODULE_PARM_DESC(lvds_use_ssc, "Use Spread Spectrum Clock with panels [LVDS/eDP] " "(default: auto from VBT)"); -module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600); +module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400); MODULE_PARM_DESC(vbt_sdvo_panel_type, "Override/Ignore selection of SDVO panel mode in the VBT " "(-2=ignore, -1=auto [default], index in VBT BIOS table)"); @@ -127,9 +126,11 @@ MODULE_PARM_DESC(enable_execlists, "(-1=auto [default], 0=disabled, 1=enabled)"); module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600); -MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); +MODULE_PARM_DESC(enable_psr, "Enable PSR " + "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) " + "Default: -1 (use per-chip default)"); -module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); +module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0400); MODULE_PARM_DESC(preliminary_hw_support, "Enable preliminary hardware support."); @@ -163,12 +164,9 @@ MODULE_PARM_DESC(invert_brightness, "to dri-devel@lists.freedesktop.org, if your machine needs it. 
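The quieter change threaded through the i915_params.c hunk below is the 0600 to 0400 permission flip on several parameters: they are consulted once at load/probe time, so exposing them read-write under /sys/module/i915/parameters/ only invited runtime writes that had no effect. In module_param terms (generic sketch, not the driver's actual table):

#include <linux/moduleparam.h>

static int channel_mode;        /* hypothetical probe-time-only knob */

/* 0400: root can read the effective value via
 * /sys/module/<module>/parameters/, but cannot change it at runtime,
 * since nothing would re-read it anyway. */
module_param(channel_mode, int, 0400);
MODULE_PARM_DESC(channel_mode, "sampled once at probe");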
" "It will then be included in an upcoming module version."); -module_param_named(disable_display, i915.disable_display, bool, 0600); +module_param_named(disable_display, i915.disable_display, bool, 0400); MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); -module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600); -MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"); - module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); MODULE_PARM_DESC(enable_cmd_parser, "Enable command parsing (1=enabled [default], 0=disabled)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 529929073120..bd5026b15d3e 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -56,7 +56,6 @@ struct i915_params { bool load_detect_test; bool reset; bool disable_display; - bool disable_vtd_wa; bool enable_guc_submission; bool verbose_state_checks; bool nuclear_pageflip; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0a988895165f..f76cbf3e5d1e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -610,16 +610,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IOSF_BYTE_ENABLES_SHIFT 4 #define IOSF_BAR_SHIFT 1 #define IOSF_SB_BUSY (1<<0) -#define IOSF_PORT_BUNIT 0x3 -#define IOSF_PORT_PUNIT 0x4 +#define IOSF_PORT_BUNIT 0x03 +#define IOSF_PORT_PUNIT 0x04 #define IOSF_PORT_NC 0x11 #define IOSF_PORT_DPIO 0x12 -#define IOSF_PORT_DPIO_2 0x1a #define IOSF_PORT_GPIO_NC 0x13 #define IOSF_PORT_CCK 0x14 -#define IOSF_PORT_CCU 0xA9 -#define IOSF_PORT_GPS_CORE 0x48 -#define IOSF_PORT_FLISDSI 0x1B +#define IOSF_PORT_DPIO_2 0x1a +#define IOSF_PORT_FLISDSI 0x1b +#define IOSF_PORT_GPIO_SC 0x48 +#define IOSF_PORT_GPIO_SUS 0xa8 +#define IOSF_PORT_CCU 0xa9 #define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104) #define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108) @@ -1635,6 +1636,9 @@ enum skl_disp_power_wells { #define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */ +#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4) +#define RING_MAX_NONPRIV_SLOTS 12 + #define GEN7_TLB_RD_ADDR _MMIO(0x4700) #if 0 @@ -3292,19 +3296,20 @@ enum skl_disp_power_wells { #define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114) /* - * HDMI/DP bits are gen4+ + * HDMI/DP bits are g4x+ * * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. * Please check the detailed lore in the commit message for for experimental * evidence. 
*/ -#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29) +/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */ +#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29) +#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28) +#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27) +/* G4X/VLV/CHV DP/HDMI bits again match Bspec */ +#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27) #define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28) -#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27) -/* VLV DP/HDMI bits again match Bspec */ -#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27) -#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28) -#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29) +#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29) #define PORTD_HOTPLUG_INT_STATUS (3 << 21) #define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21) #define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21) @@ -5945,6 +5950,7 @@ enum skl_disp_power_wells { #define ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31) #define ILK_INTERNAL_DISPLAY_DISABLE (1 << 30) #define ILK_DISPLAY_DEBUG_DISABLE (1 << 29) +#define IVB_PIPE_C_DISABLE (1 << 28) #define ILK_HDCP_DISABLE (1 << 25) #define ILK_eDP_A_DISABLE (1 << 24) #define HSW_CDCLK_LIMIT (1 << 24) @@ -5991,10 +5997,19 @@ enum skl_disp_power_wells { #define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23) #define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23) #define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23) +#define SKL_DFSM_PIPE_A_DISABLE (1 << 30) +#define SKL_DFSM_PIPE_B_DISABLE (1 << 21) +#define SKL_DFSM_PIPE_C_DISABLE (1 << 28) + +#define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0) +#define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14) #define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) #define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) +#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) +#define GEN8_CS_CHICKEN1 _MMIO(0x2580) + /* GEN7 chicken */ #define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010) # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) @@ -6040,6 +6055,8 @@ enum skl_disp_power_wells { #define HDC_FORCE_NON_COHERENT (1<<4) #define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10) +#define GEN8_HDC_CHICKEN1 _MMIO(0x7304) + /* GEN9 chicken */ #define SLICE_ECO_CHICKEN0 _MMIO(0x7308) #define PIXEL_MASK_CAMMING_DISABLE (1 << 14) @@ -6770,6 +6787,16 @@ enum skl_disp_power_wells { #define VLV_PMWGICZ _MMIO(0x1300a4) +#define RC6_LOCATION _MMIO(0xD40) +#define RC6_CTX_IN_DRAM (1 << 0) +#define RC6_CTX_BASE _MMIO(0xD48) +#define RC6_CTX_BASE_MASK 0xFFFFFFF0 +#define PWRCTX_MAXCNT_RCSUNIT _MMIO(0x2054) +#define PWRCTX_MAXCNT_VCSUNIT0 _MMIO(0x12054) +#define PWRCTX_MAXCNT_BCSUNIT _MMIO(0x22054) +#define PWRCTX_MAXCNT_VECSUNIT _MMIO(0x1A054) +#define PWRCTX_MAXCNT_VCSUNIT1 _MMIO(0x1C054) +#define IDLE_TIME_MASK 0xFFFFF #define FORCEWAKE _MMIO(0xA18C) #define FORCEWAKE_VLV _MMIO(0x1300b0) #define FORCEWAKE_ACK_VLV _MMIO(0x1300b4) @@ -6908,6 +6935,7 @@ enum skl_disp_power_wells { #define GEN6_RPDEUC _MMIO(0xA084) #define GEN6_RPDEUCSW _MMIO(0xA088) #define GEN6_RC_STATE _MMIO(0xA094) +#define RC6_STATE (1 << 18) #define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098) #define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C) #define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0) @@ -7519,7 +7547,7 @@ enum skl_disp_power_wells { #define DPLL_CFGCR2_PDIV_7 (4<<2) #define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) -#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2) +#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1) #define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2) /* BXT display engine PLL */ @@ -7540,6 +7568,7 @@ enum 
skl_disp_power_wells { #define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3 #define DC_STATE_DEBUG _MMIO(0x45520) +#define DC_STATE_DEBUG_MASK_CORES (1<<0) #define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1) /* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, @@ -8159,4 +8188,11 @@ enum skl_disp_power_wells { #define GEN9_VEBOX_MOCS(i) _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */ #define GEN9_BLT_MOCS(i) _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */ +/* gamt regs */ +#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4) +#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */ +#define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV 0x5FF101FF /* max/min for LRA1/2 */ +#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL 0x67F1427F /* " " */ +#define GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT 0x5FF101FF /* " " */ + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a2aa09ce3202..34e061a9ef06 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev) dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); - } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + } else if (INTEL_INFO(dev)->gen <= 4) { dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL); dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); @@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev) I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL); - } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + } else if (INTEL_INFO(dev)->gen <= 4) { I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS); I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS); I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR); @@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev) } /* only restore FBC info on the platform that supports FBC*/ - intel_fbc_disable(dev_priv); + intel_fbc_global_disable(dev_priv); /* restore FBC interval */ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 52b2d409945d..fa09e5581137 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -175,35 +175,24 @@ TRACE_EVENT(i915_vma_unbind, __entry->obj, __entry->offset, __entry->size, __entry->vm) ); -#define VM_TO_TRACE_NAME(vm) \ - (i915_is_ggtt(vm) ? 
"G" : \ - "P") - -DECLARE_EVENT_CLASS(i915_va, - TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name), - TP_ARGS(vm, start, length, name), +TRACE_EVENT(i915_va_alloc, + TP_PROTO(struct i915_vma *vma), + TP_ARGS(vma), TP_STRUCT__entry( __field(struct i915_address_space *, vm) __field(u64, start) __field(u64, end) - __string(name, name) ), TP_fast_assign( - __entry->vm = vm; - __entry->start = start; - __entry->end = start + length - 1; - __assign_str(name, name); + __entry->vm = vma->vm; + __entry->start = vma->node.start; + __entry->end = vma->node.start + vma->node.size - 1; ), - TP_printk("vm=%p (%s), 0x%llx-0x%llx", - __entry->vm, __get_str(name), __entry->start, __entry->end) -); - -DEFINE_EVENT(i915_va, i915_va_alloc, - TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name), - TP_ARGS(vm, start, length, name) + TP_printk("vm=%p (%c), 0x%llx-0x%llx", + __entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P', __entry->start, __entry->end) ); DECLARE_EVENT_CLASS(i915_px_entry, diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 4625f8a9ba12..8e579a8505ac 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -97,6 +97,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->disable_lp_wm = false; crtc_state->disable_cxsr = false; crtc_state->wm_changed = false; + crtc_state->fb_changed = false; return &crtc_state->base; } diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 31f6d212fb1b..30f921421b0c 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -527,6 +527,8 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder) mutex_lock(&dev_priv->av_mutex); intel_dig_port->audio_connector = connector; + /* referred in audio callbacks */ + dev_priv->dig_port_map[port] = intel_encoder; mutex_unlock(&dev_priv->av_mutex); if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) @@ -554,6 +556,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder) mutex_lock(&dev_priv->av_mutex); intel_dig_port->audio_connector = NULL; + dev_priv->dig_port_map[port] = NULL; mutex_unlock(&dev_priv->av_mutex); if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 9c89df1af036..505fc5cf26f8 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -71,22 +71,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, struct intel_crt *crt = intel_encoder_to_crt(encoder); enum intel_display_power_domain power_domain; u32 tmp; + bool ret; power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_is_enabled(dev_priv, power_domain)) + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + ret = false; + tmp = I915_READ(crt->adpa_reg); if (!(tmp & ADPA_DAC_ENABLE)) - return false; + goto out; if (HAS_PCH_CPT(dev)) *pipe = PORT_TO_PIPE_CPT(tmp); else *pipe = PORT_TO_PIPE(tmp); - return true; + ret = true; +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) @@ -206,9 +213,7 @@ static void pch_post_disable_crt(struct intel_encoder *encoder) static void intel_enable_crt(struct intel_encoder *encoder) { - struct intel_crt *crt = intel_encoder_to_crt(encoder); - - intel_crt_set_dpms(encoder, 
crt->connector->base.dpms); + intel_crt_set_dpms(encoder, DRM_MODE_DPMS_ON); } static enum drm_mode_status @@ -216,6 +221,7 @@ intel_crt_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; + int max_dotclk = to_i915(dev)->max_dotclk_freq; int max_clock = 0; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) @@ -231,6 +237,9 @@ intel_crt_mode_valid(struct drm_connector *connector, if (mode->clock > max_clock) return MODE_CLOCK_HIGH; + if (mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */ if (HAS_PCH_LPT(dev) && (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2)) @@ -469,11 +478,10 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) } static enum drm_connector_status -intel_crt_load_detect(struct intel_crt *crt) +intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) { struct drm_device *dev = crt->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t pipe = to_intel_crtc(crt->base.base.crtc)->pipe; uint32_t save_bclrpat; uint32_t save_vtotal; uint32_t vtotal, vactive; @@ -642,7 +650,8 @@ intel_crt_detect(struct drm_connector *connector, bool force) if (intel_crt_detect_ddc(connector)) status = connector_status_connected; else if (INTEL_INFO(dev)->gen < 4) - status = intel_crt_load_detect(crt); + status = intel_crt_load_detect(crt, + to_intel_crtc(connector->state->crtc)->pipe); else status = connector_status_unknown; intel_release_load_detect_pipe(connector, &tmp, &ctx); diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 5c2f9a40c81b..902054efb902 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -179,7 +179,8 @@ static const struct stepping_info kbl_stepping_info[] = { static const struct stepping_info skl_stepping_info[] = { {'A', '0'}, {'B', '0'}, {'C', '0'}, {'D', '0'}, {'E', '0'}, {'F', '0'}, - {'G', '0'}, {'H', '0'}, {'I', '0'} + {'G', '0'}, {'H', '0'}, {'I', '0'}, + {'J', '0'}, {'K', '0'} }; static const struct stepping_info bxt_stepping_info[] = { @@ -219,19 +220,19 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de * Every time display comes back from low power state this function is called to * copy the firmware from internal memory to registers.
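The intel_crt_get_hw_state() rework above is the template this patch applies to nearly every hardware-readout function: instead of peeking with intel_display_power_is_enabled() and then touching registers that may meanwhile have lost power, grab a wakeref only if the well is already up, and drop it on every exit path. Reduced to its skeleton (hypothetical helper, not driver code):

static bool read_enabled_bit(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain,
                             i915_reg_t reg, u32 enable_bit)
{
        bool ret;

        /* fails -- without touching hardware -- if the well is down */
        if (!intel_display_power_get_if_enabled(dev_priv, domain))
                return false;

        ret = (I915_READ(reg) & enable_bit) != 0;

        /* balance the wakeref on the way out */
        intel_display_power_put(dev_priv, domain);

        return ret;
}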
*/ -void intel_csr_load_program(struct drm_i915_private *dev_priv) +bool intel_csr_load_program(struct drm_i915_private *dev_priv) { u32 *payload = dev_priv->csr.dmc_payload; uint32_t i, fw_size; if (!IS_GEN9(dev_priv)) { DRM_ERROR("No CSR support available for this platform\n"); - return; + return false; } if (!dev_priv->csr.dmc_payload) { DRM_ERROR("Tried to program CSR with empty payload\n"); - return; + return false; } fw_size = dev_priv->csr.dmc_fw_size; @@ -242,6 +243,10 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv) I915_WRITE(dev_priv->csr.mmioaddr[i], dev_priv->csr.mmiodata[i]); } + + dev_priv->csr.dc_state = 0; + + return true; } static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 1f9a3687b540..62de9f4bce09 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1531,7 +1531,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | wrpll_params.central_freq; - } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { + } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || + intel_encoder->type == INTEL_OUTPUT_DP_MST) { switch (crtc_state->port_clock / 2) { case 81000: ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); @@ -1545,8 +1546,10 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, } cfgcr1 = cfgcr2 = 0; - } else /* eDP */ + } else if (intel_encoder->type == INTEL_OUTPUT_EDP) { return true; + } else + return false; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); @@ -1910,13 +1913,16 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) enum transcoder cpu_transcoder; enum intel_display_power_domain power_domain; uint32_t tmp; + bool ret; power_domain = intel_display_port_power_domain(intel_encoder); - if (!intel_display_power_is_enabled(dev_priv, power_domain)) + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; - if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) - return false; + if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) { + ret = false; + goto out; + } if (port == PORT_A) cpu_transcoder = TRANSCODER_EDP; @@ -1928,23 +1934,33 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) switch (tmp & TRANS_DDI_MODE_SELECT_MASK) { case TRANS_DDI_MODE_SELECT_HDMI: case TRANS_DDI_MODE_SELECT_DVI: - return (type == DRM_MODE_CONNECTOR_HDMIA); + ret = type == DRM_MODE_CONNECTOR_HDMIA; + break; case TRANS_DDI_MODE_SELECT_DP_SST: - if (type == DRM_MODE_CONNECTOR_eDP) - return true; - return (type == DRM_MODE_CONNECTOR_DisplayPort); + ret = type == DRM_MODE_CONNECTOR_eDP || + type == DRM_MODE_CONNECTOR_DisplayPort; + break; + case TRANS_DDI_MODE_SELECT_DP_MST: /* if the transcoder is in MST state then * connector isn't connected */ - return false; + ret = false; + break; case TRANS_DDI_MODE_SELECT_FDI: - return (type == DRM_MODE_CONNECTOR_VGA); + ret = type == DRM_MODE_CONNECTOR_VGA; + break; default: - return false; + ret = false; + break; } + +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } bool intel_ddi_get_hw_state(struct intel_encoder *encoder, @@ -1956,15 +1972,18 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum intel_display_power_domain power_domain; u32 tmp; int i; + bool ret; power_domain = intel_display_port_power_domain(encoder); - if 
(!intel_display_power_is_enabled(dev_priv, power_domain)) + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + ret = false; + tmp = I915_READ(DDI_BUF_CTL(port)); if (!(tmp & DDI_BUF_CTL_ENABLE)) - return false; + goto out; if (port == PORT_A) { tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); @@ -1982,25 +2001,32 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, break; } - return true; - } else { - for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) { - tmp = I915_READ(TRANS_DDI_FUNC_CTL(i)); + ret = true; - if ((tmp & TRANS_DDI_PORT_MASK) - == TRANS_DDI_SELECT_PORT(port)) { - if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST) - return false; + goto out; + } - *pipe = i; - return true; - } + for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) { + tmp = I915_READ(TRANS_DDI_FUNC_CTL(i)); + + if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) { + if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == + TRANS_DDI_MODE_SELECT_DP_MST) + goto out; + + *pipe = i; + ret = true; + + goto out; } } DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); - return false; +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) @@ -2446,12 +2472,14 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, { uint32_t val; - if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; val = I915_READ(WRPLL_CTL(pll->id)); hw_state->wrpll = val; + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + return val & WRPLL_PLL_ENABLE; } @@ -2461,12 +2489,14 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, { uint32_t val; - if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; val = I915_READ(SPLL_CTL); hw_state->spll = val; + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + return val & SPLL_PLL_ENABLE; } @@ -2583,16 +2613,19 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, uint32_t val; unsigned int dpll; const struct skl_dpll_regs *regs = skl_dpll_regs; + bool ret; - if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; + ret = false; + /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */ dpll = pll->id + 1; val = I915_READ(regs[pll->id].ctl); if (!(val & LCPLL_PLL_ENABLE)) - return false; + goto out; val = I915_READ(DPLL_CTRL1); hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f; @@ -2602,8 +2635,12 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); } + ret = true; - return true; +out: + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + + return ret; } static void skl_shared_dplls_init(struct drm_i915_private *dev_priv) @@ -2870,13 +2907,16 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, { enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ uint32_t val; + bool ret; - if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; + ret = false; + val = I915_READ(BXT_PORT_PLL_ENABLE(port)); if (!(val & PORT_PLL_ENABLE)) - return 
false; + goto out; hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port)); hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; @@ -2923,7 +2963,12 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, I915_READ(BXT_PORT_PCS_DW12_LN23(port))); hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; - return true; + ret = true; + +out: + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + + return ret; } static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv) @@ -3058,11 +3103,15 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv, { u32 temp; - if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { + if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); + + intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO); + if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe)) return true; } + return false; } @@ -3277,11 +3326,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port) intel_encoder->get_config = intel_ddi_get_config; intel_dig_port->port = port; - dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES); - intel_dig_port->max_lanes = max_lanes; /* * Bspec says that DDI_A_4_LANES is the only supported configuration @@ -3294,9 +3341,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port) if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) { DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n"); intel_dig_port->saved_port_bits |= DDI_A_4_LANES; + max_lanes = 4; } } + intel_dig_port->max_lanes = max_lanes; + intel_encoder->type = INTEL_OUTPUT_UNKNOWN; intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); intel_encoder->cloneable = 0; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e1e7cdee3bbd..8b7b8b64b008 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1343,18 +1343,21 @@ void assert_pipe(struct drm_i915_private *dev_priv, bool cur_state; enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe); + enum intel_display_power_domain power_domain; /* if we need the pipe quirk it must be always on */ if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) state = true; - if (!intel_display_power_is_enabled(dev_priv, - POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { - cur_state = false; - } else { + power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); + if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { u32 val = I915_READ(PIPECONF(cpu_transcoder)); cur_state = !!(val & PIPECONF_ENABLE); + + intel_display_power_put(dev_priv, power_domain); + } else { + cur_state = false; } I915_STATE_WARN(cur_state != state, @@ -2284,7 +2287,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, const struct drm_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(fb->dev); - struct intel_rotation_info *info = &view->params.rotation_info; + struct intel_rotation_info *info = &view->params.rotated; unsigned int tile_size, tile_width, tile_height, cpp; *view = i915_ggtt_view_normal; @@ -2306,7 +2309,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, tile_size = intel_tile_size(dev_priv); cpp = drm_format_plane_cpp(fb->pixel_format, 0); - tile_width = 
intel_tile_width(dev_priv, cpp, fb->modifier[0]); + tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp); tile_height = tile_size / tile_width; info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width); @@ -2448,11 +2451,11 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb, /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel * is assumed to be a power-of-two. */ -unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv, - int *x, int *y, - uint64_t fb_modifier, - unsigned int cpp, - unsigned int pitch) +u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv, + int *x, int *y, + uint64_t fb_modifier, + unsigned int cpp, + unsigned int pitch) { if (fb_modifier != DRM_FORMAT_MOD_NONE) { unsigned int tile_size, tile_width, tile_height; @@ -2551,12 +2554,16 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size) return false; + mutex_lock(&dev->struct_mutex); + obj = i915_gem_object_create_stolen_for_preallocated(dev, base_aligned, base_aligned, size_aligned); - if (!obj) + if (!obj) { + mutex_unlock(&dev->struct_mutex); return false; + } obj->tiling_mode = plane_config->tiling; if (obj->tiling_mode == I915_TILING_X) @@ -2569,12 +2576,12 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, mode_cmd.modifier[0] = fb->modifier[0]; mode_cmd.flags = DRM_MODE_FB_MODIFIERS; - mutex_lock(&dev->struct_mutex); if (intel_framebuffer_init(dev, to_intel_framebuffer(fb), &mode_cmd, obj)) { DRM_DEBUG_KMS("intel fb init failed\n"); goto out_unref_obj; } + mutex_unlock(&dev->struct_mutex); DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); @@ -2706,14 +2713,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; - unsigned long linear_offset; - int x = plane_state->src.x1 >> 16; - int y = plane_state->src.y1 >> 16; + u32 linear_offset; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); - int pixel_size; - - pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + int x = plane_state->src.x1 >> 16; + int y = plane_state->src.y1 >> 16; dspcntr = DISPPLANE_GAMMA_ENABLE; @@ -2771,13 +2776,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, if (IS_G4X(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * pixel_size; + linear_offset = y * fb->pitches[0] + x * cpp; if (INTEL_INFO(dev)->gen >= 4) { intel_crtc->dspaddr_offset = intel_compute_tile_offset(dev_priv, &x, &y, - fb->modifier[0], - pixel_size, + fb->modifier[0], cpp, fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; } else { @@ -2794,7 +2798,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, data and adding to linear_offset*/ linear_offset += (crtc_state->pipe_src_h - 1) * fb->pitches[0] + - (crtc_state->pipe_src_w - 1) * pixel_size; + (crtc_state->pipe_src_w - 1) * cpp; } intel_crtc->adjusted_x = x; @@ -2839,10 +2843,10 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; - unsigned long linear_offset; + u32 linear_offset; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); int x 
= plane_state->src.x1 >> 16; int y = plane_state->src.y1 >> 16; @@ -2881,11 +2885,10 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * pixel_size; + linear_offset = y * fb->pitches[0] + x * cpp; intel_crtc->dspaddr_offset = intel_compute_tile_offset(dev_priv, &x, &y, - fb->modifier[0], - pixel_size, + fb->modifier[0], cpp, fb->pitches[0]); linear_offset -= intel_crtc->dspaddr_offset; if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { @@ -2899,7 +2902,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, data and adding to linear_offset*/ linear_offset += (crtc_state->pipe_src_h - 1) * fb->pitches[0] + - (crtc_state->pipe_src_w - 1) * pixel_size; + (crtc_state->pipe_src_w - 1) * cpp; } } @@ -2951,7 +2954,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane, offset = vma->node.start; if (plane == 1) { - offset += vma->ggtt_view.params.rotation_info.uv_start_page * + offset += vma->ggtt_view.params.rotated.uv_start_page * PAGE_SIZE; } @@ -3160,9 +3163,6 @@ static void skylake_disable_primary_plane(struct drm_plane *primary, struct drm_i915_private *dev_priv = dev->dev_private; int pipe = to_intel_crtc(crtc)->pipe; - if (dev_priv->fbc.deactivate) - dev_priv->fbc.deactivate(dev_priv); - I915_WRITE(PLANE_CTL(pipe, 0), 0); I915_WRITE(PLANE_SURF(pipe, 0), 0); POSTING_READ(PLANE_SURF(pipe, 0)); @@ -4792,9 +4792,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc) to_intel_crtc_state(crtc->base.state); struct drm_device *dev = crtc->base.dev; - if (atomic->wait_vblank) - intel_wait_for_vblank(dev, crtc->pipe); - intel_frontbuffer_flip(dev, atomic->fb_bits); crtc->wm.cxsr_allowed = true; @@ -4803,7 +4800,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc) intel_update_watermarks(&crtc->base); if (atomic->update_fbc) - intel_fbc_update(crtc); + intel_fbc_post_update(crtc); if (atomic->post_enable_primary) intel_post_enable_primary(&crtc->base); @@ -4811,26 +4808,39 @@ static void intel_post_plane_update(struct intel_crtc *crtc) memset(atomic, 0, sizeof(*atomic)); } -static void intel_pre_plane_update(struct intel_crtc *crtc) +static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc_atomic_commit *atomic = &crtc->atomic; struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); + struct drm_atomic_state *old_state = old_crtc_state->base.state; + struct drm_plane *primary = crtc->base.primary; + struct drm_plane_state *old_pri_state = + drm_atomic_get_existing_plane_state(old_state, primary); + bool modeset = needs_modeset(&pipe_config->base); - if (atomic->disable_fbc) - intel_fbc_deactivate(crtc); + if (atomic->update_fbc) + intel_fbc_pre_update(crtc); - if (crtc->atomic.disable_ips) - hsw_disable_ips(crtc); + if (old_pri_state) { + struct intel_plane_state *primary_state = + to_intel_plane_state(primary->state); + struct intel_plane_state *old_primary_state = + to_intel_plane_state(old_pri_state); - if (atomic->pre_disable_primary) - intel_pre_disable_primary(&crtc->base); + if (old_primary_state->visible && + (modeset || !primary_state->visible)) + intel_pre_disable_primary(&crtc->base); + } if (pipe_config->disable_cxsr) { crtc->wm.cxsr_allowed = false; - 
intel_set_memory_cxsr(dev_priv, false); + + if (old_crtc_state->base.active) + intel_set_memory_cxsr(dev_priv, false); } if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed) @@ -4931,8 +4941,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) intel_wait_for_vblank(dev, pipe); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); - - intel_fbc_enable(intel_crtc); } /* IPS only exists on ULT machines and is tied to pipe A. */ @@ -5045,8 +5053,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_wait_for_vblank(dev, hsw_workaround_pipe); intel_wait_for_vblank(dev, hsw_workaround_pipe); } - - intel_fbc_enable(intel_crtc); } static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) @@ -5127,8 +5133,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) } intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); - - intel_fbc_disable_crtc(intel_crtc); } static void haswell_crtc_disable(struct drm_crtc *crtc) @@ -5179,8 +5183,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, true); } - - intel_fbc_disable_crtc(intel_crtc); } static void i9xx_pfit_enable(struct intel_crtc *crtc) @@ -5303,31 +5305,37 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) } } -static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) +static unsigned long get_crtc_power_domains(struct drm_crtc *crtc, + struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->dev; - struct intel_encoder *intel_encoder; + struct drm_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; unsigned long mask; - enum transcoder transcoder = intel_crtc->config->cpu_transcoder; + enum transcoder transcoder = crtc_state->cpu_transcoder; - if (!crtc->state->active) + if (!crtc_state->base.active) return 0; mask = BIT(POWER_DOMAIN_PIPE(pipe)); mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); - if (intel_crtc->config->pch_pfit.enabled || - intel_crtc->config->pch_pfit.force_thru) + if (crtc_state->pch_pfit.enabled || + crtc_state->pch_pfit.force_thru) mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); - for_each_encoder_on_crtc(dev, crtc, intel_encoder) + drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) { + struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + mask |= BIT(intel_display_port_power_domain(intel_encoder)); + } return mask; } -static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc) +static unsigned long +modeset_get_crtc_power_domains(struct drm_crtc *crtc, + struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = crtc->dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -5335,7 +5343,8 @@ static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc) unsigned long domains, new_domains, old_domains; old_domains = intel_crtc->enabled_power_domains; - intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc); + intel_crtc->enabled_power_domains = new_domains = + get_crtc_power_domains(crtc, crtc_state); domains = new_domains & ~old_domains; @@ -5354,31 +5363,6 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv, intel_display_power_put(dev_priv, domain); } -static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) -{ - struct intel_atomic_state *intel_state = to_intel_atomic_state(state); - 
struct drm_device *dev = state->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long put_domains[I915_MAX_PIPES] = {}; - struct drm_crtc_state *crtc_state; - struct drm_crtc *crtc; - int i; - - for_each_crtc_in_state(state, crtc, crtc_state, i) { - if (needs_modeset(crtc->state)) - put_domains[to_intel_crtc(crtc)->pipe] = - modeset_get_crtc_power_domains(crtc); - } - - if (dev_priv->display.modeset_commit_cdclk && - intel_state->dev_cdclk != dev_priv->cdclk_freq) - dev_priv->display.modeset_commit_cdclk(state); - - for (i = 0; i < I915_MAX_PIPES; i++) - if (put_domains[i]) - modeset_put_power_domains(dev_priv, put_domains[i]); -} - static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) { int max_cdclk_freq = dev_priv->max_cdclk_freq; @@ -6041,8 +6025,7 @@ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, return 144000; } -/* Compute the max pixel clock for new configuration. Uses atomic state if - * that's non-NULL, look at current state otherwise. */ +/* Compute the max pixel clock for new configuration. */ static int intel_mode_max_pixclk(struct drm_device *dev, struct drm_atomic_state *state) { @@ -6065,9 +6048,6 @@ static int intel_mode_max_pixclk(struct drm_device *dev, intel_state->min_pixclk[i] = pixclk; } - if (!intel_state->active_crtcs) - return 0; - for_each_pipe(dev_priv, pipe) max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk); @@ -6291,8 +6271,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); - - intel_fbc_enable(intel_crtc); } static void i9xx_pfit_disable(struct intel_crtc *crtc) @@ -6355,8 +6333,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) if (!IS_GEN2(dev)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - - intel_fbc_disable_crtc(intel_crtc); } static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) @@ -6380,6 +6356,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) dev_priv->display.crtc_disable(crtc); intel_crtc->active = false; + intel_fbc_disable(intel_crtc); intel_update_watermarks(crtc); intel_disable_shared_dpll(intel_crtc); @@ -6398,55 +6375,16 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) */ int intel_display_suspend(struct drm_device *dev) { - struct drm_mode_config *config = &dev->mode_config; - struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state; - struct drm_crtc *crtc; - unsigned crtc_mask = 0; - int ret = 0; - - if (WARN_ON(!ctx)) - return 0; - - lockdep_assert_held(&ctx->ww_ctx); - state = drm_atomic_state_alloc(dev); - if (WARN_ON(!state)) - return -ENOMEM; - - state->acquire_ctx = ctx; - state->allow_modeset = true; - - for_each_crtc(dev, crtc) { - struct drm_crtc_state *crtc_state = - drm_atomic_get_crtc_state(state, crtc); - - ret = PTR_ERR_OR_ZERO(crtc_state); - if (ret) - goto free; - - if (!crtc_state->active) - continue; - - crtc_state->active = false; - crtc_mask |= 1 << drm_crtc_index(crtc); - } - - if (crtc_mask) { - ret = drm_atomic_commit(state); - - if (!ret) { - for_each_crtc(dev, crtc) - if (crtc_mask & (1 << drm_crtc_index(crtc))) - crtc->state->active = true; - - return ret; - } - } + int ret; -free: + state = drm_atomic_helper_suspend(dev); + ret = PTR_ERR_OR_ZERO(state); if (ret) DRM_ERROR("Suspending crtc's failed with %i\n", ret); - drm_atomic_state_free(state); + else + dev_priv->modeset_restore_state = state; return ret; } @@ -8186,18 
+8124,22 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_display_power_domain power_domain; uint32_t tmp; + bool ret; - if (!intel_display_power_is_enabled(dev_priv, - POWER_DOMAIN_PIPE(crtc->pipe))) + power_domain = POWER_DOMAIN_PIPE(crtc->pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; + ret = false; + tmp = I915_READ(PIPECONF(crtc->pipe)); if (!(tmp & PIPECONF_ENABLE)) - return false; + goto out; if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { switch (tmp & PIPECONF_BPC_MASK) { @@ -8277,7 +8219,12 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock / pipe_config->pixel_multiplier; - return true; + ret = true; + +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } static void ironlake_init_pch_refclk(struct drm_device *dev) @@ -9381,18 +9328,21 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_display_power_domain power_domain; uint32_t tmp; + bool ret; - if (!intel_display_power_is_enabled(dev_priv, - POWER_DOMAIN_PIPE(crtc->pipe))) + power_domain = POWER_DOMAIN_PIPE(crtc->pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; + ret = false; tmp = I915_READ(PIPECONF(crtc->pipe)); if (!(tmp & PIPECONF_ENABLE)) - return false; + goto out; switch (tmp & PIPECONF_BPC_MASK) { case PIPECONF_6BPC: @@ -9455,7 +9405,12 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ironlake_get_pfit_config(crtc, pipe_config); - return true; + ret = true; + +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) @@ -9721,9 +9676,6 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state) intel_state->min_pixclk[i] = pixel_rate; } - if (!intel_state->active_crtcs) - return 0; - for_each_pipe(dev_priv, pipe) max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate); @@ -9853,8 +9805,13 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state) static int haswell_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { - if (!intel_ddi_pll_select(crtc, crtc_state)) - return -EINVAL; + struct intel_encoder *intel_encoder = + intel_ddi_get_crtc_new_encoder(crtc_state); + + if (intel_encoder->type != INTEL_OUTPUT_DSI) { + if (!intel_ddi_pll_select(crtc, crtc_state)) + return -EINVAL; + } crtc->lowfreq_avail = false; @@ -9982,12 +9939,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - enum intel_display_power_domain pfit_domain; + enum intel_display_power_domain power_domain; + unsigned long power_domain_mask; uint32_t tmp; + bool ret; - if (!intel_display_power_is_enabled(dev_priv, - POWER_DOMAIN_PIPE(crtc->pipe))) + power_domain = POWER_DOMAIN_PIPE(crtc->pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + power_domain_mask = BIT(power_domain); + + ret = false; 
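/*
 * The readout conversions in these hunks (i9xx_get_pipe_config,
 * ironlake_get_pipe_config and haswell_get_pipe_config) all follow one
 * shape: instead of peeking at the power well with
 * intel_display_power_is_enabled() and then racing against a concurrent
 * power-down while reading registers, grab a wakeref only if the domain
 * is already up, and drop it on every exit path. A minimal sketch of that
 * shape -- foo_get_hw_state() is an illustrative stand-in, not a helper
 * added by this patch:
 */
static bool foo_get_hw_state(struct drm_i915_private *dev_priv,
			     enum pipe pipe)
{
	enum intel_display_power_domain power_domain = POWER_DOMAIN_PIPE(pipe);
	bool ret = false;

	/* Takes a reference only if the well is on; never powers it up. */
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
		goto out;

	/* ... read the remaining hardware state here ... */
	ret = true;

out:
	/* Balance the conditional get on success and failure alike. */
	intel_display_power_put(dev_priv, power_domain);
	return ret;
}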
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; @@ -10014,13 +9976,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->cpu_transcoder = TRANSCODER_EDP; } - if (!intel_display_power_is_enabled(dev_priv, - POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) - return false; + power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + goto out; + power_domain_mask |= BIT(power_domain); tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); if (!(tmp & PIPECONF_ENABLE)) - return false; + goto out; haswell_get_ddi_port_state(crtc, pipe_config); @@ -10030,14 +9993,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, skl_init_scalers(dev, crtc, pipe_config); } - pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); - if (INTEL_INFO(dev)->gen >= 9) { pipe_config->scaler_state.scaler_id = -1; pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); } - if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { + power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); + if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { + power_domain_mask |= BIT(power_domain); if (INTEL_INFO(dev)->gen >= 9) skylake_get_pfit_config(crtc, pipe_config); else @@ -10055,7 +10018,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = 1; } - return true; + ret = true; + +out: + for_each_power_domain(power_domain, power_domain_mask) + intel_display_power_put(dev_priv, power_domain); + + return ret; } static void i845_update_cursor(struct drm_crtc *crtc, u32 base, @@ -10376,6 +10345,7 @@ mode_fits_in_fbdev(struct drm_device *dev, if (obj->base.size < mode->vdisplay * fb->pitches[0]) return NULL; + drm_framebuffer_reference(fb); return fb; #else return NULL; @@ -10431,7 +10401,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, struct drm_device *dev = encoder->dev; struct drm_framebuffer *fb; struct drm_mode_config *config = &dev->mode_config; - struct drm_atomic_state *state = NULL; + struct drm_atomic_state *state = NULL, *restore_state = NULL; struct drm_connector_state *connector_state; struct intel_crtc_state *crtc_state; int ret, i = -1; @@ -10440,6 +10410,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, connector->base.id, connector->name, encoder->base.id, encoder->name); + old->restore_state = NULL; + retry: ret = drm_modeset_lock(&config->connection_mutex, ctx); if (ret) @@ -10456,24 +10428,15 @@ retry: */ /* See if we already have a CRTC for this connector */ - if (encoder->crtc) { - crtc = encoder->crtc; + if (connector->state->crtc) { + crtc = connector->state->crtc; ret = drm_modeset_lock(&crtc->mutex, ctx); if (ret) goto fail; - ret = drm_modeset_lock(&crtc->primary->mutex, ctx); - if (ret) - goto fail; - - old->dpms_mode = connector->dpms; - old->load_detect_temp = false; /* Make sure the crtc and connector are running */ - if (connector->dpms != DRM_MODE_DPMS_ON) - connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); - - return true; + goto found; } /* Find an unused one (if possible) */ @@ -10481,8 +10444,15 @@ retry: i++; if (!(encoder->possible_crtcs & (1 << i))) continue; - if (possible_crtc->state->enable) + + ret = drm_modeset_lock(&possible_crtc->mutex, ctx); + if (ret) + goto fail; + + if (possible_crtc->state->enable) { + drm_modeset_unlock(&possible_crtc->mutex); continue; + } crtc = possible_crtc; break; 
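/*
 * The load-detect rework above now locks each candidate CRTC before
 * looking at its state and drops the lock again when the CRTC is busy,
 * relying on the retry:/fail: labels for the usual ww-mutex dance: any
 * drm_modeset_lock() call may fail with -EDEADLK, after which all locks
 * are released via drm_modeset_backoff() and the scan restarts. A
 * condensed sketch of the candidate scan -- find_unused_crtc() is
 * illustrative, not a helper from this patch:
 */
static struct drm_crtc *find_unused_crtc(struct drm_encoder *encoder,
					 struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc *crtc;
	int ret;

	drm_for_each_crtc(crtc, encoder->dev) {
		if (!(encoder->possible_crtcs & drm_crtc_mask(crtc)))
			continue;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret) /* -EDEADLK: caller must back off and retry */
			return ERR_PTR(ret);

		if (crtc->state->enable) {
			/* Busy: unlock and keep scanning. */
			drm_modeset_unlock(&crtc->mutex);
			continue;
		}

		return crtc; /* returned with its lock held */
	}

	return NULL;
}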
@@ -10496,23 +10466,22 @@ retry: goto fail; } - ret = drm_modeset_lock(&crtc->mutex, ctx); - if (ret) - goto fail; +found: + intel_crtc = to_intel_crtc(crtc); + ret = drm_modeset_lock(&crtc->primary->mutex, ctx); if (ret) goto fail; - intel_crtc = to_intel_crtc(crtc); - old->dpms_mode = connector->dpms; - old->load_detect_temp = true; - old->release_fb = NULL; - state = drm_atomic_state_alloc(dev); - if (!state) - return false; + restore_state = drm_atomic_state_alloc(dev); + if (!state || !restore_state) { + ret = -ENOMEM; + goto fail; + } state->acquire_ctx = ctx; + restore_state->acquire_ctx = ctx; connector_state = drm_atomic_get_connector_state(state, connector); if (IS_ERR(connector_state)) { @@ -10520,7 +10489,9 @@ retry: goto fail; } - connector_state->crtc = crtc; + ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); + if (ret) + goto fail; crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); if (IS_ERR(crtc_state)) { @@ -10544,7 +10515,6 @@ retry: if (fb == NULL) { DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); - old->release_fb = fb; } else DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); if (IS_ERR(fb)) { @@ -10556,15 +10526,28 @@ retry: if (ret) goto fail; - drm_mode_copy(&crtc_state->base.mode, mode); + drm_framebuffer_unreference(fb); + + ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); + if (ret) + goto fail; + + ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); + if (!ret) + ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); + if (!ret) + ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary)); + if (ret) { + DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); + goto fail; + } if (drm_atomic_commit(state)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); - if (old->release_fb) - old->release_fb->funcs->destroy(old->release_fb); goto fail; } - crtc->primary->crtc = crtc; + + old->restore_state = restore_state; /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev, intel_crtc->pipe); @@ -10572,7 +10555,8 @@ retry: fail: drm_atomic_state_free(state); - state = NULL; + drm_atomic_state_free(restore_state); + restore_state = state = NULL; if (ret == -EDEADLK) { drm_modeset_backoff(ctx); @@ -10586,65 +10570,24 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx) { - struct drm_device *dev = connector->dev; struct intel_encoder *intel_encoder = intel_attached_encoder(connector); struct drm_encoder *encoder = &intel_encoder->base; - struct drm_crtc *crtc = encoder->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_atomic_state *state; - struct drm_connector_state *connector_state; - struct intel_crtc_state *crtc_state; + struct drm_atomic_state *state = old->restore_state; int ret; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, encoder->base.id, encoder->name); - if (old->load_detect_temp) { - state = drm_atomic_state_alloc(dev); - if (!state) - goto fail; - - state->acquire_ctx = ctx; - - connector_state = drm_atomic_get_connector_state(state, connector); - if (IS_ERR(connector_state)) - goto fail; - - crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); - if (IS_ERR(crtc_state)) - goto fail; - - connector_state->crtc = NULL; - - crtc_state->base.enable = 
crtc_state->base.active = false; - - ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL, - 0, 0); - if (ret) - goto fail; - - ret = drm_atomic_commit(state); - if (ret) - goto fail; - - if (old->release_fb) { - drm_framebuffer_unregister_private(old->release_fb); - drm_framebuffer_unreference(old->release_fb); - } - + if (!state) return; - } - - /* Switch crtc and encoder back off if necessary */ - if (old->dpms_mode != DRM_MODE_DPMS_ON) - connector->funcs->dpms(connector, old->dpms_mode); - return; -fail: - DRM_DEBUG_KMS("Couldn't release load detect pipe.\n"); - drm_atomic_state_free(state); + ret = drm_atomic_commit(state); + if (ret) { + DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); + drm_atomic_state_free(state); + } } static int i9xx_pll_refclk(struct drm_device *dev, @@ -10914,6 +10857,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_unlock(&dev->struct_mutex); intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit); + intel_fbc_post_update(crtc); drm_framebuffer_unreference(work->old_fb); BUG_ON(atomic_read(&crtc->unpin_work_count) == 0); @@ -10995,6 +10939,12 @@ static bool page_flip_finished(struct intel_crtc *crtc) return true; /* + * BDW signals flip done immediately if the plane + * is disabled, even if the plane enable is already + * armed to occur at the next vblank :( + */ + + /* * A DSPSURFLIVE check isn't enough in case the mmio and CS flips * used the same base address. In that case the mmio flip might * have completed, but the CS hasn't even executed the flip yet. @@ -11629,6 +11579,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, crtc->primary->fb = fb; update_state_fb(crtc->primary); + intel_fbc_pre_update(intel_crtc); work->pending_flip_obj = obj; @@ -11713,7 +11664,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); - intel_fbc_deactivate(intel_crtc); intel_frontbuffer_flip_prepare(dev, to_intel_plane(primary)->frontbuffer_bit); @@ -11724,7 +11674,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, cleanup_unpin: intel_unpin_fb_obj(fb, crtc->primary->state); cleanup_pending: - if (request) + if (!IS_ERR_OR_NULL(request)) i915_gem_request_cancel(request); atomic_dec(&intel_crtc->unpin_work_count); mutex_unlock(&dev->struct_mutex); @@ -11835,11 +11785,9 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *plane = plane_state->plane; struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane_state *old_plane_state = to_intel_plane_state(plane->state); int idx = intel_crtc->base.base.id, ret; - int i = drm_plane_index(plane); bool mode_changed = needs_modeset(crtc_state); bool was_crtc_enabled = crtc->state->active; bool is_crtc_enabled = crtc_state->active; @@ -11872,6 +11820,9 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, if (!was_visible && !visible) return 0; + if (fb != old_plane_state->base.fb) + pipe_config->fb_changed = true; + turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); @@ -11886,11 +11837,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, pipe_config->wm_changed = true; /* must disable cxsr around plane enable/disable */ - if (plane->type != DRM_PLANE_TYPE_CURSOR) { - if (is_crtc_enabled) - intel_crtc->atomic.wait_vblank = true; + 
if (plane->type != DRM_PLANE_TYPE_CURSOR) pipe_config->disable_cxsr = true; - } } else if (intel_wm_need_update(plane, plane_state)) { pipe_config->wm_changed = true; } @@ -11901,49 +11849,9 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, switch (plane->type) { case DRM_PLANE_TYPE_PRIMARY: - intel_crtc->atomic.pre_disable_primary = turn_off; intel_crtc->atomic.post_enable_primary = turn_on; + intel_crtc->atomic.update_fbc = true; - if (turn_off) { - /* - * FIXME: Actually if we will still have any other - * plane enabled on the pipe we could let IPS enabled - * still, but for now lets consider that when we make - * primary invisible by setting DSPCNTR to 0 on - * update_primary_plane function IPS needs to be - * disable. - */ - intel_crtc->atomic.disable_ips = true; - - intel_crtc->atomic.disable_fbc = true; - } - - /* - * FBC does not work on some platforms for rotated - * planes, so disable it when rotation is not 0 and - * update it when rotation is set back to 0. - * - * FIXME: This is redundant with the fbc update done in - * the primary plane enable function except that that - * one is done too late. We eventually need to unify - * this. - */ - - if (visible && - INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && - dev_priv->fbc.crtc == intel_crtc && - plane_state->rotation != BIT(DRM_ROTATE_0)) - intel_crtc->atomic.disable_fbc = true; - - /* - * BDW signals flip done immediately if the plane - * is disabled, even if the plane enable is already - * armed to occur at the next vblank :( - */ - if (turn_on && IS_BROADWELL(dev)) - intel_crtc->atomic.wait_vblank = true; - - intel_crtc->atomic.update_fbc |= visible || mode_changed; break; case DRM_PLANE_TYPE_CURSOR: break; @@ -11956,13 +11864,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, */ if (IS_IVYBRIDGE(dev) && needs_scaling(to_intel_plane_state(plane_state)) && - !needs_scaling(old_plane_state)) { - to_intel_crtc_state(crtc_state)->disable_lp_wm = true; - } else if (turn_off && !mode_changed) { - intel_crtc->atomic.wait_vblank = true; - intel_crtc->atomic.update_sprite_watermarks |= - 1 << i; - } + !needs_scaling(old_plane_state)) + pipe_config->disable_lp_wm = true; break; } @@ -13130,8 +13033,6 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_shared_dpll_config *shared_dpll = NULL; - struct intel_crtc *intel_crtc; - struct intel_crtc_state *intel_crtc_state; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; int i; @@ -13140,21 +13041,21 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state) return; for_each_crtc_in_state(state, crtc, crtc_state, i) { - int dpll; - - intel_crtc = to_intel_crtc(crtc); - intel_crtc_state = to_intel_crtc_state(crtc_state); - dpll = intel_crtc_state->shared_dpll; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll; - if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE) + if (!needs_modeset(crtc_state)) continue; - intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE; + to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE; + + if (old_dpll == DPLL_ID_PRIVATE) + continue; if (!shared_dpll) shared_dpll = intel_atomic_get_shared_dpll_state(state); - shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe); + shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe); } } @@ -13290,6 +13191,9 @@ static int intel_modeset_checks(struct 
drm_atomic_state *state) if (ret < 0) return ret; + + DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n", + intel_state->cdclk, intel_state->dev_cdclk); } else to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq; @@ -13348,6 +13252,7 @@ static void calc_watermark_data(struct drm_atomic_state *state) static int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; @@ -13390,7 +13295,7 @@ static int intel_atomic_check(struct drm_device *dev, return ret; if (i915.fastboot && - intel_pipe_config_compare(state->dev, + intel_pipe_config_compare(dev, to_intel_crtc_state(crtc->state), pipe_config, true)) { crtc_state->mode_changed = false; @@ -13416,12 +13321,13 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) return ret; } else - intel_state->cdclk = to_i915(state->dev)->cdclk_freq; + intel_state->cdclk = dev_priv->cdclk_freq; - ret = drm_atomic_helper_check_planes(state->dev, state); + ret = drm_atomic_helper_check_planes(dev, state); if (ret) return ret; + intel_fbc_choose_crtc(dev_priv, state); calc_watermark_data(state); return 0; @@ -13493,6 +13399,71 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, return ret; } +static void intel_atomic_wait_for_vblanks(struct drm_device *dev, + struct drm_i915_private *dev_priv, + unsigned crtc_mask) +{ + unsigned last_vblank_count[I915_MAX_PIPES]; + enum pipe pipe; + int ret; + + if (!crtc_mask) + return; + + for_each_pipe(dev_priv, pipe) { + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + + if (!((1 << pipe) & crtc_mask)) + continue; + + ret = drm_crtc_vblank_get(crtc); + if (WARN_ON(ret != 0)) { + crtc_mask &= ~(1 << pipe); + continue; + } + + last_vblank_count[pipe] = drm_crtc_vblank_count(crtc); + } + + for_each_pipe(dev_priv, pipe) { + struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + long lret; + + if (!((1 << pipe) & crtc_mask)) + continue; + + lret = wait_event_timeout(dev->vblank[pipe].queue, + last_vblank_count[pipe] != + drm_crtc_vblank_count(crtc), + msecs_to_jiffies(50)); + + WARN_ON(!lret); + + drm_crtc_vblank_put(crtc); + } +} + +static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) +{ + /* fb updated, need to unpin old fb */ + if (crtc_state->fb_changed) + return true; + + /* wm changes, need vblank before final wm's */ + if (crtc_state->wm_changed) + return true; + + /* + * cxsr is re-enabled after vblank. + * This is already handled by crtc_state->wm_changed, + * but added for clarity. 
+ */ + if (crtc_state->disable_cxsr) + return true; + + return false; +} + /** * intel_atomic_commit - commit validated state object * @dev: DRM device @@ -13519,6 +13490,8 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_crtc *crtc; int ret = 0, i; bool hw_check = intel_state->modeset; + unsigned long put_domains[I915_MAX_PIPES] = {}; + unsigned crtc_vblank_mask = 0; ret = intel_atomic_prepare_commit(dev, state, async); if (ret) { @@ -13534,20 +13507,32 @@ static int intel_atomic_commit(struct drm_device *dev, sizeof(intel_state->min_pixclk)); dev_priv->active_crtcs = intel_state->active_crtcs; dev_priv->atomic_cdclk_freq = intel_state->cdclk; + + intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); } for_each_crtc_in_state(state, crtc, crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + if (needs_modeset(crtc->state) || + to_intel_crtc_state(crtc->state)->update_pipe) { + hw_check = true; + + put_domains[to_intel_crtc(crtc)->pipe] = + modeset_get_crtc_power_domains(crtc, + to_intel_crtc_state(crtc->state)); + } + if (!needs_modeset(crtc->state)) continue; - intel_pre_plane_update(intel_crtc); + intel_pre_plane_update(to_intel_crtc_state(crtc_state)); if (crtc_state->active) { intel_crtc_disable_planes(crtc, crtc_state->plane_mask); dev_priv->display.crtc_disable(crtc); intel_crtc->active = false; + intel_fbc_disable(intel_crtc); intel_disable_shared_dpll(intel_crtc); /* @@ -13570,51 +13555,53 @@ static int intel_atomic_commit(struct drm_device *dev, intel_shared_dpll_commit(state); drm_atomic_helper_update_legacy_modeset_state(state->dev, state); - modeset_update_crtc_power_domains(state); + + if (dev_priv->display.modeset_commit_cdclk && + intel_state->dev_cdclk != dev_priv->cdclk_freq) + dev_priv->display.modeset_commit_cdclk(state); } /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ for_each_crtc_in_state(state, crtc, crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); bool modeset = needs_modeset(crtc->state); - bool update_pipe = !modeset && - to_intel_crtc_state(crtc->state)->update_pipe; - unsigned long put_domains = 0; - - if (modeset) - intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); + struct intel_crtc_state *pipe_config = + to_intel_crtc_state(crtc->state); + bool update_pipe = !modeset && pipe_config->update_pipe; if (modeset && crtc->state->active) { update_scanline_offset(to_intel_crtc(crtc)); dev_priv->display.crtc_enable(crtc); } - if (update_pipe) { - put_domains = modeset_get_crtc_power_domains(crtc); - - /* make sure intel_modeset_check_state runs */ - hw_check = true; - } - if (!modeset) - intel_pre_plane_update(intel_crtc); + intel_pre_plane_update(to_intel_crtc_state(crtc_state)); + + if (crtc->state->active && intel_crtc->atomic.update_fbc) + intel_fbc_enable(intel_crtc); if (crtc->state->active && (crtc->state->planes_changed || update_pipe)) drm_atomic_helper_commit_planes_on_crtc(crtc_state); - if (put_domains) - modeset_put_power_domains(dev_priv, put_domains); - - intel_post_plane_update(intel_crtc); - - if (modeset) - intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); + if (pipe_config->base.active && needs_vblank_wait(pipe_config)) + crtc_vblank_mask |= 1 << i; } /* FIXME: add subpixel order */ - drm_atomic_helper_wait_for_vblanks(dev, state); + if (!state->legacy_cursor_update) + intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask); + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + intel_post_plane_update(to_intel_crtc(crtc)); + + if (put_domains[i]) + modeset_put_power_domains(dev_priv, put_domains[i]); + } + + if (intel_state->modeset) + intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); mutex_lock(&dev->struct_mutex); drm_atomic_helper_cleanup_planes(dev, state); @@ -13696,7 +13683,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, { uint32_t val; - if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) return false; val = I915_READ(PCH_DPLL(pll->id)); @@ -13704,6 +13691,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + return val & DPLL_VCO_ENABLE; } @@ -14682,10 +14671,12 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, u32 gen = INTEL_INFO(dev)->gen; if (gen >= 9) { + int cpp = drm_format_plane_cpp(pixel_format, 0); + /* "The stride in bytes must not exceed the size of 8K * pixels and 32K bytes."
*/ - return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768); + return min(8192 * cpp, 32768); } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { return 32*1024; } else if (gen >= 4) { @@ -15517,6 +15508,17 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc) return false; } +static bool intel_encoder_has_connectors(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct intel_connector *connector; + + for_each_connector_on_encoder(dev, &encoder->base, connector) + return true; + + return false; +} + static void intel_sanitize_crtc(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; @@ -15627,7 +15629,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) { struct intel_connector *connector; struct drm_device *dev = encoder->base.dev; - bool active = false; /* We need to check both for a crtc link (meaning that the * encoder is active and trying to read from a pipe) and the */ bool has_active_crtc = encoder->base.crtc && to_intel_crtc(encoder->base.crtc)->active; - for_each_intel_connector(dev, connector) { - if (connector->base.encoder != &encoder->base) - continue; - - active = true; - break; - } - - if (active && !has_active_crtc) { + if (intel_encoder_has_connectors(encoder) && !has_active_crtc) { DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", encoder->base.base.id, encoder->base.name); @@ -15698,10 +15691,12 @@ void i915_redisable_vga(struct drm_device *dev) * level, just check if the power well is enabled instead of trying to * follow the "don't touch the power well if we don't need it" policy * the rest of the driver uses. */ - if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA)) + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) return; i915_redisable_vga_power_on(dev); + + intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); } static bool primary_get_hw_state(struct intel_plane *plane) @@ -15929,63 +15924,76 @@ intel_modeset_setup_hw_state(struct drm_device *dev) for_each_intel_crtc(dev, crtc) { unsigned long put_domains; - put_domains = modeset_get_crtc_power_domains(&crtc->base); + put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config); if (WARN_ON(put_domains)) modeset_put_power_domains(dev_priv, put_domains); } intel_display_set_init_power(dev_priv, false); + + intel_fbc_init_pipe_state(dev_priv); } void intel_display_resume(struct drm_device *dev) { - struct drm_atomic_state *state = drm_atomic_state_alloc(dev); - struct intel_connector *conn; - struct intel_plane *plane; - struct drm_crtc *crtc; + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_atomic_state *state = dev_priv->modeset_restore_state; + struct drm_modeset_acquire_ctx ctx; int ret; + bool setup = false; - if (!state) - return; - - state->acquire_ctx = dev->mode_config.acquire_ctx; + dev_priv->modeset_restore_state = NULL; - /* preserve complete old state, including dpll */ - intel_atomic_get_shared_dpll_state(state); + /* + * This is a kludge because with real atomic modeset mode_config.mutex + * won't be taken. Unfortunately some probed state like + * audio_codec_enable is still protected by mode_config.mutex, so lock + * it here for now. 
+ */ + mutex_lock(&dev->mode_config.mutex); + drm_modeset_acquire_init(&ctx, 0); - for_each_crtc(dev, crtc) { - struct drm_crtc_state *crtc_state = - drm_atomic_get_crtc_state(state, crtc); +retry: + ret = drm_modeset_lock_all_ctx(dev, &ctx); - ret = PTR_ERR_OR_ZERO(crtc_state); - if (ret) - goto err; + if (ret == 0 && !setup) { + setup = true; - /* force a restore */ - crtc_state->mode_changed = true; + intel_modeset_setup_hw_state(dev); + i915_redisable_vga(dev); } - for_each_intel_plane(dev, plane) { - ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base)); - if (ret) - goto err; - } + if (ret == 0 && state) { + struct drm_crtc_state *crtc_state; + struct drm_crtc *crtc; + int i; - for_each_intel_connector(dev, conn) { - ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base)); - if (ret) - goto err; + state->acquire_ctx = &ctx; + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + /* + * Force recalculation even if we restore + * current state. With fast modeset this may not result + * in a modeset when the state is compatible. + */ + crtc_state->mode_changed = true; + } + + ret = drm_atomic_commit(state); } - intel_modeset_setup_hw_state(dev); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } - i915_redisable_vga(dev); - ret = drm_atomic_commit(state); - if (!ret) - return; + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + mutex_unlock(&dev->mode_config.mutex); -err: - DRM_ERROR("Restoring old state failed with %i\n", ret); - drm_atomic_state_free(state); + if (ret) { + DRM_ERROR("Restoring old state failed with %i\n", ret); + drm_atomic_state_free(state); + } } void intel_modeset_gem_init(struct drm_device *dev) @@ -15994,9 +16002,7 @@ void intel_modeset_gem_init(struct drm_device *dev) struct drm_i915_gem_object *obj; int ret; - mutex_lock(&dev->struct_mutex); intel_init_gt_powersave(dev); - mutex_unlock(&dev->struct_mutex); intel_modeset_init_hw(dev); @@ -16063,7 +16069,7 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_unregister_dsm_handler(); - intel_fbc_disable(dev_priv); + intel_fbc_global_disable(dev_priv); /* flush any delayed tasks or pending work */ flush_scheduled_work(); @@ -16076,9 +16082,7 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_cleanup_overlay(dev); - mutex_lock(&dev->struct_mutex); intel_cleanup_gt_powersave(dev); - mutex_unlock(&dev->struct_mutex); intel_teardown_gmbus(dev); } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index da704c6ee5cb..f069a82deb57 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -203,6 +203,7 @@ intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; int target_clock = mode->clock; int max_rate, mode_rate, max_lanes, max_link_clock; + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; if (is_edp(intel_dp) && fixed_mode) { if (mode->hdisplay > fixed_mode->hdisplay) @@ -220,7 +221,7 @@ intel_dp_mode_valid(struct drm_connector *connector, max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, 18); - if (mode_rate > max_rate) + if (mode_rate > max_rate || target_clock > max_dotclk) return MODE_CLOCK_HIGH; if (mode->clock < 10000) @@ -979,7 +980,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (WARN_ON(txsize > 20)) return -E2BIG; - memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); + if (msg->buffer) + 
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size); + else + WARN_ON(msg->size); ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize); if (ret > 0) { @@ -1798,12 +1802,21 @@ static void wait_panel_off(struct intel_dp *intel_dp) static void wait_panel_power_cycle(struct intel_dp *intel_dp) { + ktime_t panel_power_on_time; + s64 panel_power_off_duration; + DRM_DEBUG_KMS("Wait for panel power cycle\n"); + /* take the difference of current time and panel power off time + * and then make panel wait for t11_t12 if needed. */ + panel_power_on_time = ktime_get_boottime(); + panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time); + /* When we disable the VDD override bit last we have to do the manual * wait. */ - wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle, - intel_dp->panel_power_cycle_delay); + if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay) + wait_remaining_ms_from_jiffies(jiffies, + intel_dp->panel_power_cycle_delay - panel_power_off_duration); wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); } @@ -1955,7 +1968,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); if ((pp & POWER_TARGET_ON) == 0) - intel_dp->last_power_cycle = jiffies; + intel_dp->panel_power_off_time = ktime_get_boottime(); power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_put(dev_priv, power_domain); @@ -2104,7 +2117,7 @@ static void edp_panel_off(struct intel_dp *intel_dp) I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); - intel_dp->last_power_cycle = jiffies; + intel_dp->panel_power_off_time = ktime_get_boottime(); wait_panel_off(intel_dp); /* We got a reference when we enabled the VDD. 
*/ @@ -2343,15 +2356,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; enum intel_display_power_domain power_domain; u32 tmp; + bool ret; power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_is_enabled(dev_priv, power_domain)) + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + ret = false; + tmp = I915_READ(intel_dp->output_reg); if (!(tmp & DP_PORT_EN)) - return false; + goto out; if (IS_GEN7(dev) && port == PORT_A) { *pipe = PORT_TO_PIPE_CPT(tmp); @@ -2362,7 +2378,9 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, u32 trans_dp = I915_READ(TRANS_DP_CTL(p)); if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) { *pipe = p; - return true; + ret = true; + + goto out; } } @@ -2374,7 +2392,12 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, *pipe = PORT_TO_PIPE(tmp); } - return true; + ret = true; + +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } static void intel_dp_get_config(struct intel_encoder *encoder, @@ -3995,7 +4018,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp) } while (--attempts && count); if (attempts == 0) { - DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n"); + DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n"); ret = -ETIMEDOUT; } @@ -4474,20 +4497,20 @@ static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv, return I915_READ(PORT_HOTPLUG_STAT) & bit; } -static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv, - struct intel_digital_port *port) +static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port) { u32 bit; switch (port->port) { case PORT_B: - bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; + bit = PORTB_HOTPLUG_LIVE_STATUS_GM45; break; case PORT_C: - bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; + bit = PORTC_HOTPLUG_LIVE_STATUS_GM45; break; case PORT_D: - bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; + bit = PORTD_HOTPLUG_LIVE_STATUS_GM45; break; default: MISSING_CASE(port->port); @@ -4535,12 +4558,12 @@ bool intel_digital_port_connected(struct drm_i915_private *dev_priv, { if (HAS_PCH_IBX(dev_priv)) return ibx_digital_port_connected(dev_priv, port); - if (HAS_PCH_SPLIT(dev_priv)) + else if (HAS_PCH_SPLIT(dev_priv)) return cpt_digital_port_connected(dev_priv, port); else if (IS_BROXTON(dev_priv)) return bxt_digital_port_connected(dev_priv, port); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return vlv_digital_port_connected(dev_priv, port); + else if (IS_GM45(dev_priv)) + return gm45_digital_port_connected(dev_priv, port); else return g4x_digital_port_connected(dev_priv, port); } @@ -5102,7 +5125,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp) { - intel_dp->last_power_cycle = jiffies; + intel_dp->panel_power_off_time = ktime_get_boottime(); intel_dp->last_power_on = jiffies; intel_dp->last_backlight_off = jiffies; } @@ -6020,7 +6043,6 @@ intel_dp_init(struct drm_device *dev, } intel_dig_port->port = port; - dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->dp.output_reg = output_reg; intel_dig_port->max_lanes = 4; diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 88887938e0bf..0b8eefc2acc5 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ 
b/drivers/gpu/drm/i915/intel_dp_link_training.c @@ -215,27 +215,46 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) } } -static void -intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) +/* + * Pick training pattern for channel equalization. Training Pattern 3 for HBR2 + * or 1.2 devices that support it, Training Pattern 2 otherwise. + */ +static u32 intel_dp_training_pattern(struct intel_dp *intel_dp) { - bool channel_eq = false; - int tries, cr_tries; - uint32_t training_pattern = DP_TRAINING_PATTERN_2; + u32 training_pattern = DP_TRAINING_PATTERN_2; + bool source_tps3, sink_tps3; /* - * Training Pattern 3 for HBR2 or 1.2 devices that support it. - * * Intel platforms that support HBR2 also support TPS3. TPS3 support is - * also mandatory for downstream devices that support HBR2. + * also mandatory for downstream devices that support HBR2. However, not + * all sinks follow the spec. * * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is - * supported but still not enabled. + * supported in source but still not enabled. */ - if (intel_dp_source_supports_hbr2(intel_dp) && - drm_dp_tps3_supported(intel_dp->dpcd)) + source_tps3 = intel_dp_source_supports_hbr2(intel_dp); + sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd); + + if (source_tps3 && sink_tps3) { training_pattern = DP_TRAINING_PATTERN_3; - else if (intel_dp->link_rate == 540000) - DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n"); + } else if (intel_dp->link_rate == 540000) { + if (!source_tps3) + DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n"); + if (!sink_tps3) + DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n"); + } + + return training_pattern; +} + +static void +intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) +{ + bool channel_eq = false; + int tries, cr_tries; + u32 training_pattern; + + training_pattern = intel_dp_training_pattern(intel_dp); /* channel equalization */ if (!intel_dp_set_link_train(intel_dp, diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 2a2ab306ad84..a2bd698fe2f7 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -371,6 +371,8 @@ static enum drm_mode_status intel_dp_mst_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + /* TODO - validate mode against available PBN for link */ if (mode->clock < 10000) return MODE_CLOCK_LOW; @@ -378,6 +380,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_H_ILLEGAL; + if (mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + return MODE_OK; } diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index bf6f98134b50..4c027d69fac9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -379,6 +379,7 @@ struct intel_crtc_state { bool update_pipe; /* can a fast modeset be performed? */ bool disable_cxsr; bool wm_changed; /* watermarks are updated */ + bool fb_changed; /* fb on any of the planes is changed */ /* Pipe source size (ie. 
panel fitter input size) * All planes will be positioned inside this space, @@ -492,6 +493,8 @@ struct intel_crtc_state { bool ips_enabled; + bool enable_fbc; + bool double_wide; bool dp_encoder_is_mst; @@ -542,16 +545,13 @@ struct intel_mmio_flip { */ struct intel_crtc_atomic_commit { /* Sleepable operations to perform before commit */ - bool disable_fbc; - bool disable_ips; - bool pre_disable_primary; /* Sleepable operations to perform after commit */ unsigned fb_bits; - bool wait_vblank; - bool update_fbc; bool post_enable_primary; - unsigned update_sprite_watermarks; + + /* Sleepable operations to perform before and after commit */ + bool update_fbc; }; struct intel_crtc { @@ -575,7 +575,7 @@ struct intel_crtc { /* Display surface base address adjustment for pageflips. Note that on * gen4+ this only adjusts up to a tile, offsets within a tile are * handled in the hw itself (with the TILEOFF register). */ - unsigned long dspaddr_offset; + u32 dspaddr_offset; int adjusted_x; int adjusted_y; @@ -770,9 +770,9 @@ struct intel_dp { int backlight_off_delay; struct delayed_work panel_vdd_work; bool want_panel_vdd; - unsigned long last_power_cycle; unsigned long last_power_on; unsigned long last_backlight_off; + ktime_t panel_power_off_time; struct notifier_block edp_notifier; @@ -909,9 +909,7 @@ struct intel_unpin_work { }; struct intel_load_detect_pipe { - struct drm_framebuffer *release_fb; - bool load_detect_temp; - int dpms_mode; + struct drm_atomic_state *restore_state; }; static inline struct intel_encoder * @@ -994,6 +992,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) int intel_get_crtc_scanline(struct intel_crtc *crtc); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, unsigned int pipe_mask); +void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, + unsigned int pipe_mask); /* intel_crt.c */ void intel_crt_init(struct drm_device *dev); @@ -1172,11 +1172,11 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) -unsigned long intel_compute_tile_offset(struct drm_i915_private *dev_priv, - int *x, int *y, - uint64_t fb_modifier, - unsigned int cpp, - unsigned int pitch); +u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv, + int *x, int *y, + uint64_t fb_modifier, + unsigned int cpp, + unsigned int pitch); void intel_prepare_reset(struct drm_device *dev); void intel_finish_reset(struct drm_device *dev); void hsw_enable_pc8(struct drm_i915_private *dev_priv); @@ -1226,7 +1226,7 @@ u32 skl_plane_ctl_rotation(unsigned int rotation); /* intel_csr.c */ void intel_csr_ucode_init(struct drm_i915_private *); -void intel_csr_load_program(struct drm_i915_private *); +bool intel_csr_load_program(struct drm_i915_private *); void intel_csr_ucode_fini(struct drm_i915_private *); /* intel_dp.c */ @@ -1327,13 +1327,16 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev) #endif /* intel_fbc.c */ +void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, + struct drm_atomic_state *state); bool intel_fbc_is_active(struct drm_i915_private *dev_priv); -void intel_fbc_deactivate(struct intel_crtc *crtc); -void intel_fbc_update(struct intel_crtc *crtc); +void intel_fbc_pre_update(struct intel_crtc *crtc); +void intel_fbc_post_update(struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private 
*dev_priv); +void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv); void intel_fbc_enable(struct intel_crtc *crtc); -void intel_fbc_disable(struct drm_i915_private *dev_priv); -void intel_fbc_disable_crtc(struct intel_crtc *crtc); +void intel_fbc_disable(struct intel_crtc *crtc); +void intel_fbc_global_disable(struct drm_i915_private *dev_priv); void intel_fbc_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin); @@ -1432,6 +1435,8 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); void intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); +bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); void intel_display_power_put(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); @@ -1518,6 +1523,7 @@ enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv) enable_rpm_wakeref_asserts(dev_priv) void intel_runtime_pm_get(struct drm_i915_private *dev_priv); +bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv); void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv); void intel_runtime_pm_put(struct drm_i915_private *dev_priv); @@ -1559,6 +1565,7 @@ void skl_wm_get_hw_state(struct drm_device *dev); void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */); uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); +int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); /* intel_sdvo.c */ bool intel_sdvo_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 91cef3525c93..01b8e9f4c272 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -478,8 +478,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) DRM_DEBUG_KMS("\n"); - intel_dsi_prepare(encoder); intel_enable_dsi_pll(encoder); + intel_dsi_prepare(encoder); /* Panel Enable over CRC PMIC */ if (intel_dsi->gpio_panel) @@ -634,7 +634,6 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder) { struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); - u32 val; DRM_DEBUG_KMS("\n"); @@ -642,9 +641,13 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder) intel_dsi_clear_device_ready(encoder); - val = I915_READ(DSPCLK_GATE_D); - val &= ~DPOUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, val); + if (!IS_BROXTON(dev_priv)) { + u32 val; + + val = I915_READ(DSPCLK_GATE_D); + val &= ~DPOUNIT_CLOCK_GATE_DISABLE; + I915_WRITE(DSPCLK_GATE_D, val); + } drm_panel_unprepare(intel_dsi->panel); @@ -664,13 +667,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; enum intel_display_power_domain power_domain; enum port port; + bool ret; DRM_DEBUG_KMS("\n"); power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_is_enabled(dev_priv, power_domain)) + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + ret = false; + /* XXX: this only works for one DSI output */ for_each_dsi_port(port, intel_dsi->ports) { i915_reg_t ctrl_reg = IS_BROXTON(dev) ? 
@@ -691,12 +697,16 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) { if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) { *pipe = port == PORT_A ? PIPE_A : PIPE_B; - return true; + ret = true; + + goto out; } } } +out: + intel_display_power_put(dev_priv, power_domain); - return false; + return ret; } static void intel_dsi_get_config(struct intel_encoder *encoder, @@ -775,10 +785,9 @@ static void set_dsi_timings(struct drm_encoder *encoder, { struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - unsigned int bpp = intel_crtc->config->pipe_bpp; + unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format); unsigned int lane_count = intel_dsi->lane_count; u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; @@ -849,7 +858,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; enum port port; - unsigned int bpp = intel_crtc->config->pipe_bpp; + unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format); u32 val, tmp; u16 mode_hdisplay; diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index de7be7f3fb42..92f39227b361 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -34,6 +34,8 @@ #define DSI_DUAL_LINK_FRONT_BACK 1 #define DSI_DUAL_LINK_PIXEL_ALT 2 +int dsi_pixel_format_bpp(int pixel_format); + struct intel_dsi_host; struct intel_dsi { diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 1d43e6f37fc1..7f145b4fec6a 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -204,10 +204,28 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) struct drm_device *dev = intel_dsi->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + if (dev_priv->vbt.dsi.seq_version >= 3) + data++; + gpio = *data++; /* pull up/down */ - action = *data++; + action = *data++ & 1; + + if (gpio >= ARRAY_SIZE(gtable)) { + DRM_DEBUG_KMS("unknown gpio %u\n", gpio); + goto out; + } + + if (!IS_VALLEYVIEW(dev_priv)) { + DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); + goto out; + } + + if (dev_priv->vbt.dsi.seq_version >= 3) { + DRM_DEBUG_KMS("GPIO element v3 not supported\n"); + goto out; + } function = gtable[gpio].function_reg; pad = gtable[gpio].pad_reg; @@ -216,16 +234,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) if (!gtable[gpio].init) { /* program the function */ /* FIXME: remove constant below */ - vlv_gpio_nc_write(dev_priv, function, 0x2000CC00); + vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function, + 0x2000CC00); gtable[gpio].init = 1; } val = 0x4 | action; /* pull up/down */ - vlv_gpio_nc_write(dev_priv, pad, val); + vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val); mutex_unlock(&dev_priv->sb_lock); +out: return data; } @@ -420,10 +440,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->dual_link = mipi_config->dual_link; intel_dsi->pixel_overlap = mipi_config->pixel_overlap; - if (intel_dsi->pixel_format == VID_MODE_FORMAT_RGB666) - bits_per_pixel = 18; - else if 
(intel_dsi->pixel_format == VID_MODE_FORMAT_RGB565) - bits_per_pixel = 16; + bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format); intel_dsi->operation_mode = mipi_config->is_cmd_mode; intel_dsi->video_mode_format = mipi_config->video_transfer_mode; diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index bb5e95a1a453..70883c54cb0a 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -30,7 +30,7 @@ #include "i915_drv.h" #include "intel_dsi.h" -static int dsi_pixel_format_bpp(int pixel_format) +int dsi_pixel_format_bpp(int pixel_format) { int bpp; diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index a1988a486b92..0f0492f4a357 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -43,7 +43,7 @@ static inline bool fbc_supported(struct drm_i915_private *dev_priv) { - return dev_priv->fbc.activate != NULL; + return HAS_FBC(dev_priv); } static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv) @@ -56,6 +56,11 @@ static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv) return INTEL_INFO(dev_priv)->gen < 4; } +static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv) +{ + return INTEL_INFO(dev_priv)->gen <= 3; +} + /* * On some platforms where the CRTC's x:0/y:0 coordinates don't match the * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's @@ -74,19 +79,17 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc) * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value * we wrote to PIPESRC. */ -static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc, +static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache, int *width, int *height) { - struct intel_plane_state *plane_state = - to_intel_plane_state(crtc->base.primary->state); int w, h; - if (intel_rotation_90_or_270(plane_state->base.rotation)) { - w = drm_rect_height(&plane_state->src) >> 16; - h = drm_rect_width(&plane_state->src) >> 16; + if (intel_rotation_90_or_270(cache->plane.rotation)) { + w = cache->plane.src_h; + h = cache->plane.src_w; } else { - w = drm_rect_width(&plane_state->src) >> 16; - h = drm_rect_height(&plane_state->src) >> 16; + w = cache->plane.src_w; + h = cache->plane.src_h; } if (width) @@ -95,26 +98,23 @@ static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc, *height = h; } -static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc, - struct drm_framebuffer *fb) +static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, + struct intel_fbc_state_cache *cache) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; int lines; - intel_fbc_get_plane_source_size(crtc, NULL, &lines); + intel_fbc_get_plane_source_size(cache, NULL, &lines); if (INTEL_INFO(dev_priv)->gen >= 7) lines = min(lines, 2048); /* Hardware needs the full buffer stride, not just the active area. 
*/ - return lines * fb->pitches[0]; + return lines * cache->fb.stride; } static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) { u32 fbc_ctl; - dev_priv->fbc.active = false; - /* Disable compression */ fbc_ctl = I915_READ(FBC_CONTROL); if ((fbc_ctl & FBC_CTL_EN) == 0) @@ -130,21 +130,17 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) } } -static void i8xx_fbc_activate(struct intel_crtc *crtc) +static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; int cfb_pitch; int i; u32 fbc_ctl; - dev_priv->fbc.active = true; - /* Note: fbc.threshold == 1 for i8xx */ - cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE; - if (fb->pitches[0] < cfb_pitch) - cfb_pitch = fb->pitches[0]; + cfb_pitch = params->cfb_size / FBC_LL_SIZE; + if (params->fb.stride < cfb_pitch) + cfb_pitch = params->fb.stride; /* FBC_CTL wants 32B or 64B units */ if (IS_GEN2(dev_priv)) @@ -161,9 +157,9 @@ static void i8xx_fbc_activate(struct intel_crtc *crtc) /* Set it up... */ fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; - fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane); + fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane); I915_WRITE(FBC_CONTROL2, fbc_ctl2); - I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc)); + I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset); } /* enable it... */ @@ -173,7 +169,7 @@ static void i8xx_fbc_activate(struct intel_crtc *crtc) if (IS_I945GM(dev_priv)) fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; - fbc_ctl |= obj->fence_reg; + fbc_ctl |= params->fb.fence_reg; I915_WRITE(FBC_CONTROL, fbc_ctl); } @@ -182,23 +178,19 @@ static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) return I915_READ(FBC_CONTROL) & FBC_CTL_EN; } -static void g4x_fbc_activate(struct intel_crtc *crtc) +static void g4x_fbc_activate(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; u32 dpfc_ctl; - dev_priv->fbc.active = true; - - dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN; - if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN; + if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) dpfc_ctl |= DPFC_CTL_LIMIT_2X; else dpfc_ctl |= DPFC_CTL_LIMIT_1X; - dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; + dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg; - I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc)); + I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); /* enable it... 
*/ I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -208,8 +200,6 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) { u32 dpfc_ctl; - dev_priv->fbc.active = false; - /* Disable compression */ dpfc_ctl = I915_READ(DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { @@ -230,19 +220,14 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv) POSTING_READ(MSG_FBC_REND_STATE); } -static void ilk_fbc_activate(struct intel_crtc *crtc) +static void ilk_fbc_activate(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; u32 dpfc_ctl; int threshold = dev_priv->fbc.threshold; - unsigned int y_offset; - dev_priv->fbc.active = true; - - dpfc_ctl = DPFC_CTL_PLANE(crtc->plane); - if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane); + if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) threshold++; switch (threshold) { @@ -259,18 +244,17 @@ static void ilk_fbc_activate(struct intel_crtc *crtc) } dpfc_ctl |= DPFC_CTL_FENCE_EN; if (IS_GEN5(dev_priv)) - dpfc_ctl |= obj->fence_reg; + dpfc_ctl |= params->fb.fence_reg; - y_offset = get_crtc_fence_y_offset(crtc); - I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset); - I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); + I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset); + I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID); /* enable it... */ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); if (IS_GEN6(dev_priv)) { I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | obj->fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset); + SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); } intel_fbc_recompress(dev_priv); @@ -280,8 +264,6 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) { u32 dpfc_ctl; - dev_priv->fbc.active = false; - /* Disable compression */ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { @@ -295,21 +277,17 @@ static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; } -static void gen7_fbc_activate(struct intel_crtc *crtc) +static void gen7_fbc_activate(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct intel_fbc_reg_params *params = &dev_priv->fbc.params; u32 dpfc_ctl; int threshold = dev_priv->fbc.threshold; - dev_priv->fbc.active = true; - dpfc_ctl = 0; if (IS_IVYBRIDGE(dev_priv)) - dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane); + dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane); - if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) + if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) threshold++; switch (threshold) { @@ -337,20 +315,60 @@ static void gen7_fbc_activate(struct intel_crtc *crtc) ILK_FBCQ_DIS); } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ - I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe), - I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) | + I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe), + I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS); } I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | 
DPFC_CTL_EN); I915_WRITE(SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | obj->fence_reg); - I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc)); + SNB_CPU_FENCE_ENABLE | params->fb.fence_reg); + I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); intel_fbc_recompress(dev_priv); } +static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +{ + if (INTEL_INFO(dev_priv)->gen >= 5) + return ilk_fbc_is_active(dev_priv); + else if (IS_GM45(dev_priv)) + return g4x_fbc_is_active(dev_priv); + else + return i8xx_fbc_is_active(dev_priv); +} + +static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + fbc->active = true; + + if (INTEL_INFO(dev_priv)->gen >= 7) + gen7_fbc_activate(dev_priv); + else if (INTEL_INFO(dev_priv)->gen >= 5) + ilk_fbc_activate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_activate(dev_priv); + else + i8xx_fbc_activate(dev_priv); +} + +static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + + fbc->active = false; + + if (INTEL_INFO(dev_priv)->gen >= 5) + ilk_fbc_deactivate(dev_priv); + else if (IS_GM45(dev_priv)) + g4x_fbc_deactivate(dev_priv); + else + i8xx_fbc_deactivate(dev_priv); +} + /** * intel_fbc_is_active - Is FBC active? * @dev_priv: i915 device instance @@ -364,24 +382,24 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv) return dev_priv->fbc.active; } -static void intel_fbc_activate(const struct drm_framebuffer *fb) -{ - struct drm_i915_private *dev_priv = fb->dev->dev_private; - struct intel_crtc *crtc = dev_priv->fbc.crtc; - - dev_priv->fbc.activate(crtc); - - dev_priv->fbc.fb_id = fb->base.id; - dev_priv->fbc.y = crtc->base.y; -} - static void intel_fbc_work_fn(struct work_struct *__work) { struct drm_i915_private *dev_priv = container_of(__work, struct drm_i915_private, fbc.work.work); - struct intel_fbc_work *work = &dev_priv->fbc.work; - struct intel_crtc *crtc = dev_priv->fbc.crtc; - int delay_ms = 50; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_work *work = &fbc->work; + struct intel_crtc *crtc = fbc->crtc; + struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe]; + + if (drm_crtc_vblank_get(&crtc->base)) { + DRM_ERROR("vblank not available for FBC on pipe %c\n", + pipe_name(crtc->pipe)); + + mutex_lock(&fbc->lock); + work->scheduled = false; + mutex_unlock(&fbc->lock); + return; + } retry: /* Delay the actual enabling to let pageflipping cease and the @@ -390,142 +408,97 @@ retry: * vblank to pass after disabling the FBC before we attempt * to modify the control registers. * - * A more complicated solution would involve tracking vblanks - * following the termination of the page-flipping sequence - * and indeed performing the enable as a co-routine and not - * waiting synchronously upon the vblank. - * * WaFbcWaitForVBlankBeforeEnable:ilk,snb + * + * It is also worth mentioning that since work->scheduled_vblank can be + * updated multiple times by the other threads, hitting the timeout is + * not an error condition. We'll just end up hitting the "goto retry" + * case below. */ - wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms); + wait_event_timeout(vblank->queue, + drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank, + msecs_to_jiffies(50)); - mutex_lock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); /* Were we cancelled? */ if (!work->scheduled) goto out; /* Were we delayed again while this function was sleeping? 
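The reworked intel_fbc_work_fn() sleeps on the pipe's vblank queue instead of an unconditional 50 ms delay, and treats a timeout as harmless because scheduled_vblank can be moved underneath it. A sketch of that loop's shape, with the drm vblank helpers and fbc->lock replaced by hypothetical stubs:

#include <stdbool.h>

/* Stubs standing in for drm_crtc_vblank_count(), the vblank wait queue
 * and fbc->lock in the work function. */
struct fbc_work { bool scheduled; unsigned int scheduled_vblank; };
extern unsigned int vblank_count(void);
extern void wait_for_new_vblank_or_timeout(unsigned int seen, int timeout_ms);
extern void fbc_lock(void);
extern void fbc_unlock(void);
extern void fbc_hw_activate(void);

static void fbc_work_fn(struct fbc_work *work)
{
retry:
    /* Sleep until at least one vblank has passed since scheduling;
     * hitting the timeout is not an error, we just re-check below. */
    wait_for_new_vblank_or_timeout(work->scheduled_vblank, 50);

    fbc_lock();
    if (!work->scheduled) /* cancelled while we slept */
        goto out;
    if (vblank_count() == work->scheduled_vblank) {
        /* rescheduled while sleeping: wait for the new deadline */
        fbc_unlock();
        goto retry;
    }
    fbc_hw_activate();
    work->scheduled = false;
out:
    fbc_unlock();
}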
*/ - if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms), - jiffies)) { - mutex_unlock(&dev_priv->fbc.lock); + if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) { + mutex_unlock(&fbc->lock); goto retry; } - if (crtc->base.primary->fb == work->fb) - intel_fbc_activate(work->fb); + intel_fbc_hw_activate(dev_priv); work->scheduled = false; out: - mutex_unlock(&dev_priv->fbc.lock); -} - -static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv) -{ - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); - dev_priv->fbc.work.scheduled = false; + mutex_unlock(&fbc->lock); + drm_crtc_vblank_put(&crtc->base); } static void intel_fbc_schedule_activation(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct intel_fbc_work *work = &dev_priv->fbc.work; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_work *work = &fbc->work; + + WARN_ON(!mutex_is_locked(&fbc->lock)); - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); + if (drm_crtc_vblank_get(&crtc->base)) { + DRM_ERROR("vblank not available for FBC on pipe %c\n", + pipe_name(crtc->pipe)); + return; + } - /* It is useless to call intel_fbc_cancel_work() in this function since - * we're not releasing fbc.lock, so it won't have an opportunity to grab - * it to discover that it was cancelled. So we just update the expected - * jiffy count. */ - work->fb = crtc->base.primary->fb; + /* It is useless to call intel_fbc_cancel_work() or cancel_work() in + * this function since we're not releasing fbc.lock, so it won't have an + * opportunity to grab it to discover that it was cancelled. So we just + * update the expected jiffy count. */ work->scheduled = true; - work->enable_jiffies = jiffies; + work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base); + drm_crtc_vblank_put(&crtc->base); schedule_work(&work->work); } -static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv) +static void intel_fbc_deactivate(struct drm_i915_private *dev_priv) { - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); - - intel_fbc_cancel_work(dev_priv); - - if (dev_priv->fbc.active) - dev_priv->fbc.deactivate(dev_priv); -} - -/* - * intel_fbc_deactivate - deactivate FBC if it's associated with crtc - * @crtc: the CRTC - * - * This function deactivates FBC if it's associated with the provided CRTC. - */ -void intel_fbc_deactivate(struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - - if (!fbc_supported(dev_priv)) - return; + struct intel_fbc *fbc = &dev_priv->fbc; - mutex_lock(&dev_priv->fbc.lock); - if (dev_priv->fbc.crtc == crtc) - __intel_fbc_deactivate(dev_priv); - mutex_unlock(&dev_priv->fbc.lock); -} + WARN_ON(!mutex_is_locked(&fbc->lock)); -static void set_no_fbc_reason(struct drm_i915_private *dev_priv, - const char *reason) -{ - if (dev_priv->fbc.no_fbc_reason == reason) - return; + /* Calling cancel_work() here won't help due to the fact that the work + * function grabs fbc->lock. Just set scheduled to false so the work + * function can know it was cancelled. 
*/ + fbc->work.scheduled = false; - dev_priv->fbc.no_fbc_reason = reason; - DRM_DEBUG_KMS("Disabling FBC: %s\n", reason); + if (fbc->active) + intel_fbc_hw_deactivate(dev_priv); } -static bool crtc_can_fbc(struct intel_crtc *crtc) +static bool multiple_pipes_ok(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct drm_plane *primary = crtc->base.primary; + struct intel_fbc *fbc = &dev_priv->fbc; + enum pipe pipe = crtc->pipe; - if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) - return false; - - if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) - return false; - - return true; -} - -static bool crtc_is_valid(struct intel_crtc *crtc) -{ - if (!intel_crtc_active(&crtc->base)) - return false; - - if (!to_intel_plane_state(crtc->base.primary->state)->visible) - return false; - - return true; -} - -static bool multiple_pipes_ok(struct drm_i915_private *dev_priv) -{ - enum pipe pipe; - int n_pipes = 0; - struct drm_crtc *crtc; - - if (INTEL_INFO(dev_priv)->gen > 4) + /* Don't even bother tracking anything we don't need. */ + if (!no_fbc_on_multiple_pipes(dev_priv)) return true; - for_each_pipe(dev_priv, pipe) { - crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + WARN_ON(!drm_modeset_is_locked(&primary->mutex)); - if (intel_crtc_active(crtc) && - to_intel_plane_state(crtc->primary->state)->visible) - n_pipes++; - } + if (to_intel_plane_state(primary->state)->visible) + fbc->visible_pipes_mask |= (1 << pipe); + else + fbc->visible_pipes_mask &= ~(1 << pipe); - return (n_pipes < 2); + return (fbc->visible_pipes_mask & ~(1 << pipe)) == 0; } static int find_compression_threshold(struct drm_i915_private *dev_priv, @@ -581,16 +554,16 @@ again: static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb = crtc->base.primary->state->fb; + struct intel_fbc *fbc = &dev_priv->fbc; struct drm_mm_node *uninitialized_var(compressed_llb); int size, fb_cpp, ret; - WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb)); + WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb)); - size = intel_fbc_calculate_cfb_size(crtc, fb); - fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0); + size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache); + fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0); - ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb, + ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, size, fb_cpp); if (!ret) goto err_llb; @@ -599,12 +572,12 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) } - dev_priv->fbc.threshold = ret; + fbc->threshold = ret; if (INTEL_INFO(dev_priv)->gen >= 5) - I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); + I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start); else if (IS_GM45(dev_priv)) { - I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start); + I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start); } else { compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); if (!compressed_llb) @@ -615,23 +588,22 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) if (ret) goto err_fb; - dev_priv->fbc.compressed_llb = compressed_llb; + fbc->compressed_llb = compressed_llb; I915_WRITE(FBC_CFB_BASE, - dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start); + dev_priv->mm.stolen_base + fbc->compressed_fb.start); I915_WRITE(FBC_LL_BASE, dev_priv->mm.stolen_base + compressed_llb->start); } DRM_DEBUG_KMS("reserved %llu bytes
of contiguous stolen space for FBC, threshold: %d\n", - dev_priv->fbc.compressed_fb.size, - dev_priv->fbc.threshold); + fbc->compressed_fb.size, fbc->threshold); return 0; err_fb: kfree(compressed_llb); - i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb); + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); err_llb: pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; @@ -639,25 +611,27 @@ err_llb: static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) { - if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb)) - i915_gem_stolen_remove_node(dev_priv, - &dev_priv->fbc.compressed_fb); - - if (dev_priv->fbc.compressed_llb) { - i915_gem_stolen_remove_node(dev_priv, - dev_priv->fbc.compressed_llb); - kfree(dev_priv->fbc.compressed_llb); + struct intel_fbc *fbc = &dev_priv->fbc; + + if (drm_mm_node_allocated(&fbc->compressed_fb)) + i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); + + if (fbc->compressed_llb) { + i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); + kfree(fbc->compressed_llb); } } void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) { + struct intel_fbc *fbc = &dev_priv->fbc; + if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); __intel_fbc_cleanup_cfb(dev_priv); - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); } static bool stride_is_valid(struct drm_i915_private *dev_priv, @@ -681,19 +655,17 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv, return true; } -static bool pixel_format_is_valid(struct drm_framebuffer *fb) +static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, + uint32_t pixel_format) { - struct drm_device *dev = fb->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - switch (fb->pixel_format) { + switch (pixel_format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: return true; case DRM_FORMAT_XRGB1555: case DRM_FORMAT_RGB565: /* 16bpp not supported on gen2 */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return false; /* WaFbcOnly1to1Ratio:ctg */ if (IS_G4X(dev_priv)) @@ -713,6 +685,7 @@ static bool pixel_format_is_valid(struct drm_framebuffer *fb) static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; unsigned int effective_w, effective_h, max_w, max_h; if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) { @@ -726,87 +699,105 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) max_h = 1536; } - intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h); + intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w, + &effective_h); effective_w += crtc->adjusted_x; effective_h += crtc->adjusted_y; return effective_w <= max_w && effective_h <= max_h; } -/** - * __intel_fbc_update - activate/deactivate FBC as needed, unlocked - * @crtc: the CRTC that triggered the update - * - * This function completely reevaluates the status of FBC, then activates, - * deactivates or maintains it on the same state. 
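intel_fbc_alloc_cfb() above prints the threshold that find_compression_threshold() settled on; that helper's body is not shown in this hunk, but its role is to fall back to smaller stolen-memory allocations by letting the hardware compress more aggressively. A simplified model of the fallback, under the assumption (suggested by the surrounding code) that threshold 1 means a full-size CFB while 2 and 4 allow half- and quarter-size buffers:

static int find_threshold(unsigned long want, unsigned long stolen_free)
{
    int threshold;

    for (threshold = 1; threshold <= 4; threshold <<= 1)
        if (want / threshold <= stolen_free)
            return threshold; /* 1, 2 or 4 */

    return 0; /* nothing fits: caller takes the -ENOSPC path */
}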
- */ -static void __intel_fbc_update(struct intel_crtc *crtc) +static void intel_fbc_update_state_cache(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct drm_framebuffer *fb; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_plane_state *plane_state = + to_intel_plane_state(crtc->base.primary->state); + struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj; - const struct drm_display_mode *adjusted_mode; - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); + WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex)); + WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex)); - if (!multiple_pipes_ok(dev_priv)) { - set_no_fbc_reason(dev_priv, "more than one pipe active"); - goto out_disable; - } + cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + cache->crtc.hsw_bdw_pixel_rate = + ilk_pipe_pixel_rate(crtc_state); - if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc) - return; + cache->plane.rotation = plane_state->base.rotation; + cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16; + cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16; + cache->plane.visible = plane_state->visible; - if (!crtc_is_valid(crtc)) { - set_no_fbc_reason(dev_priv, "no output"); - goto out_disable; - } + if (!cache->plane.visible) + return; - fb = crtc->base.primary->fb; obj = intel_fb_obj(fb); - adjusted_mode = &crtc->config->base.adjusted_mode; - if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) || - (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) { - set_no_fbc_reason(dev_priv, "incompatible mode"); - goto out_disable; + /* FIXME: We lack the proper locking here, so only run this on the + * platforms that need. */ + if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7) + cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); + cache->fb.pixel_format = fb->pixel_format; + cache->fb.stride = fb->pitches[0]; + cache->fb.fence_reg = obj->fence_reg; + cache->fb.tiling_mode = obj->tiling_mode; +} + +static bool intel_fbc_can_activate(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + if (!cache->plane.visible) { + fbc->no_fbc_reason = "primary plane not visible"; + return false; + } + + if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) || + (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) { + fbc->no_fbc_reason = "incompatible mode"; + return false; } if (!intel_fbc_hw_tracking_covers_screen(crtc)) { - set_no_fbc_reason(dev_priv, "mode too large for compression"); - goto out_disable; + fbc->no_fbc_reason = "mode too large for compression"; + return false; } /* The use of a CPU fence is mandatory in order to detect writes * by the CPU to the scanout and trigger updates to the FBC. 
*/ - if (obj->tiling_mode != I915_TILING_X || - obj->fence_reg == I915_FENCE_REG_NONE) { - set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced"); - goto out_disable; + if (cache->fb.tiling_mode != I915_TILING_X || + cache->fb.fence_reg == I915_FENCE_REG_NONE) { + fbc->no_fbc_reason = "framebuffer not tiled or fenced"; + return false; } if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) && - crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) { - set_no_fbc_reason(dev_priv, "rotation unsupported"); - goto out_disable; + cache->plane.rotation != BIT(DRM_ROTATE_0)) { + fbc->no_fbc_reason = "rotation unsupported"; + return false; } - if (!stride_is_valid(dev_priv, fb->pitches[0])) { - set_no_fbc_reason(dev_priv, "framebuffer stride not supported"); - goto out_disable; + if (!stride_is_valid(dev_priv, cache->fb.stride)) { + fbc->no_fbc_reason = "framebuffer stride not supported"; + return false; } - if (!pixel_format_is_valid(fb)) { - set_no_fbc_reason(dev_priv, "pixel format is invalid"); - goto out_disable; + if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) { + fbc->no_fbc_reason = "pixel format is invalid"; + return false; } /* WaFbcExceedCdClockThreshold:hsw,bdw */ if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && - ilk_pipe_pixel_rate(crtc->config) >= - dev_priv->cdclk_freq * 95 / 100) { - set_no_fbc_reason(dev_priv, "pixel rate is too big"); - goto out_disable; + cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) { + fbc->no_fbc_reason = "pixel rate is too big"; + return false; } /* It is possible for the required CFB size change without a @@ -819,189 +810,322 @@ static void __intel_fbc_update(struct intel_crtc *crtc) * we didn't get any invalidate/deactivate calls, but this would require * a lot of tracking just for a specific case. If we conclude it's an * important case, we can implement it later. 
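The WaFbcExceedCdClockThreshold check above compares the cached pixel rate against 95% of the display clock. A worked example of the same comparison; frequencies are in kHz and the sample numbers are illustrative, not taken from the patch.

#include <stdbool.h>
#include <stdio.h>

/* FBC is refused once the pipe pixel rate reaches 95% of cdclk. */
static bool pixel_rate_ok_for_fbc(unsigned int pixel_rate_khz,
                                  unsigned int cdclk_khz)
{
    return pixel_rate_khz < cdclk_khz * 95 / 100;
}

int main(void)
{
    /* 1920x1080@60 (~148.5 MHz) against a 450 MHz cdclk: fine. */
    printf("%d\n", pixel_rate_ok_for_fbc(148500, 450000));
    /* ~533.25 MHz pixel rate against a 540 MHz cdclk: 533250 >= 513000,
     * so FBC is refused. */
    printf("%d\n", pixel_rate_ok_for_fbc(533250, 540000));
    return 0;
}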
*/ - if (intel_fbc_calculate_cfb_size(crtc, fb) > - dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) { - set_no_fbc_reason(dev_priv, "CFB requirements changed"); - goto out_disable; + if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) > + fbc->compressed_fb.size * fbc->threshold) { + fbc->no_fbc_reason = "CFB requirements changed"; + return false; } + return true; +} + +static bool intel_fbc_can_choose(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + bool enable_by_default = IS_HASWELL(dev_priv) || + IS_BROADWELL(dev_priv); + + if (intel_vgpu_active(dev_priv->dev)) { + fbc->no_fbc_reason = "VGPU is active"; + return false; + } + + if (i915.enable_fbc < 0 && !enable_by_default) { + fbc->no_fbc_reason = "disabled per chip default"; + return false; + } + + if (!i915.enable_fbc) { + fbc->no_fbc_reason = "disabled per module param"; + return false; + } + + if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) { + fbc->no_fbc_reason = "no enabled pipes can have FBC"; + return false; + } + + if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) { + fbc->no_fbc_reason = "no enabled planes can have FBC"; + return false; + } + + return true; +} + +static void intel_fbc_get_reg_params(struct intel_crtc *crtc, + struct intel_fbc_reg_params *params) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_state_cache *cache = &fbc->state_cache; + + /* Since all our fields are integer types, use memset here so the + * comparison function can rely on memcmp because the padding will be + * zero. */ + memset(params, 0, sizeof(*params)); + + params->crtc.pipe = crtc->pipe; + params->crtc.plane = crtc->plane; + params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); + + params->fb.pixel_format = cache->fb.pixel_format; + params->fb.stride = cache->fb.stride; + params->fb.fence_reg = cache->fb.fence_reg; + + params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); + + params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset; +} + +static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, + struct intel_fbc_reg_params *params2) +{ + /* We can use this since intel_fbc_get_reg_params() does a memset. */ + return memcmp(params1, params2, sizeof(*params1)) == 0; +} + +void intel_fbc_pre_update(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + + if (!fbc_supported(dev_priv)) + return; + + mutex_lock(&fbc->lock); + + if (!multiple_pipes_ok(crtc)) { + fbc->no_fbc_reason = "more than one pipe active"; + goto deactivate; + } + + if (!fbc->enabled || fbc->crtc != crtc) + goto unlock; + + intel_fbc_update_state_cache(crtc); + +deactivate: + intel_fbc_deactivate(dev_priv); +unlock: + mutex_unlock(&fbc->lock); +} + +static void __intel_fbc_post_update(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc_reg_params old_params; + + WARN_ON(!mutex_is_locked(&fbc->lock)); + + if (!fbc->enabled || fbc->crtc != crtc) + return; + + if (!intel_fbc_can_activate(crtc)) { + WARN_ON(fbc->active); + return; + } + + old_params = fbc->params; + intel_fbc_get_reg_params(crtc, &fbc->params); + /* If the scanout has not changed, don't modify the FBC settings. 
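intel_fbc_get_reg_params() below zeroes the whole structure before filling it precisely so that intel_fbc_reg_params_equal() may use memcmp(): without the memset, padding bytes are indeterminate and two logically identical parameter blocks could compare unequal. A minimal standalone demonstration (the padding layout is the typical one, though strictly implementation-defined):

#include <stdio.h>
#include <string.h>

/* Two structs equal field-by-field can still differ under memcmp()
 * through their padding bytes. */
struct params { char plane; /* usually 3 padding bytes here */ int pipe; };

int main(void)
{
    struct params a, b;

    memset(&a, 0x00, sizeof(a));
    memset(&b, 0xff, sizeof(b)); /* dirty padding */
    a.plane = b.plane = 1;
    a.pipe = b.pipe = 2;

    printf("fields equal, memcmp says %d\n", memcmp(&a, &b, sizeof(a)));
    return 0;
}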
* Note that we make the fundamental assumption that the fb->obj * cannot be unpinned (and have its GTT offset and fence revoked) * without first being decoupled from the scanout and FBC disabled. */ - if (dev_priv->fbc.crtc == crtc && - dev_priv->fbc.fb_id == fb->base.id && - dev_priv->fbc.y == crtc->base.y && - dev_priv->fbc.active) + if (fbc->active && + intel_fbc_reg_params_equal(&old_params, &fbc->params)) return; - if (intel_fbc_is_active(dev_priv)) { - /* We update FBC along two paths, after changing fb/crtc - * configuration (modeswitching) and after page-flipping - * finishes. For the latter, we know that not only did - * we disable the FBC at the start of the page-flip - * sequence, but also more than one vblank has passed. - * - * For the former case of modeswitching, it is possible - * to switch between two FBC valid configurations - * instantaneously so we do need to disable the FBC - * before we can modify its control registers. We also - * have to wait for the next vblank for that to take - * effect. However, since we delay enabling FBC we can - * assume that a vblank has passed since disabling and - * that we can safely alter the registers in the deferred - * callback. - * - * In the scenario that we go from a valid to invalid - * and then back to valid FBC configuration we have - * no strict enforcement that a vblank occurred since - * disabling the FBC. However, along all current pipe - * disabling paths we do need to wait for a vblank at - * some point. And we wait before enabling FBC anyway. - */ - DRM_DEBUG_KMS("deactivating FBC for update\n"); - __intel_fbc_deactivate(dev_priv); - } - + intel_fbc_deactivate(dev_priv); intel_fbc_schedule_activation(crtc); - dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)"; - return; - -out_disable: - /* Multiple disables should be harmless */ - if (intel_fbc_is_active(dev_priv)) { - DRM_DEBUG_KMS("unsupported config, deactivating FBC\n"); - __intel_fbc_deactivate(dev_priv); - } + fbc->no_fbc_reason = "FBC enabled (active or scheduled)"; } -/* - * intel_fbc_update - activate/deactivate FBC as needed - * @crtc: the CRTC that triggered the update - * - * This function reevaluates the overall state and activates or deactivates FBC. 
- */ -void intel_fbc_update(struct intel_crtc *crtc) +void intel_fbc_post_update(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); - __intel_fbc_update(crtc); - mutex_unlock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); + __intel_fbc_post_update(crtc); + mutex_unlock(&fbc->lock); +} + +static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) +{ + if (fbc->enabled) + return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; + else + return fbc->possible_framebuffer_bits; } void intel_fbc_invalidate(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - unsigned int fbc_bits; + struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) return; - if (origin == ORIGIN_GTT) + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) return; - mutex_lock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); - if (dev_priv->fbc.enabled) - fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe); - else - fbc_bits = dev_priv->fbc.possible_framebuffer_bits; - - dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits); + fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; - if (dev_priv->fbc.busy_bits) - __intel_fbc_deactivate(dev_priv); + if (fbc->enabled && fbc->busy_bits) + intel_fbc_deactivate(dev_priv); - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); } void intel_fbc_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin) { + struct intel_fbc *fbc = &dev_priv->fbc; + if (!fbc_supported(dev_priv)) return; - if (origin == ORIGIN_GTT) + if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP) return; - mutex_lock(&dev_priv->fbc.lock); + mutex_lock(&fbc->lock); - dev_priv->fbc.busy_bits &= ~frontbuffer_bits; + fbc->busy_bits &= ~frontbuffer_bits; - if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) { - if (origin != ORIGIN_FLIP && dev_priv->fbc.active) { + if (!fbc->busy_bits && fbc->enabled && + (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { + if (fbc->active) intel_fbc_recompress(dev_priv); - } else { - __intel_fbc_deactivate(dev_priv); - __intel_fbc_update(dev_priv->fbc.crtc); + else + __intel_fbc_post_update(fbc->crtc); + } + + mutex_unlock(&fbc->lock); +} + +/** + * intel_fbc_choose_crtc - select a CRTC to enable FBC on + * @dev_priv: i915 device instance + * @state: the atomic state structure + * + * This function looks at the proposed state for CRTCs and planes, then chooses + * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to + * true. + * + * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe + * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. + */ +void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, + struct drm_atomic_state *state) +{ + struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + bool fbc_crtc_present = false; + int i, j; + + mutex_lock(&fbc->lock); + + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (fbc->crtc == to_intel_crtc(crtc)) { + fbc_crtc_present = true; + break; + } + } + /* This atomic commit doesn't involve the CRTC currently tied to FBC. 
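The invalidate/flush hooks below track CPU frontbuffer writes in busy_bits: an invalidate against the FBC plane's bit deactivates compression, and the matching flush either recompresses or re-runs the update. A standalone model of the mask bookkeeping (the ORIGIN_GTT/ORIGIN_FLIP filtering is omitted):

#include <assert.h>

static unsigned int busy_bits;

static void invalidate(unsigned int fbc_bit, unsigned int frontbuffer_bits)
{
    /* CPU is about to write: mark busy (the driver also deactivates) */
    busy_bits |= fbc_bit & frontbuffer_bits;
}

static int flush_allows_reactivation(unsigned int fbc_bit,
                                     unsigned int frontbuffer_bits)
{
    busy_bits &= ~frontbuffer_bits;
    /* re-arm only if nothing is still busy and the flush actually
     * touched the FBC plane */
    return busy_bits == 0 && (frontbuffer_bits & fbc_bit) != 0;
}

int main(void)
{
    invalidate(0x1, 0x1);                       /* CPU dirtied our plane */
    assert(flush_allows_reactivation(0x1, 0x1)); /* its flush re-arms */
    assert(!flush_allows_reactivation(0x1, 0x2)); /* other planes do not */
    return 0;
}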
*/ + if (!fbc_crtc_present && fbc->crtc != NULL) + goto out; + + /* Simply choose the first CRTC that is compatible and has a visible + * plane. We could go for fancier schemes such as checking the plane + * size, but this would just affect the few platforms that don't tie FBC + * to pipe or plane A. */ + for_each_plane_in_state(state, plane, plane_state, i) { + struct intel_plane_state *intel_plane_state = + to_intel_plane_state(plane_state); + + if (!intel_plane_state->visible) + continue; + + for_each_crtc_in_state(state, crtc, crtc_state, j) { + struct intel_crtc_state *intel_crtc_state = + to_intel_crtc_state(crtc_state); + + if (plane_state->crtc != crtc) + continue; + + if (!intel_fbc_can_choose(to_intel_crtc(crtc))) + break; + + intel_crtc_state->enable_fbc = true; + goto out; } } - mutex_unlock(&dev_priv->fbc.lock); +out: + mutex_unlock(&fbc->lock); } /** * intel_fbc_enable: tries to enable FBC on the CRTC * @crtc: the CRTC * - * This function checks if it's possible to enable FBC on the following CRTC, - * then enables it. Notice that it doesn't activate FBC. + * This function checks if the given CRTC was chosen for FBC, then enables it if + * possible. Notice that it doesn't activate FBC. It is valid to call + * intel_fbc_enable multiple times for the same pipe without an + * intel_fbc_disable in the middle, as long as it is deactivated. */ void intel_fbc_enable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); - - if (dev_priv->fbc.enabled) { - WARN_ON(dev_priv->fbc.crtc == crtc); - goto out; - } - - WARN_ON(dev_priv->fbc.active); - WARN_ON(dev_priv->fbc.crtc != NULL); + mutex_lock(&fbc->lock); - if (intel_vgpu_active(dev_priv->dev)) { - set_no_fbc_reason(dev_priv, "VGPU is active"); - goto out; - } - - if (i915.enable_fbc < 0) { - set_no_fbc_reason(dev_priv, "disabled per chip default"); + if (fbc->enabled) { + WARN_ON(fbc->crtc == NULL); + if (fbc->crtc == crtc) { + WARN_ON(!crtc->config->enable_fbc); + WARN_ON(fbc->active); + } goto out; } - if (!i915.enable_fbc) { - set_no_fbc_reason(dev_priv, "disabled per module param"); + if (!crtc->config->enable_fbc) goto out; - } - if (!crtc_can_fbc(crtc)) { - set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC"); - goto out; - } + WARN_ON(fbc->active); + WARN_ON(fbc->crtc != NULL); + intel_fbc_update_state_cache(crtc); if (intel_fbc_alloc_cfb(crtc)) { - set_no_fbc_reason(dev_priv, "not enough stolen memory"); + fbc->no_fbc_reason = "not enough stolen memory"; goto out; } DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); - dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n"; + fbc->no_fbc_reason = "FBC enabled but not active yet\n"; - dev_priv->fbc.enabled = true; - dev_priv->fbc.crtc = crtc; + fbc->enabled = true; + fbc->crtc = crtc; out: - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); } /** @@ -1013,58 +1137,88 @@ out: */ static void __intel_fbc_disable(struct drm_i915_private *dev_priv) { - struct intel_crtc *crtc = dev_priv->fbc.crtc; + struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_crtc *crtc = fbc->crtc; - WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); - WARN_ON(!dev_priv->fbc.enabled); - WARN_ON(dev_priv->fbc.active); - assert_pipe_disabled(dev_priv, crtc->pipe); + WARN_ON(!mutex_is_locked(&fbc->lock)); + WARN_ON(!fbc->enabled); + WARN_ON(fbc->active); + WARN_ON(crtc->active); DRM_DEBUG_KMS("Disabling FBC on 
pipe %c\n", pipe_name(crtc->pipe)); __intel_fbc_cleanup_cfb(dev_priv); - dev_priv->fbc.enabled = false; - dev_priv->fbc.crtc = NULL; + fbc->enabled = false; + fbc->crtc = NULL; } /** - * intel_fbc_disable_crtc - disable FBC if it's associated with crtc + * intel_fbc_disable - disable FBC if it's associated with crtc * @crtc: the CRTC * * This function disables FBC if it's associated with the provided CRTC. */ -void intel_fbc_disable_crtc(struct intel_crtc *crtc) +void intel_fbc_disable(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_fbc *fbc = &dev_priv->fbc; if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); - if (dev_priv->fbc.crtc == crtc) { - WARN_ON(!dev_priv->fbc.enabled); - WARN_ON(dev_priv->fbc.active); + mutex_lock(&fbc->lock); + if (fbc->crtc == crtc) { + WARN_ON(!fbc->enabled); + WARN_ON(fbc->active); __intel_fbc_disable(dev_priv); } - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); + + cancel_work_sync(&fbc->work.work); } /** - * intel_fbc_disable - globally disable FBC + * intel_fbc_global_disable - globally disable FBC * @dev_priv: i915 device instance * * This function disables FBC regardless of which CRTC is associated with it. */ -void intel_fbc_disable(struct drm_i915_private *dev_priv) +void intel_fbc_global_disable(struct drm_i915_private *dev_priv) { + struct intel_fbc *fbc = &dev_priv->fbc; + if (!fbc_supported(dev_priv)) return; - mutex_lock(&dev_priv->fbc.lock); - if (dev_priv->fbc.enabled) + mutex_lock(&fbc->lock); + if (fbc->enabled) __intel_fbc_disable(dev_priv); - mutex_unlock(&dev_priv->fbc.lock); + mutex_unlock(&fbc->lock); + + cancel_work_sync(&fbc->work.work); +} + +/** + * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking + * @dev_priv: i915 device instance + * + * The FBC code needs to track CRTC visibility since the older platforms can't + * have FBC enabled while multiple pipes are used. This function does the + * initial setup at driver load to make sure FBC is matching the real hardware. + */ +void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + + /* Don't even bother tracking anything if we don't need. 
*/ + if (!no_fbc_on_multiple_pipes(dev_priv)) + return; + + for_each_intel_crtc(dev_priv->dev, crtc) + if (intel_crtc_active(&crtc->base) && + to_intel_plane_state(crtc->base.primary->state)->visible) + dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); } /** @@ -1075,51 +1229,35 @@ void intel_fbc_disable(struct drm_i915_private *dev_priv) */ void intel_fbc_init(struct drm_i915_private *dev_priv) { + struct intel_fbc *fbc = &dev_priv->fbc; enum pipe pipe; - INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn); - mutex_init(&dev_priv->fbc.lock); - dev_priv->fbc.enabled = false; - dev_priv->fbc.active = false; - dev_priv->fbc.work.scheduled = false; + INIT_WORK(&fbc->work.work, intel_fbc_work_fn); + mutex_init(&fbc->lock); + fbc->enabled = false; + fbc->active = false; + fbc->work.scheduled = false; if (!HAS_FBC(dev_priv)) { - dev_priv->fbc.no_fbc_reason = "unsupported by this chipset"; + fbc->no_fbc_reason = "unsupported by this chipset"; return; } for_each_pipe(dev_priv, pipe) { - dev_priv->fbc.possible_framebuffer_bits |= + fbc->possible_framebuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(pipe); if (fbc_on_pipe_a_only(dev_priv)) break; } - if (INTEL_INFO(dev_priv)->gen >= 7) { - dev_priv->fbc.is_active = ilk_fbc_is_active; - dev_priv->fbc.activate = gen7_fbc_activate; - dev_priv->fbc.deactivate = ilk_fbc_deactivate; - } else if (INTEL_INFO(dev_priv)->gen >= 5) { - dev_priv->fbc.is_active = ilk_fbc_is_active; - dev_priv->fbc.activate = ilk_fbc_activate; - dev_priv->fbc.deactivate = ilk_fbc_deactivate; - } else if (IS_GM45(dev_priv)) { - dev_priv->fbc.is_active = g4x_fbc_is_active; - dev_priv->fbc.activate = g4x_fbc_activate; - dev_priv->fbc.deactivate = g4x_fbc_deactivate; - } else { - dev_priv->fbc.is_active = i8xx_fbc_is_active; - dev_priv->fbc.activate = i8xx_fbc_activate; - dev_priv->fbc.deactivate = i8xx_fbc_deactivate; - - /* This value was pulled out of someone's hat */ + /* This value was pulled out of someone's hat */ + if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv)) I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT); - } /* We still don't have any sort of hardware state readout for FBC, so * deactivate it in case the BIOS activated it to make sure software * matches the hardware state. */ - if (dev_priv->fbc.is_active(dev_priv)) - dev_priv->fbc.deactivate(dev_priv); + if (intel_fbc_hw_is_active(dev_priv)) + intel_fbc_hw_deactivate(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 09840f4380f9..97a91e631915 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -406,8 +406,8 @@ retry: continue; } - encoder = connector->encoder; - if (!encoder || WARN_ON(!encoder->crtc)) { + encoder = connector->state->best_encoder; + if (!encoder || WARN_ON(!connector->state->crtc)) { if (connector->force > DRM_FORCE_OFF) goto bail; @@ -420,7 +420,7 @@ retry: num_connectors_enabled++; - new_crtc = intel_fb_helper_crtc(fb_helper, encoder->crtc); + new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc); /* * Make sure we're not trying to drive multiple connectors @@ -466,17 +466,22 @@ retry: * usually contains. But since our current * code puts a mode derived from the post-pfit timings * into crtc->mode this works out correctly. + * + * This is crtc->mode and not crtc->state->mode for the + * fastboot check to work correctly. crtc_state->mode has + * I915_MODE_FLAG_INHERITED, which we clear to force check + * state. 
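intel_fbc_init_pipe_state() above seeds the same visible_pipes_mask that multiple_pipes_ok() maintains at runtime: on gen2/3, FBC is allowed only while no pipe other than the FBC pipe is visible. A standalone model of that bookkeeping (pipe numbering and the single global mask are simplifications):

#include <assert.h>

static unsigned int visible_pipes_mask;

/* Returns nonzero when FBC on 'pipe' is compatible with the current
 * set of visible pipes, after recording this pipe's visibility. */
static int pipe_ok_for_fbc(int pipe, int visible)
{
    if (visible)
        visible_pipes_mask |= 1u << pipe;
    else
        visible_pipes_mask &= ~(1u << pipe);

    return (visible_pipes_mask & ~(1u << pipe)) == 0;
}

int main(void)
{
    assert(pipe_ok_for_fbc(0, 1));  /* only pipe A visible: ok */
    assert(!pipe_ok_for_fbc(1, 1)); /* pipe B lights up too: blocked */
    assert(!pipe_ok_for_fbc(1, 0)); /* B off, but A is the visible one */
    assert(pipe_ok_for_fbc(0, 1));  /* back to A alone: ok again */
    return 0;
}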
*/ DRM_DEBUG_KMS("looking for current mode on connector %s\n", connector->name); - modes[i] = &encoder->crtc->mode; + modes[i] = &connector->state->crtc->mode; } crtcs[i] = new_crtc; DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", connector->name, - pipe_name(to_intel_crtc(encoder->crtc)->pipe), - encoder->crtc->base.id, + pipe_name(to_intel_crtc(connector->state->crtc)->pipe), + connector->state->crtc->base.id, modes[i]->hdisplay, modes[i]->vdisplay, modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h index 045b1491ff7a..73002e901ff2 100644 --- a/drivers/gpu/drm/i915/intel_guc.h +++ b/drivers/gpu/drm/i915/intel_guc.h @@ -46,7 +46,7 @@ struct i915_guc_client { uint32_t wq_head; /* GuC submission statistics & status */ - uint64_t submissions[I915_NUM_RINGS]; + uint64_t submissions[GUC_MAX_ENGINES_NUM]; uint32_t q_fail; uint32_t b_fail; int retcode; @@ -106,8 +106,8 @@ struct intel_guc { uint32_t action_fail; /* Total number of failures */ int32_t action_err; /* Last error code */ - uint64_t submissions[I915_NUM_RINGS]; - uint32_t last_seqno[I915_NUM_RINGS]; + uint64_t submissions[GUC_MAX_ENGINES_NUM]; + uint32_t last_seqno[GUC_MAX_ENGINES_NUM]; }; /* intel_guc_loader.c */ diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h index 1856a4740b83..2de57ffe5e18 100644 --- a/drivers/gpu/drm/i915/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/intel_guc_fwif.h @@ -44,6 +44,13 @@ #define GUC_MAX_GPU_CONTEXTS 1024 #define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS +#define GUC_RENDER_ENGINE 0 +#define GUC_VIDEO_ENGINE 1 +#define GUC_BLITTER_ENGINE 2 +#define GUC_VIDEOENHANCE_ENGINE 3 +#define GUC_VIDEO_ENGINE2 4 +#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1) + /* Work queue item header definitions */ #define WQ_STATUS_ACTIVE 1 #define WQ_STATUS_SUSPENDED 2 @@ -285,7 +292,7 @@ struct guc_context_desc { u64 db_trigger_phy; u16 db_id; - struct guc_execlist_context lrc[I915_NUM_RINGS]; + struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM]; u8 attribute; @@ -344,7 +351,7 @@ struct guc_policy { } __packed; struct guc_policies { - struct guc_policy policy[GUC_CTX_PRIORITY_NUM][I915_NUM_RINGS]; + struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM]; /* In micro seconds. How much time to allow before DPC processing is * called back via interrupt (to prevent DPC queue drain starving). 
@@ -388,14 +395,14 @@ struct guc_mmio_regset { struct guc_mmio_reg_state { struct guc_mmio_regset global_reg; - struct guc_mmio_regset engine_reg[I915_NUM_RINGS]; + struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM]; /* MMIO registers that are set as non privileged */ struct __packed { u32 mmio_start; u32 offsets[GUC_MMIO_WHITE_LIST_MAX]; u32 count; - } mmio_white_list[I915_NUM_RINGS]; + } mmio_white_list[GUC_MAX_ENGINES_NUM]; } __packed; /* GuC Additional Data Struct */ @@ -406,7 +413,7 @@ struct guc_ads { u32 golden_context_lrca; u32 scheduler_policies; u32 reserved0[3]; - u32 eng_state_size[I915_NUM_RINGS]; + u32 eng_state_size[GUC_MAX_ENGINES_NUM]; u32 reserved2[4]; } __packed; diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 3accd914490f..82a3c03fbc0e 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -199,7 +199,7 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv) * the value matches either of two values representing completion * of the GuC boot process. * - * This is used for polling the GuC status in a wait_for_atomic() + * This is used for polling the GuC status in a wait_for() * loop below. */ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv, @@ -259,14 +259,14 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv) I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA)); /* - * Spin-wait for the DMA to complete & the GuC to start up. + * Wait for the DMA to complete & the GuC to start up. * NB: Docs recommend not using the interrupt for completion. * Measurements indicate this should take no more than 20ms, so a * timeout here indicates that the GuC has failed and is unusable. * (Higher levels of the driver will attempt to fall back to * execlist mode if this happens.) 
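Switching the GuC load from wait_for_atomic() to wait_for() replaces a busy-wait with a sleeping poll, which is the right tool for a wait that may last up to ~20 ms. A generic standalone shape of such a sleeping poll; now_ms()/sleep_ms() stand in for jiffies and msleep(), and -110 mirrors -ETIMEDOUT:

#include <stdbool.h>

extern unsigned long now_ms(void);
extern void sleep_ms(unsigned int ms);

static int wait_for_cond(bool (*cond)(void *), void *arg,
                         unsigned long timeout_ms)
{
    unsigned long deadline = now_ms() + timeout_ms;

    while (!cond(arg)) {
        if (now_ms() > deadline)
            return cond(arg) ? 0 : -110; /* one final check */
        sleep_ms(1); /* sleeps, so never call this in atomic context */
    }
    return 0;
}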
*/ - ret = wait_for_atomic(guc_ucode_response(dev_priv, &status), 100); + ret = wait_for(guc_ucode_response(dev_priv, &status), 100); DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n", I915_READ(DMA_CTRL), status); diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 8698a643d027..a0d8daed2470 100755..100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -880,15 +880,18 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); enum intel_display_power_domain power_domain; u32 tmp; + bool ret; power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_is_enabled(dev_priv, power_domain)) + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + ret = false; + tmp = I915_READ(intel_hdmi->hdmi_reg); if (!(tmp & SDVO_ENABLE)) - return false; + goto out; if (HAS_PCH_CPT(dev)) *pipe = PORT_TO_PIPE_CPT(tmp); @@ -897,7 +900,12 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder, else *pipe = PORT_TO_PIPE(tmp); - return true; + ret = true; + +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } static void intel_hdmi_get_config(struct intel_encoder *encoder, @@ -1202,11 +1210,19 @@ intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_device *dev = intel_hdmi_to_dev(hdmi); enum drm_mode_status status; int clock; + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; clock = mode->clock; + + if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) + clock *= 2; + + if (clock > max_dotclk) + return MODE_CLOCK_HIGH; + if (mode->flags & DRM_MODE_FLAG_DBLCLK) clock *= 2; @@ -2151,7 +2167,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, void intel_hdmi_init(struct drm_device *dev, i915_reg_t hdmi_reg, enum port port) { - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port; struct intel_encoder *intel_encoder; struct intel_connector *intel_connector; @@ -2220,7 +2235,6 @@ void intel_hdmi_init(struct drm_device *dev, intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; intel_dig_port->port = port; - dev_priv->dig_port_map[port] = intel_encoder; intel_dig_port->hdmi.hdmi_reg = hdmi_reg; intel_dig_port->dp.output_reg = INVALID_MMIO_REG; intel_dig_port->max_lanes = 4; diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 25254b5c1ac5..52fbe530fc9e 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -664,6 +664,12 @@ int intel_setup_gmbus(struct drm_device *dev) bus->adapter.algo = &gmbus_algorithm; + /* + * We wish to retry with bit banging + * after a timed out GMBUS attempt. 
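intel_hdmi_get_hw_state() above (and the LVDS counterpart later in this diff) now brackets the register read with intel_display_power_get_if_enabled()/intel_display_power_put() instead of merely peeking at the enabled state, so the power domain cannot be released between the check and the read. The shape of that pattern, with the i915 specifics replaced by hypothetical stubs:

#include <stdbool.h>

extern bool power_get_if_enabled(int domain);
extern void power_put(int domain);
extern unsigned int read_port_register(void);

static bool get_hw_state(int domain, int *pipe)
{
    bool ret = false;
    unsigned int tmp;

    if (!power_get_if_enabled(domain))
        return false; /* powered down: nothing valid to read */

    tmp = read_port_register();
    if (!(tmp & 0x80000000u)) /* stand-in for the port ENABLE bit */
        goto out;

    *pipe = (tmp >> 30) & 1; /* illustrative pipe decode */
    ret = true;
out:
    power_put(domain); /* reference balanced on every path */
    return ret;
}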
+ */ + bus->adapter.retries = 1; + /* By default use a conservative clock rate */ bus->reg0 = pin | GMBUS_RATE_100KHZ; @@ -683,7 +689,7 @@ int intel_setup_gmbus(struct drm_device *dev) return 0; err: - while (--pin) { + while (pin--) { if (!intel_gmbus_is_valid_pin(dev_priv, pin)) continue; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 73d4347429df..6a978ce80244 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -223,9 +223,11 @@ enum { FAULT_AND_CONTINUE /* Unsupported */ }; #define GEN8_CTX_ID_SHIFT 32 -#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 +#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 +#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 -static int intel_lr_context_pin(struct drm_i915_gem_request *rq); +static int intel_lr_context_pin(struct intel_context *ctx, + struct intel_engine_cs *engine); static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, struct drm_i915_gem_object *default_ctx_obj); @@ -393,7 +395,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq) uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state; reg_state[CTX_RING_TAIL+1] = rq->tail; - reg_state[CTX_RING_BUFFER_START+1] = rq->ringbuf->vma->node.start; if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { /* True 32b PPGTT with dynamic page allocation: update PDP @@ -599,7 +600,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request) int num_elements = 0; if (request->ctx != request->i915->kernel_context) - intel_lr_context_pin(request); + intel_lr_context_pin(request->ctx, ring); i915_gem_request_reference(request); @@ -704,7 +705,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request } if (request->ctx != request->i915->kernel_context) - ret = intel_lr_context_pin(request); + ret = intel_lr_context_pin(request->ctx, request->ring); return ret; } @@ -765,6 +766,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) { struct intel_ringbuffer *ringbuf = request->ringbuf; struct drm_i915_private *dev_priv = request->i915; + struct intel_engine_cs *engine = request->ring; intel_logical_ring_advance(ringbuf); request->tail = ringbuf->tail; @@ -779,9 +781,20 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) intel_logical_ring_emit(ringbuf, MI_NOOP); intel_logical_ring_advance(ringbuf); - if (intel_ring_stopped(request->ring)) + if (intel_ring_stopped(engine)) return 0; + if (engine->last_context != request->ctx) { + if (engine->last_context) + intel_lr_context_unpin(engine->last_context, engine); + if (request->ctx != request->i915->kernel_context) { + intel_lr_context_pin(request->ctx, engine); + engine->last_context = request->ctx; + } else { + engine->last_context = NULL; + } + } + if (dev_priv->guc.execbuf_client) i915_guc_submit(dev_priv->guc.execbuf_client, request); else @@ -1015,7 +1028,8 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring) ctx->engine[ring->id].state; if (ctx_obj && (ctx != req->i915->kernel_context)) - intel_lr_context_unpin(req); + intel_lr_context_unpin(ctx, ring); + list_del(&req->execlist_link); i915_gem_request_unreference(req); } @@ -1059,14 +1073,15 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) return 0; } -static int intel_lr_context_do_pin(struct intel_engine_cs *ring, - struct intel_context *ctx) +static int intel_lr_context_do_pin(struct intel_context *ctx, + struct intel_engine_cs *ring) { 
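The gmbus unwind fix above is the classic cleanup off-by-one: `while (--pin)` never visits index 0 (and would spin on a huge value had the failure happened at pin 0), whereas `while (pin--)` tests before decrementing and unwinds every index. The real loop also skips invalid pins; this demonstration only shows the indices visited:

#include <stdio.h>

int main(void)
{
    int pin;

    pin = 3;
    printf("--pin visits:");
    while (--pin)
        printf(" %d", pin); /* 2 1  -- index 0 never cleaned up */

    pin = 3;
    printf("\npin-- visits:");
    while (pin--)
        printf(" %d", pin); /* 2 1 0 -- full unwind */
    printf("\n");
    return 0;
}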
struct drm_device *dev = ring->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; struct page *lrc_state_page; + uint32_t *lrc_reg_state; int ret; WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); @@ -1088,7 +1103,9 @@ static int intel_lr_context_do_pin(struct intel_engine_cs *ring, ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); intel_lr_context_descriptor_update(ctx, ring); - ctx->engine[ring->id].lrc_reg_state = kmap(lrc_state_page); + lrc_reg_state = kmap(lrc_state_page); + lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; + ctx->engine[ring->id].lrc_reg_state = lrc_reg_state; ctx_obj->dirty = true; /* Invalidate GuC TLB. */ @@ -1103,41 +1120,40 @@ unpin_ctx_obj: return ret; } -static int intel_lr_context_pin(struct drm_i915_gem_request *rq) +static int intel_lr_context_pin(struct intel_context *ctx, + struct intel_engine_cs *engine) { int ret = 0; - struct intel_engine_cs *ring = rq->ring; - if (rq->ctx->engine[ring->id].pin_count++ == 0) { - ret = intel_lr_context_do_pin(ring, rq->ctx); + if (ctx->engine[engine->id].pin_count++ == 0) { + ret = intel_lr_context_do_pin(ctx, engine); if (ret) goto reset_pin_count; + + i915_gem_context_reference(ctx); } return ret; reset_pin_count: - rq->ctx->engine[ring->id].pin_count = 0; + ctx->engine[engine->id].pin_count = 0; return ret; } -void intel_lr_context_unpin(struct drm_i915_gem_request *rq) +void intel_lr_context_unpin(struct intel_context *ctx, + struct intel_engine_cs *engine) { - struct intel_engine_cs *ring = rq->ring; - struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; - struct intel_ringbuffer *ringbuf = rq->ringbuf; + struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; - WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); - - if (!ctx_obj) - return; - - if (--rq->ctx->engine[ring->id].pin_count == 0) { - kunmap(kmap_to_page(rq->ctx->engine[ring->id].lrc_reg_state)); - intel_unpin_ringbuffer_obj(ringbuf); + WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); + if (--ctx->engine[engine->id].pin_count == 0) { + kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state)); + intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); i915_gem_object_ggtt_unpin(ctx_obj); - rq->ctx->engine[ring->id].lrc_vma = NULL; - rq->ctx->engine[ring->id].lrc_desc = 0; - rq->ctx->engine[ring->id].lrc_reg_state = NULL; + ctx->engine[engine->id].lrc_vma = NULL; + ctx->engine[engine->id].lrc_desc = 0; + ctx->engine[engine->id].lrc_reg_state = NULL; + + i915_gem_context_unreference(ctx); } } @@ -2062,7 +2078,7 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) goto error; /* As this is the default context, always pin it */ - ret = intel_lr_context_do_pin(ring, dctx); + ret = intel_lr_context_do_pin(dctx, ring); if (ret) { DRM_ERROR( "Failed to pin and map ringbuffer %s: %d\n", @@ -2086,6 +2102,7 @@ static int logical_render_ring_init(struct drm_device *dev) ring->name = "render ring"; ring->id = RCS; ring->exec_id = I915_EXEC_RENDER; + ring->guc_id = GUC_RENDER_ENGINE; ring->mmio_base = RENDER_RING_BASE; logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT); @@ -2137,6 +2154,7 @@ static int logical_bsd_ring_init(struct drm_device *dev) ring->name = "bsd ring"; ring->id = VCS; ring->exec_id = I915_EXEC_BSD; + ring->guc_id = GUC_VIDEO_ENGINE; ring->mmio_base = GEN6_BSD_RING_BASE; logical_ring_default_irqs(ring, 
GEN8_VCS1_IRQ_SHIFT); @@ -2153,6 +2171,7 @@ static int logical_bsd2_ring_init(struct drm_device *dev) ring->name = "bsd2 ring"; ring->id = VCS2; ring->exec_id = I915_EXEC_BSD; + ring->guc_id = GUC_VIDEO_ENGINE2; ring->mmio_base = GEN8_BSD2_RING_BASE; logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT); @@ -2169,6 +2188,7 @@ static int logical_blt_ring_init(struct drm_device *dev) ring->name = "blitter ring"; ring->id = BCS; ring->exec_id = I915_EXEC_BLT; + ring->guc_id = GUC_BLITTER_ENGINE; ring->mmio_base = BLT_RING_BASE; logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT); @@ -2185,6 +2205,7 @@ static int logical_vebox_ring_init(struct drm_device *dev) ring->name = "video enhancement ring"; ring->id = VECS; ring->exec_id = I915_EXEC_VEBOX; + ring->guc_id = GUC_VIDEOENHANCE_ENGINE; ring->mmio_base = VEBOX_RING_BASE; logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT); @@ -2293,6 +2314,27 @@ make_rpcs(struct drm_device *dev) return rpcs; } +static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring) +{ + u32 indirect_ctx_offset; + + switch (INTEL_INFO(ring->dev)->gen) { + default: + MISSING_CASE(INTEL_INFO(ring->dev)->gen); + /* fall through */ + case 9: + indirect_ctx_offset = + GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + break; + case 8: + indirect_ctx_offset = + GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + break; + } + + return indirect_ctx_offset; +} + static int populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) @@ -2336,7 +2378,8 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring), _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | - CTX_CTRL_RS_CTX_ENABLE)); + (HAS_RESOURCE_STREAMER(dev) ? + CTX_CTRL_RS_CTX_ENABLE : 0))); ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0); ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0); /* Ring buffer start address is not known until the buffer is pinned. 
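The execlists pinning rework earlier in this file keeps the most recently submitted context pinned via engine->last_context, unpinning its predecessor, and exempts the kernel context (which stays pinned for the driver's lifetime). A standalone model of that hand-over; in the real code the pin also takes a context reference, which this sketch folds into the pin count:

struct lrc_ctx { int pin_count; };

static struct lrc_ctx *last_context;

static void ctx_pin(struct lrc_ctx *c)   { c->pin_count++; }
static void ctx_unpin(struct lrc_ctx *c) { c->pin_count--; }

/* Model of intel_logical_ring_advance_and_submit()'s last_context swap. */
static void submit_ctx(struct lrc_ctx *ctx, struct lrc_ctx *kernel_ctx)
{
    if (last_context == ctx)
        return; /* same context again: nothing to do */
    if (last_context)
        ctx_unpin(last_context);
    if (ctx != kernel_ctx) {
        ctx_pin(ctx);
        last_context = ctx;
    } else {
        last_context = NULL; /* kernel context needs no tracking */
    }
}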
@@ -2365,7 +2408,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS); reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = - CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT << 6; + intel_lr_indirect_ctx_offset(ring) << 6; reg_state[CTX_BB_PER_CTX_PTR+1] = (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) | diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index 49af638f6213..e6cda3e225d0 100644 --- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -101,7 +101,8 @@ void intel_lr_context_free(struct intel_context *ctx); uint32_t intel_lr_context_size(struct intel_engine_cs *ring); int intel_lr_context_deferred_alloc(struct intel_context *ctx, struct intel_engine_cs *ring); -void intel_lr_context_unpin(struct drm_i915_gem_request *req); +void intel_lr_context_unpin(struct intel_context *ctx, + struct intel_engine_cs *engine); void intel_lr_context_reset(struct drm_device *dev, struct intel_context *ctx); uint64_t intel_lr_context_descriptor(struct intel_context *ctx, diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 811ddf7799f0..30a8403a8f4f 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -76,22 +76,30 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); enum intel_display_power_domain power_domain; u32 tmp; + bool ret; power_domain = intel_display_port_power_domain(encoder); - if (!intel_display_power_is_enabled(dev_priv, power_domain)) + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; + ret = false; + tmp = I915_READ(lvds_encoder->reg); if (!(tmp & LVDS_PORT_EN)) - return false; + goto out; if (HAS_PCH_CPT(dev)) *pipe = PORT_TO_PIPE_CPT(tmp); else *pipe = PORT_TO_PIPE(tmp); - return true; + ret = true; + +out: + intel_display_power_put(dev_priv, power_domain); + + return ret; } static void intel_lvds_get_config(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 20bf854eae8c..347d4df49a9b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -548,7 +548,7 @@ static const struct intel_watermark_params i845_wm_info = { * intel_calculate_wm - calculate watermark level * @clock_in_khz: pixel clock * @wm: chip FIFO params - * @pixel_size: display pixel size + * @cpp: bytes per pixel * @latency_ns: memory latency for the platform * * Calculate the watermark level (the level at which the display plane will @@ -564,8 +564,7 @@ static const struct intel_watermark_params i845_wm_info = { */ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, const struct intel_watermark_params *wm, - int fifo_size, - int pixel_size, + int fifo_size, int cpp, unsigned long latency_ns) { long entries_required, wm_size; @@ -576,7 +575,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, * clocks go from a few thousand to several hundred thousand. 
* latency is usually a few thousand */ - entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / + entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) / 1000; entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); @@ -640,13 +639,13 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) crtc = single_enabled_crtc(dev); if (crtc) { const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; - int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); int clock = adjusted_mode->crtc_clock; /* Display SR */ wm = intel_calculate_wm(clock, &pineview_display_wm, pineview_display_wm.fifo_size, - pixel_size, latency->display_sr); + cpp, latency->display_sr); reg = I915_READ(DSPFW1); reg &= ~DSPFW_SR_MASK; reg |= FW_WM(wm, SR); @@ -656,7 +655,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) /* cursor SR */ wm = intel_calculate_wm(clock, &pineview_cursor_wm, pineview_display_wm.fifo_size, - pixel_size, latency->cursor_sr); + cpp, latency->cursor_sr); reg = I915_READ(DSPFW3); reg &= ~DSPFW_CURSOR_SR_MASK; reg |= FW_WM(wm, CURSOR_SR); @@ -665,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) /* Display HPLL off SR */ wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, pineview_display_hplloff_wm.fifo_size, - pixel_size, latency->display_hpll_disable); + cpp, latency->display_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_SR_MASK; reg |= FW_WM(wm, HPLL_SR); @@ -674,7 +673,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) /* cursor HPLL off SR */ wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pineview_display_hplloff_wm.fifo_size, - pixel_size, latency->cursor_hpll_disable); + cpp, latency->cursor_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_CURSOR_MASK; reg |= FW_WM(wm, HPLL_CURSOR); @@ -698,7 +697,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, { struct drm_crtc *crtc; const struct drm_display_mode *adjusted_mode; - int htotal, hdisplay, clock, pixel_size; + int htotal, hdisplay, clock, cpp; int line_time_us, line_count; int entries, tlb_miss; @@ -713,10 +712,10 @@ static bool g4x_compute_wm0(struct drm_device *dev, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; - pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; + cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); /* Use the small buffer method to calculate plane watermark */ - entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; + entries = ((clock * cpp / 1000) * display_latency_ns) / 1000; tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; if (tlb_miss > 0) entries += tlb_miss; @@ -728,7 +727,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, /* Use the large buffer method to calculate cursor watermark */ line_time_us = max(htotal * 1000 / clock, 1); line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; - entries = line_count * crtc->cursor->state->crtc_w * pixel_size; + entries = line_count * crtc->cursor->state->crtc_w * cpp; tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; if (tlb_miss > 0) entries += tlb_miss; @@ -784,7 +783,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, { struct drm_crtc *crtc; const struct drm_display_mode *adjusted_mode; - int hdisplay, htotal, pixel_size, clock; + 
int hdisplay, htotal, cpp, clock; unsigned long line_time_us; int line_count, line_size; int small, large; @@ -800,21 +799,21 @@ static bool g4x_compute_srwm(struct drm_device *dev, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; - pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; + cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); line_time_us = max(htotal * 1000 / clock, 1); line_count = (latency_ns / line_time_us + 1000) / 1000; - line_size = hdisplay * pixel_size; + line_size = hdisplay * cpp; /* Use the minimum of the small and large buffer method for primary */ - small = ((clock * pixel_size / 1000) * latency_ns) / 1000; + small = ((clock * cpp / 1000) * latency_ns) / 1000; large = line_count * line_size; entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); *display_wm = entries + display->guard_size; /* calculate the self-refresh watermark for display cursor */ - entries = line_count * pixel_size * crtc->cursor->state->crtc_w; + entries = line_count * cpp * crtc->cursor->state->crtc_w; entries = DIV_ROUND_UP(entries, cursor->cacheline_size); *cursor_wm = entries + cursor->guard_size; @@ -906,13 +905,13 @@ enum vlv_wm_level { static unsigned int vlv_wm_method2(unsigned int pixel_rate, unsigned int pipe_htotal, unsigned int horiz_pixels, - unsigned int bytes_per_pixel, + unsigned int cpp, unsigned int latency) { unsigned int ret; ret = (latency * pixel_rate) / (pipe_htotal * 10000); - ret = (ret + 1) * horiz_pixels * bytes_per_pixel; + ret = (ret + 1) * horiz_pixels * cpp; ret = DIV_ROUND_UP(ret, 64); return ret; @@ -941,7 +940,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane, int level) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - int clock, htotal, pixel_size, width, wm; + int clock, htotal, cpp, width, wm; if (dev_priv->wm.pri_latency[level] == 0) return USHRT_MAX; @@ -949,7 +948,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane, if (!state->visible) return 0; - pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0); + cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0); clock = crtc->config->base.adjusted_mode.crtc_clock; htotal = crtc->config->base.adjusted_mode.crtc_htotal; width = crtc->config->pipe_src_w; @@ -965,7 +964,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane, */ wm = 63; } else { - wm = vlv_wm_method2(clock, htotal, width, pixel_size, + wm = vlv_wm_method2(clock, htotal, width, cpp, dev_priv->wm.pri_latency[level] * 10); } @@ -1439,7 +1438,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->crtc_htotal; int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; - int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); unsigned long line_time_us; int entries; @@ -1447,7 +1446,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) /* Use ns/us then divide to preserve precision */ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * hdisplay; + cpp * hdisplay; entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); srwm = I965_FIFO_SIZE - entries; if (srwm < 0) @@ -1457,7 +1456,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) entries, srwm); entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * crtc->cursor->state->crtc_w; + cpp * 
crtc->cursor->state->crtc_w; entries = DIV_ROUND_UP(entries, i965_cursor_wm_info.cacheline_size); cursor_sr = i965_cursor_wm_info.fifo_size - @@ -1518,7 +1517,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev, 0); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode; - int cpp = crtc->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); if (IS_GEN2(dev)) cpp = 4; @@ -1540,7 +1539,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev, 1); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode; - int cpp = crtc->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0); if (IS_GEN2(dev)) cpp = 4; @@ -1586,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->crtc_htotal; int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w; - int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8; + int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0); unsigned long line_time_us; int entries; @@ -1594,7 +1593,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) /* Use ns/us then divide to preserve precision */ entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * hdisplay; + cpp * hdisplay; entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); srwm = wm_info->fifo_size - entries; @@ -1685,15 +1684,14 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) } /* latency must be in 0.1us units. */ -static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, - uint32_t latency) +static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency) { uint64_t ret; if (WARN(latency == 0, "Latency value missing\n")) return UINT_MAX; - ret = (uint64_t) pixel_rate * bytes_per_pixel * latency; + ret = (uint64_t) pixel_rate * cpp * latency; ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2; return ret; @@ -1701,7 +1699,7 @@ static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, /* latency must be in 0.1us units. */ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, - uint32_t horiz_pixels, uint8_t bytes_per_pixel, + uint32_t horiz_pixels, uint8_t cpp, uint32_t latency) { uint32_t ret; @@ -1712,13 +1710,13 @@ static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, return UINT_MAX; ret = (latency * pixel_rate) / (pipe_htotal * 10000); - ret = (ret + 1) * horiz_pixels * bytes_per_pixel; + ret = (ret + 1) * horiz_pixels * cpp; ret = DIV_ROUND_UP(ret, 64) + 2; return ret; } static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, - uint8_t bytes_per_pixel) + uint8_t cpp) { /* * Neither of these should be possible since this function shouldn't be @@ -1726,12 +1724,12 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, * extra paranoid to avoid a potential divide-by-zero if we screw up * elsewhere in the driver. 
*/ - if (WARN_ON(!bytes_per_pixel)) + if (WARN_ON(!cpp)) return 0; if (WARN_ON(!horiz_pixels)) return 0; - return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; + return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2; } struct ilk_wm_maximums { @@ -1750,13 +1748,14 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, uint32_t mem_value, bool is_lp) { - int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; + int cpp = pstate->base.fb ? + drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; uint32_t method1, method2; if (!cstate->base.active || !pstate->visible) return 0; - method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value); + method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); if (!is_lp) return method1; @@ -1764,8 +1763,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, drm_rect_width(&pstate->dst), - bpp, - mem_value); + cpp, mem_value); return min(method1, method2); } @@ -1778,18 +1776,18 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate, uint32_t mem_value) { - int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; + int cpp = pstate->base.fb ? + drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; uint32_t method1, method2; if (!cstate->base.active || !pstate->visible) return 0; - method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value); + method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, drm_rect_width(&pstate->dst), - bpp, - mem_value); + cpp, mem_value); return min(method1, method2); } @@ -1801,16 +1799,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate, uint32_t mem_value) { - int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; + /* + * We treat the cursor plane as always-on for the purposes of watermark + * calculation. Until we have two-stage watermark programming merged, + * this is necessary to avoid flickering. + */ + int cpp = 4; + int width = pstate->visible ? pstate->base.crtc_w : 64; - if (!cstate->base.active || !pstate->visible) + if (!cstate->base.active) return 0; return ilk_wm_method2(ilk_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, - drm_rect_width(&pstate->dst), - bpp, - mem_value); + width, cpp, mem_value); } /* Only for WM_LP. */ @@ -1818,12 +1820,13 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate, uint32_t pri_val) { - int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; + int cpp = pstate->base.fb ? 
+ drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; if (!cstate->base.active || !pstate->visible) return 0; - return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp); + return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp); } static unsigned int ilk_display_fifo_size(const struct drm_device *dev) @@ -2848,7 +2851,10 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, memset(ddb, 0, sizeof(*ddb)); for_each_pipe(dev_priv, pipe) { - if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) + enum intel_display_power_domain power_domain; + + power_domain = POWER_DOMAIN_PIPE(pipe); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) continue; for_each_plane(dev_priv, pipe, plane) { @@ -2860,6 +2866,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, val = I915_READ(CUR_BUF_CFG(pipe)); skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], val); + + intel_display_power_put(dev_priv, power_domain); } } @@ -3042,26 +3050,25 @@ static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) /* * The max latency should be 257 (max the punit can code is 255 and we add 2us - * for the read latency) and bytes_per_pixel should always be <= 8, so that + * for the read latency) and cpp should always be <= 8, so that * should allow pixel_rate up to ~2 GHz which seems sufficient since max * 2xcdclk is 1350 MHz and the pixel rate should never exceed that. */ -static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, - uint32_t latency) +static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency) { uint32_t wm_intermediate_val, ret; if (latency == 0) return UINT_MAX; - wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512; + wm_intermediate_val = latency * pixel_rate * cpp / 512; ret = DIV_ROUND_UP(wm_intermediate_val, 1000); return ret; } static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, - uint32_t horiz_pixels, uint8_t bytes_per_pixel, + uint32_t horiz_pixels, uint8_t cpp, uint64_t tiling, uint32_t latency) { uint32_t ret; @@ -3071,7 +3078,7 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, if (latency == 0) return UINT_MAX; - plane_bytes_per_line = horiz_pixels * bytes_per_pixel; + plane_bytes_per_line = horiz_pixels * cpp; if (tiling == I915_FORMAT_MOD_Y_TILED || tiling == I915_FORMAT_MOD_Yf_TILED) { @@ -3121,23 +3128,21 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t plane_bytes_per_line, plane_blocks_per_line; uint32_t res_blocks, res_lines; uint32_t selected_result; - uint8_t bytes_per_pixel; + uint8_t cpp; if (latency == 0 || !cstate->base.active || !fb) return false; - bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0); + cpp = drm_format_plane_cpp(fb->pixel_format, 0); method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), - bytes_per_pixel, - latency); + cpp, latency); method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, cstate->pipe_src_w, - bytes_per_pixel, - fb->modifier[0], + cpp, fb->modifier[0], latency); - plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel; + plane_bytes_per_line = cstate->pipe_src_w * cpp; plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || @@ -3145,11 +3150,11 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, uint32_t min_scanlines = 4; uint32_t y_tile_minimum; if 
(intel_rotation_90_or_270(plane->state->rotation)) { - int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ? + int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? drm_format_plane_cpp(fb->pixel_format, 1) : drm_format_plane_cpp(fb->pixel_format, 0); - switch (bpp) { + switch (cpp) { case 1: min_scanlines = 16; break; @@ -4116,11 +4121,13 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val) static void ironlake_enable_drps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 rgvmodectl = I915_READ(MEMMODECTL); + u32 rgvmodectl; u8 fmax, fmin, fstart, vstart; spin_lock_irq(&mchdev_lock); + rgvmodectl = I915_READ(MEMMODECTL); + /* Enable temp reporting */ I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); @@ -4562,12 +4569,62 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); } -static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) +static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool enable_rc6 = true; + unsigned long rc6_ctx_base; + + if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { + DRM_DEBUG_KMS("RC6 Base location not set properly.\n"); + enable_rc6 = false; + } + + /* + * The exact context size is not known for BXT, so assume a page size + * for this check. + */ + rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK; + if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) && + (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base + + dev_priv->gtt.stolen_reserved_size))) { + DRM_DEBUG_KMS("RC6 Base address not as expected.\n"); + enable_rc6 = false; + } + + if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) && + ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && + ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && + ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { + DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n"); + enable_rc6 = false; + } + + if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_HW_ENABLE)) && + ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) || + !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) { + DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n"); + enable_rc6 = false; + } + + return enable_rc6; +} + +int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) { /* No RC6 before Ironlake and code is gone for ilk. 
*/ if (INTEL_INFO(dev)->gen < 6) return 0; + if (!enable_rc6) + return 0; + + if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) { + DRM_INFO("RC6 disabled by BIOS\n"); + return 0; + } + /* Respect the kernel parameter if it is set */ if (enable_rc6 >= 0) { int mask; @@ -5179,8 +5236,6 @@ static void cherryview_setup_pctx(struct drm_device *dev) u32 pcbr; int pctx_size = 32*1024; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - pcbr = I915_READ(VLV_PCBR); if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); @@ -5202,7 +5257,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) u32 pcbr; int pctx_size = 24*1024; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + mutex_lock(&dev->struct_mutex); pcbr = I915_READ(VLV_PCBR); if (pcbr) { @@ -5230,7 +5285,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) pctx = i915_gem_object_create_stolen(dev, pctx_size); if (!pctx) { DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); - return; + goto out; } pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; @@ -5239,6 +5294,7 @@ static void valleyview_setup_pctx(struct drm_device *dev) out: DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); dev_priv->vlv_pctx = pctx; + mutex_unlock(&dev->struct_mutex); } static void valleyview_cleanup_pctx(struct drm_device *dev) @@ -5248,7 +5304,7 @@ static void valleyview_cleanup_pctx(struct drm_device *dev) if (WARN_ON(!dev_priv->vlv_pctx)) return; - drm_gem_object_unreference(&dev_priv->vlv_pctx->base); + drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base); dev_priv->vlv_pctx = NULL; } @@ -6057,7 +6113,6 @@ void intel_init_gt_powersave(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); /* * RPM depends on RC6 to save restore the GT HW context, so make RC6 a * requirement. 
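The valleyview_setup_pctx() change above replaces the caller-must-hold-the-lock assertion with locking inside the helper, and the new failure path jumps to a single out label so every exit releases struct_mutex. A hedged sketch of that single-unlock-site pattern, using pthreads and illustrative names only:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int backing;
static int *pctx;

/* Stand-in for an allocation that can fail (stolen memory in the driver). */
static bool alloc_backing(void)
{
        return false;
}

static void setup_pctx(void)
{
        pthread_mutex_lock(&state_lock);

        if (!alloc_backing())
                goto out; /* early bail-out still reaches the unlock */

        pctx = &backing;
out:
        pthread_mutex_unlock(&state_lock);
}

int main(void)
{
        setup_pctx();
        printf("pctx %s\n", pctx ? "set" : "not set");
        return 0;
}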
@@ -6192,8 +6247,8 @@ void intel_enable_gt_powersave(struct drm_device *dev) return; if (IS_IRONLAKE_M(dev)) { - mutex_lock(&dev->struct_mutex); ironlake_enable_drps(dev); + mutex_lock(&dev->struct_mutex); intel_init_emon(dev); mutex_unlock(&dev->struct_mutex); } else if (INTEL_INFO(dev)->gen >= 6) { @@ -7189,9 +7244,10 @@ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) { int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); - div = vlv_gpu_freq_div(czclk_freq) / 2; + div = vlv_gpu_freq_div(czclk_freq); if (div < 0) return div; + div /= 2; return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2; } @@ -7200,9 +7256,10 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) { int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); - mul = vlv_gpu_freq_div(czclk_freq) / 2; + mul = vlv_gpu_freq_div(czclk_freq); if (mul < 0) return mul; + mul /= 2; /* CHV needs even values */ return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2; diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 9ccff3011523..0b42ada338c8 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -225,7 +225,12 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp) (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT)); } - drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE); + if (dev_priv->psr.link_standby) + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE); + else + drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, + DP_PSR_ENABLE); } static void vlv_psr_enable_source(struct intel_dp *intel_dp) @@ -280,6 +285,9 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp) if (IS_HASWELL(dev)) val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; + if (dev_priv->psr.link_standby) + val |= EDP_PSR_LINK_STANDBY; + I915_WRITE(EDP_PSR_CTL, val | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | @@ -304,8 +312,15 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) dev_priv->psr.source_ok = false; - if (IS_HASWELL(dev) && dig_port->port != PORT_A) { - DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); + /* + * HSW spec explicitly says PSR is tied to port A. + * BDW+ platforms with DDI implementation of PSR have different + * PSR registers per transcoder and we only implement transcoder EDP + * ones. Since by Display design transcoder EDP is tied to port A + * we can safely escape based on the port A. 
+ */ + if (HAS_DDI(dev) && dig_port->port != PORT_A) { + DRM_DEBUG_KMS("PSR condition failed: Port not supported\n"); return false; } @@ -314,6 +329,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) return false; } + if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && + !dev_priv->psr.link_standby) { + DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n"); + return false; + } + if (IS_HASWELL(dev) && I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) & S3D_ENABLE) { @@ -327,12 +348,6 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) return false; } - if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && - ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) { - DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n"); - return false; - } - dev_priv->psr.source_ok = true; return true; } @@ -763,6 +778,36 @@ void intel_psr_init(struct drm_device *dev) dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; + /* Per platform default */ + if (i915.enable_psr == -1) { + if (IS_HASWELL(dev) || IS_BROADWELL(dev) || + IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + i915.enable_psr = 1; + else + i915.enable_psr = 0; + } + + /* Set link_standby x link_off defaults */ + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + /* HSW and BDW require workarounds that we don't implement. */ + dev_priv->psr.link_standby = false; + else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + /* On VLV and CHV only standby mode is supported. */ + dev_priv->psr.link_standby = true; + else + /* For new platforms let's respect VBT back again */ + dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; + + /* Override link_standby x link_off defaults */ + if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) { + DRM_DEBUG_KMS("PSR: Forcing link standby\n"); + dev_priv->psr.link_standby = true; + } + if (i915.enable_psr == 3 && dev_priv->psr.link_standby) { + DRM_DEBUG_KMS("PSR: Forcing main link off\n"); + dev_priv->psr.link_standby = false; + } + INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work); mutex_init(&dev_priv->psr.lock); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 9030e2bca0c0..45ce45a5e122 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -746,9 +746,9 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req) ret = i915_gem_render_state_init(req); if (ret) - DRM_ERROR("init render state: %d\n", ret); + return ret; - return ret; + return 0; } static int wa_add(struct drm_i915_private *dev_priv, @@ -789,6 +789,22 @@ static int wa_add(struct drm_i915_private *dev_priv, #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val) +static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct i915_workarounds *wa = &dev_priv->workarounds; + const uint32_t index = wa->hw_whitelist_count[ring->id]; + + if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS)) + return -EINVAL; + + WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index), + i915_mmio_reg_offset(reg)); + wa->hw_whitelist_count[ring->id]++; + + return 0; +} + static int gen8_init_workarounds(struct intel_engine_cs *ring) { struct drm_device *dev = ring->dev; @@ -894,6 +910,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) struct drm_device *dev = ring->dev; struct 
drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; + int ret; /* WaEnableLbsSlaRetryTimerDecrement:skl */ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | @@ -964,6 +981,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) /* WaDisableSTUnitPowerOptimization:skl,bxt */ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); + /* WaOCLCoherentLineFlush:skl,bxt */ + I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) | + GEN8_LQSC_FLUSH_COHERENT_LINES)); + + /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */ + ret = wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1); + if (ret) + return ret; + + /* WaAllowUMDToModifyHDCChicken1:skl,bxt */ + ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1); + if (ret) + return ret; + return 0; } @@ -1019,6 +1050,16 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) if (ret) return ret; + /* + * Actual WA is to disable percontext preemption granularity control + * until D0 which is the default case so this is equivalent to + * !WaDisablePerCtxtPreemptionGranularityControl:skl + */ + if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { + I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, + _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); + } + if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ I915_WRITE(FF_SLICE_CS_CHICKEN2, @@ -1071,6 +1112,11 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) GEN7_HALF_SLICE_CHICKEN1, GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + /* WaDisableLSQCROPERFforOCL:skl */ + ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4); + if (ret) + return ret; + return skl_tune_iz_hashing(ring); } @@ -1106,6 +1152,20 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); } + /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */ + /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ + /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ + /* WaDisableLSQCROPERFforOCL:bxt */ + if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { + ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1); + if (ret) + return ret; + + ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4); + if (ret) + return ret; + } + return 0; } @@ -1117,6 +1177,7 @@ int init_workarounds_ring(struct intel_engine_cs *ring) WARN_ON(ring->id != RCS); dev_priv->workarounds.count = 0; + dev_priv->workarounds.hw_whitelist_count[RCS] = 0; if (IS_BROADWELL(dev)) return bdw_init_workarounds(ring); @@ -2058,6 +2119,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, return ret; } + /* Access through the GTT requires the device to be awake.
*/ + assert_rpm_wakelock_held(dev_priv); + ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), ringbuf->size); if (ringbuf->virtual_start == NULL) { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index b12f2aabd104..566b0ae10ce0 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -158,6 +158,7 @@ struct intel_engine_cs { #define I915_NUM_RINGS 5 #define _VCS(n) (VCS + (n)) unsigned int exec_id; + unsigned int guc_id; u32 mmio_base; struct drm_device *dev; struct intel_ringbuffer *buffer; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index bbca527184d0..6e54d978d9d4 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -284,6 +284,13 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) 1 << PIPE_C | 1 << PIPE_B); } +static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv) +{ + if (IS_BROADWELL(dev_priv)) + gen8_irq_power_well_pre_disable(dev_priv, + 1 << PIPE_C | 1 << PIPE_B); +} + static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { @@ -309,6 +316,14 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, } } +static void skl_power_well_pre_disable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + if (power_well->data == SKL_DISP_PW_2) + gen8_irq_power_well_pre_disable(dev_priv, + 1 << PIPE_C | 1 << PIPE_B); +} + static void hsw_set_power_well(struct drm_i915_private *dev_priv, struct i915_power_well *power_well, bool enable) { @@ -334,6 +349,7 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, } else { if (enable_requested) { + hsw_power_well_pre_disable(dev_priv); I915_WRITE(HSW_PWR_WELL_DRIVER, 0); POSTING_READ(HSW_PWR_WELL_DRIVER); DRM_DEBUG_KMS("Requesting to disable the power well\n"); @@ -456,20 +472,61 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) */ } -static void gen9_set_dc_state_debugmask_memory_up( - struct drm_i915_private *dev_priv) +static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) { - uint32_t val; + uint32_t val, mask; + + mask = DC_STATE_DEBUG_MASK_MEMORY_UP; + + if (IS_BROXTON(dev_priv)) + mask |= DC_STATE_DEBUG_MASK_CORES; /* The below bit doesn't need to be cleared ever afterwards */ val = I915_READ(DC_STATE_DEBUG); - if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) { - val |= DC_STATE_DEBUG_MASK_MEMORY_UP; + if ((val & mask) != mask) { + val |= mask; I915_WRITE(DC_STATE_DEBUG, val); POSTING_READ(DC_STATE_DEBUG); } } +static void gen9_write_dc_state(struct drm_i915_private *dev_priv, + u32 state) +{ + int rewrites = 0; + int rereads = 0; + u32 v; + + I915_WRITE(DC_STATE_EN, state); + + /* It has been observed that disabling the dc6 state sometimes + * doesn't stick and dmc keeps returning old value. Make sure + * the write really sticks enough times and also force rewrite until + * we are confident that state is exactly what we want. 
+ */ + do { + v = I915_READ(DC_STATE_EN); + + if (v != state) { + I915_WRITE(DC_STATE_EN, state); + rewrites++; + rereads = 0; + } else if (rereads++ > 5) { + break; + } + + } while (rewrites < 100); + + if (v != state) + DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n", + state, v); + + /* Most of the times we need one retry, avoid spam */ + if (rewrites > 1) + DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n", + state, rewrites); +} + static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) { uint32_t val; @@ -488,16 +545,21 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5) state = DC_STATE_EN_UPTO_DC5; - if (state & DC_STATE_EN_UPTO_DC5_DC6_MASK) - gen9_set_dc_state_debugmask_memory_up(dev_priv); - val = I915_READ(DC_STATE_EN); DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", val & mask, state); + + /* Check if DMC is ignoring our DC state requests */ + if ((val & mask) != dev_priv->csr.dc_state) + DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n", + dev_priv->csr.dc_state, val & mask); + val &= ~mask; val |= state; - I915_WRITE(DC_STATE_EN, val); - POSTING_READ(DC_STATE_EN); + + gen9_write_dc_state(dev_priv, val); + + dev_priv->csr.dc_state = val & mask; } void bxt_enable_dc9(struct drm_i915_private *dev_priv) @@ -663,6 +725,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, state_mask = SKL_POWER_WELL_STATE(power_well->data); is_enabled = tmp & state_mask; + if (!enable && enable_requested) + skl_power_well_pre_disable(dev_priv, power_well); + if (enable) { if (!enable_requested) { WARN((tmp & state_mask) && @@ -941,6 +1006,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) valleyview_disable_display_irqs(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); + /* make sure we're done processing display irqs */ + synchronize_irq(dev_priv->dev->irq); + vlv_power_sequencer_reset(dev_priv); } @@ -1435,6 +1503,22 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, chv_set_pipe_power_well(dev_priv, power_well, false); } +static void +__intel_display_power_get_domain(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + struct i915_power_well *power_well; + int i; + + for_each_power_well(i, power_well, BIT(domain), power_domains) { + if (!power_well->count++) + intel_power_well_enable(dev_priv, power_well); + } + + power_domains->domain_use_count[domain]++; +} + /** * intel_display_power_get - grab a power domain reference * @dev_priv: i915 device instance @@ -1450,24 +1534,53 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, void intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { - struct i915_power_domains *power_domains; - struct i915_power_well *power_well; - int i; + struct i915_power_domains *power_domains = &dev_priv->power_domains; intel_runtime_pm_get(dev_priv); - power_domains = &dev_priv->power_domains; + mutex_lock(&power_domains->lock); + + __intel_display_power_get_domain(dev_priv, domain); + + mutex_unlock(&power_domains->lock); +} + +/** + * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain + * @dev_priv: i915 device instance + * @domain: power domain to reference + * + * This function grabs a power domain reference for @domain and ensures that the + * power 
domain and all its parents are powered up. Therefore users should only + * grab a reference to the innermost power domain they need. + * + * Any power domain reference obtained by this function must have a symmetric + * call to intel_display_power_put() to release the reference again. + */ +bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + struct i915_power_domains *power_domains = &dev_priv->power_domains; + bool is_enabled; + + if (!intel_runtime_pm_get_if_in_use(dev_priv)) + return false; mutex_lock(&power_domains->lock); - for_each_power_well(i, power_well, BIT(domain), power_domains) { - if (!power_well->count++) - intel_power_well_enable(dev_priv, power_well); + if (__intel_display_power_is_enabled(dev_priv, domain)) { + __intel_display_power_get_domain(dev_priv, domain); + is_enabled = true; + } else { + is_enabled = false; } - power_domains->domain_use_count[domain]++; - mutex_unlock(&power_domains->lock); + + if (!is_enabled) + intel_runtime_pm_put(dev_priv); + + return is_enabled; } /** @@ -2028,8 +2141,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, skl_init_cdclk(dev_priv); - if (dev_priv->csr.dmc_payload) - intel_csr_load_program(dev_priv); + if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv)) + gen9_set_dc_state_debugmask(dev_priv); } static void skl_display_core_uninit(struct drm_i915_private *dev_priv) @@ -2206,15 +2319,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) */ void intel_power_domains_suspend(struct drm_i915_private *dev_priv) { - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - skl_display_core_uninit(dev_priv); - /* * Even if power well support was disabled we still want to disable * power wells while we are system suspended. */ if (!i915.disable_power_well) intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); + + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + skl_display_core_uninit(dev_priv); } /** @@ -2239,6 +2352,41 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv) } /** + * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use + * @dev_priv: i915 device instance + * + * This function grabs a device-level runtime pm reference if the device is + * already in use and ensures that it is powered up. + * + * Any runtime pm reference obtained by this function must have a symmetric + * call to intel_runtime_pm_put() to release the reference again. + */ +bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct device *device = &dev->pdev->dev; + + if (IS_ENABLED(CONFIG_PM)) { + int ret = pm_runtime_get_if_in_use(device); + + /* + * In cases runtime PM is disabled by the RPM core and we get + * an -EINVAL return value we are not supposed to call this + * function, since the power state is undefined. This applies + * atm to the late/early system suspend/resume handlers. 
+ */ + WARN_ON_ONCE(ret < 0); + if (ret <= 0) + return false; + } + + atomic_inc(&dev_priv->pm.wakeref_count); + assert_rpm_wakelock_held(dev_priv); + + return true; +} + +/** * intel_runtime_pm_get_noresume - grab a runtime pm reference * @dev_priv: i915 device instance * diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 2e1da060b0e1..4ecc076c4041 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1527,6 +1527,7 @@ intel_sdvo_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; @@ -1537,6 +1538,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector, if (intel_sdvo->pixel_clock_max < mode->clock) return MODE_CLOCK_HIGH; + if (mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; + if (intel_sdvo->is_lvds) { if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay) return MODE_PANEL; diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 8831fc579ade..c3998188cf35 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -129,17 +129,18 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) return val; } -u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg) +u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg) { u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port, SB_CRRDDA_NP, reg, &val); return val; } -void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) +void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, + u8 port, u32 reg, u32 val) { - vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC, + vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port, SB_CRWRDA_NP, reg, &val); } @@ -171,20 +172,6 @@ void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) SB_CRWRDA_NP, reg, &val); } -u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg) -{ - u32 val = 0; - vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, - SB_CRRDDA_NP, reg, &val); - return val; -} - -void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val) -{ - vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE, - SB_CRWRDA_NP, reg, &val); -} - u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = 0; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0875c8e0ec0a..a2582c455b36 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -350,8 +350,8 @@ vlv_update_plane(struct drm_plane *dplane, int pipe = intel_plane->pipe; int plane = intel_plane->plane; u32 sprctl; - unsigned long sprsurf_offset, linear_offset; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + u32 sprsurf_offset, linear_offset; + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->dst.x1; int crtc_y = plane_state->dst.y1; @@ -422,10 +422,9 @@ vlv_update_plane(struct drm_plane *dplane, crtc_w--; crtc_h--; - linear_offset = y * fb->pitches[0] + x * pixel_size; + linear_offset = y * fb->pitches[0] + x * cpp; sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, 
- fb->modifier[0], - pixel_size, + fb->modifier[0], cpp, fb->pitches[0]); linear_offset -= sprsurf_offset; @@ -434,7 +433,7 @@ vlv_update_plane(struct drm_plane *dplane, x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * pixel_size; + linear_offset += src_h * fb->pitches[0] + src_w * cpp; } if (key->flags) { @@ -493,8 +492,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_i915_gem_object *obj = intel_fb_obj(fb); enum pipe pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; - unsigned long sprsurf_offset, linear_offset; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + u32 sprsurf_offset, linear_offset; + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->dst.x1; int crtc_y = plane_state->dst.y1; @@ -556,10 +555,9 @@ ivb_update_plane(struct drm_plane *plane, if (crtc_w != src_w || crtc_h != src_h) sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = y * fb->pitches[0] + x * pixel_size; + linear_offset = y * fb->pitches[0] + x * cpp; sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, - fb->modifier[0], - pixel_size, + fb->modifier[0], cpp, fb->pitches[0]); linear_offset -= sprsurf_offset; @@ -570,8 +568,7 @@ ivb_update_plane(struct drm_plane *plane, if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + - src_w * pixel_size; + linear_offset += src_h * fb->pitches[0] + src_w * cpp; } } @@ -635,9 +632,9 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb = plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; - unsigned long dvssurf_offset, linear_offset; u32 dvscntr, dvsscale; - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + u32 dvssurf_offset, linear_offset; + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->dst.x1; int crtc_y = plane_state->dst.y1; @@ -695,10 +692,9 @@ ilk_update_plane(struct drm_plane *plane, if (crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = y * fb->pitches[0] + x * pixel_size; + linear_offset = y * fb->pitches[0] + x * cpp; dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, - fb->modifier[0], - pixel_size, + fb->modifier[0], cpp, fb->pitches[0]); linear_offset -= dvssurf_offset; @@ -707,7 +703,7 @@ ilk_update_plane(struct drm_plane *plane, x += src_w; y += src_h; - linear_offset += src_h * fb->pitches[0] + src_w * pixel_size; + linear_offset += src_h * fb->pitches[0] + src_w * cpp; } if (key->flags) { @@ -772,7 +768,6 @@ intel_check_sprite_plane(struct drm_plane *plane, int hscale, vscale; int max_scale, min_scale; bool can_scale; - int pixel_size; if (!fb) { state->visible = false; @@ -894,6 +889,7 @@ intel_check_sprite_plane(struct drm_plane *plane, /* Check size restrictions when scaling */ if (state->visible && (src_w != crtc_w || src_h != crtc_h)) { unsigned int width_bytes; + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); WARN_ON(!can_scale); @@ -905,9 +901,7 @@ intel_check_sprite_plane(struct drm_plane *plane, if (src_w < 3 || src_h < 3) state->visible = false; - pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); - width_bytes = ((src_x * pixel_size) & 63) + - src_w * pixel_size; + width_bytes = ((src_x * cpp) & 63) + src_w * cpp; if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || 
src_h > 2048 || width_bytes > 4096 || fb->pitches[0] > 4096)) { diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 948cbff6c62e..6745bad5bff0 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -897,6 +897,10 @@ intel_tv_mode_valid(struct drm_connector *connector, { struct intel_tv *intel_tv = intel_attached_tv(connector); const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); + int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; + + if (mode->clock > max_dotclk) + return MODE_CLOCK_HIGH; /* Ensure TV refresh is close to desired refresh */ if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) @@ -1178,10 +1182,9 @@ static int intel_tv_detect_type(struct intel_tv *intel_tv, struct drm_connector *connector) { - struct drm_encoder *encoder = &intel_tv->base.base; - struct drm_crtc *crtc = encoder->crtc; + struct drm_crtc *crtc = connector->state->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_device *dev = encoder->dev; + struct drm_device *dev = connector->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 tv_ctl, save_tv_ctl; u32 tv_dac, save_tv_dac; @@ -1230,8 +1233,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, I915_WRITE(TV_DAC, tv_dac); POSTING_READ(TV_DAC); - intel_wait_for_vblank(intel_tv->base.base.dev, - to_intel_crtc(intel_tv->base.base.crtc)->pipe); + intel_wait_for_vblank(dev, intel_crtc->pipe); type = -1; tv_dac = I915_READ(TV_DAC); @@ -1261,8 +1263,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, POSTING_READ(TV_CTL); /* For unknown reasons the hw barfs if we don't do this vblank wait. */ - intel_wait_for_vblank(intel_tv->base.base.dev, - to_intel_crtc(intel_tv->base.base.crtc)->pipe); + intel_wait_for_vblank(dev, intel_crtc->pipe); /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { @@ -1420,6 +1421,7 @@ intel_tv_get_modes(struct drm_connector *connector) if (!mode_ptr) continue; strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN); + mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0'; mode_ptr->hdisplay = hactive_s; mode_ptr->hsync_start = hactive_s + 1; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index bfa79e5c214e..436d8f2b8682 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -400,6 +400,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) void intel_uncore_sanitize(struct drm_device *dev) { + i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); + /* BIOS often leaves RC6 enabled, but disable it for hw init */ intel_disable_gt_powersave(dev); }
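The one-line intel_tv_get_modes() fix above addresses a classic strncpy() pitfall: when the source string is at least as long as the buffer, strncpy() copies exactly n bytes and writes no terminating NUL, so the destination must be terminated by hand. A small standalone illustration; the buffer size and strings are arbitrary:

#include <stdio.h>
#include <string.h>

#define NAME_LEN 8

int main(void)
{
        char name[NAME_LEN];
        const char *input = "averylongmodename";

        strncpy(name, input, NAME_LEN);  /* fills all 8 bytes, no NUL */
        name[NAME_LEN - 1] = '\0';       /* explicit termination, as in the fix */

        printf("%s\n", name);            /* prints the truncated "averylo" */
        return 0;
}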