author     Thomas Gleixner <tglx@linutronix.de>  2016-11-16 14:19:34 +0100
committer  Thomas Gleixner <tglx@linutronix.de>  2016-11-16 14:19:34 +0100
commit     7ce7f35b33eb42b6aa4cf176fa34372b21b8472b (patch)
tree       12f3027003f1d7cce30f838265e4fab2d52c6cef /drivers/gpu
parent     f410770293a1fbc08906474c24104a7a11943eb6 (diff)
parent     47bdf3378d62a627cfb8a54e1180c08d67078b61 (diff)
Merge branch 'x86/cpufeature' into x86/cache
Resolve the cpu/scattered conflict.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'drivers/gpu')
80 files changed, 779 insertions, 375 deletions
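
Several of the amdgpu hunks below share one theme: making teardown symmetric with setup. The amdgpu_drv.c change, for example, has amdgpu_init() check each sub-initializer and unwind the ones that already succeeded instead of discarding their return codes. A minimal, self-contained sketch of that goto-based unwind idiom follows; the sub-initializer names are hypothetical stand-ins for amdgpu_sync_init(), amdgpu_fence_slab_init() and amd_sched_fence_slab_init(), not the driver's own symbols.

#include <stdio.h>

/* Hypothetical sub-initializers; each returns 0 on success or a
 * negative errno-style value on failure, like the amdgpu helpers. */
static int  sync_init(void)        { return 0; }
static void sync_fini(void)        { }
static int  fence_slab_init(void)  { return 0; }
static void fence_slab_fini(void)  { }
static int  sched_slab_init(void)  { return 0; }

static int module_init_example(void)
{
	int r;

	r = sync_init();
	if (r)
		goto error_sync;

	r = fence_slab_init();
	if (r)
		goto error_fence;

	r = sched_slab_init();
	if (r)
		goto error_sched;

	return 0;			/* every stage initialized */

error_sched:
	fence_slab_fini();		/* unwind in reverse order */
error_fence:
	sync_fini();
error_sync:
	return r;
}

int main(void)
{
	printf("init: %d\n", module_init_example());
	return 0;
}

As in the patch, the labels run in reverse order of initialization, so each failure point only undoes the stages that actually completed.
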
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 892d60fb225b..2057683f7b59 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle) { int i, ret; struct device *dev; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* return early if no ACP */ + if (!adev->acp.acp_genpd) + return 0; + for (i = 0; i < ACP_DEVS ; i++) { dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 7a8bfa34682f..662976292535 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -795,10 +795,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, if (!adev->pm.fw) { switch (adev->asic_type) { case CHIP_TOPAZ: - strcpy(fw_name, "amdgpu/topaz_smc.bin"); + if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || + ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || + ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) + strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); + else + strcpy(fw_name, "amdgpu/topaz_smc.bin"); break; case CHIP_TONGA: - strcpy(fw_name, "amdgpu/tonga_smc.bin"); + if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) || + ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) + strcpy(fw_name, "amdgpu/tonga_k_smc.bin"); + else + strcpy(fw_name, "amdgpu/tonga_smc.bin"); break; case CHIP_FIJI: strcpy(fw_name, "amdgpu/fiji_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index e3281d4e3e41..086aa5c9c634 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_connector->ddc_bus->has_aux) { + if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) { drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux); amdgpu_connector->ddc_bus->has_aux = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index b0f6e6957536..82dc8d20e28a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -519,7 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates); if (unlikely(r != 0)) { - DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); + if (r != -ERESTARTSYS) + DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); goto error_free_pages; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index b4f4a9239069..7ca07e7b25c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1959,6 +1959,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) /* evict remaining vram memory */ amdgpu_bo_evict_vram(adev); + amdgpu_atombios_scratch_regs_save(adev); pci_save_state(dev->pdev); if (suspend) { /* Shut down the device */ @@ -2010,6 +2011,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) return r; } } + 
amdgpu_atombios_scratch_regs_restore(adev); /* post card */ if (!amdgpu_card_posted(adev) || !resume) { @@ -2268,8 +2270,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) } if (need_full_reset) { - /* save scratch */ - amdgpu_atombios_scratch_regs_save(adev); r = amdgpu_suspend(adev); retry: @@ -2279,8 +2279,9 @@ retry: amdgpu_display_stop_mc_access(adev, &save); amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); } - + amdgpu_atombios_scratch_regs_save(adev); r = amdgpu_asic_reset(adev); + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ amdgpu_atom_asic_init(adev->mode_info.atom_context); @@ -2288,8 +2289,6 @@ retry: dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); r = amdgpu_resume(adev); } - /* restore scratch */ - amdgpu_atombios_scratch_regs_restore(adev); } if (!r) { amdgpu_irq_gpu_reset_resume_helper(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 71ed27eb3dde..02ff0747197c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -735,8 +735,20 @@ static struct pci_driver amdgpu_kms_pci_driver = { static int __init amdgpu_init(void) { - amdgpu_sync_init(); - amdgpu_fence_slab_init(); + int r; + + r = amdgpu_sync_init(); + if (r) + goto error_sync; + + r = amdgpu_fence_slab_init(); + if (r) + goto error_fence; + + r = amd_sched_fence_slab_init(); + if (r) + goto error_sched; + if (vgacon_text_force()) { DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); return -EINVAL; @@ -748,6 +760,15 @@ static int __init amdgpu_init(void) amdgpu_register_atpx_handler(); /* let modprobe override vga console setting */ return drm_pci_init(driver, pdriver); + +error_sched: + amdgpu_fence_slab_fini(); + +error_fence: + amdgpu_sync_fini(); + +error_sync: + return r; } static void __exit amdgpu_exit(void) @@ -756,6 +777,7 @@ static void __exit amdgpu_exit(void) drm_pci_exit(driver, pdriver); amdgpu_unregister_atpx_handler(); amdgpu_sync_fini(); + amd_sched_fence_slab_fini(); amdgpu_fence_slab_fini(); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 3a2e42f4b897..77b34ec92632 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -68,6 +68,7 @@ int amdgpu_fence_slab_init(void) void amdgpu_fence_slab_fini(void) { + rcu_barrier(); kmem_cache_destroy(amdgpu_fence_slab); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 278708f5a744..9fa809876339 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -239,6 +239,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (r) { adev->irq.installed = false; flush_work(&adev->hotplug_work); + cancel_work_sync(&adev->reset_work); return r; } @@ -264,6 +265,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) if (adev->irq.msi_enabled) pci_disable_msi(adev->pdev); flush_work(&adev->hotplug_work); + cancel_work_sync(&adev->reset_work); } for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index c2c7fb140338..3938fca1ea8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) if ((amdgpu_runtime_pm != 0) && amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && 
((flags & AMD_IS_APU) == 0)) flags |= AMD_IS_PX; @@ -459,10 +461,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file /* return all clocks in KHz */ dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10; if (adev->pm.dpm_enabled) { - dev_info.max_engine_clock = - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10; - dev_info.max_memory_clock = - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10; + dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; + dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; } else { dev_info.max_engine_clock = adev->pm.default_sclk * 10; dev_info.max_memory_clock = adev->pm.default_mclk * 10; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index aa074fac0c7f..f3efb1c5dae9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -754,6 +754,10 @@ static const char *amdgpu_vram_names[] = { int amdgpu_bo_init(struct amdgpu_device *adev) { + /* reserve PAT memory space to WC for VRAM */ + arch_io_reserve_memtype_wc(adev->mc.aper_base, + adev->mc.aper_size); + /* Add an MTRR for the VRAM */ adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, adev->mc.aper_size); @@ -769,6 +773,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev) { amdgpu_ttm_fini(adev); arch_phys_wc_del(adev->mc.vram_mtrr); + arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size); } int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 06f24322e7c3..968c4260d7a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1758,5 +1758,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev) fence_put(adev->vm_manager.ids[i].first); amdgpu_sync_free(&adev->vm_manager.ids[i].active); fence_put(id->flushed_updates); + fence_put(id->last_flush); } } diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 1d8c375a3561..5be788b269e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -4075,7 +4075,7 @@ static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) pi->dpm_level_enable_mask.mclk_dpm_enable_mask); } } else { - if (pi->last_mclk_dpm_enable_mask & 0x1) { + if (pi->uvd_enabled) { pi->uvd_enabled = false; pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1; amdgpu_ci_send_msg_to_smc_with_parameter(adev, @@ -6236,6 +6236,8 @@ static int ci_dpm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + flush_work(&adev->pm.dpm.thermal.work); + mutex_lock(&adev->pm.mutex); amdgpu_pm_sysfs_fini(adev); ci_dpm_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 4108c686aa7c..9260caef74fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -3151,10 +3151,6 @@ static int dce_v10_0_hw_fini(void *handle) static int dce_v10_0_suspend(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - amdgpu_atombios_scratch_regs_save(adev); - return dce_v10_0_hw_fini(handle); } @@ -3165,8 +3161,6 @@ static int dce_v10_0_resume(void *handle) ret = dce_v10_0_hw_init(handle); - amdgpu_atombios_scratch_regs_restore(adev); - /* turn on the BL */ if (adev->mode_info.bl_encoder) { u8 bl_level = amdgpu_display_backlight_get_level(adev, diff --git 
a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index f264b8f17ad1..367739bd1927 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3215,10 +3215,6 @@ static int dce_v11_0_hw_fini(void *handle) static int dce_v11_0_suspend(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - amdgpu_atombios_scratch_regs_save(adev); - return dce_v11_0_hw_fini(handle); } @@ -3229,8 +3225,6 @@ static int dce_v11_0_resume(void *handle) ret = dce_v11_0_hw_init(handle); - amdgpu_atombios_scratch_regs_restore(adev); - /* turn on the BL */ if (adev->mode_info.bl_encoder) { u8 bl_level = amdgpu_display_backlight_get_level(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index b948d6cb1399..15f9fc0514b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2482,10 +2482,6 @@ static int dce_v6_0_hw_fini(void *handle) static int dce_v6_0_suspend(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - amdgpu_atombios_scratch_regs_save(adev); - return dce_v6_0_hw_fini(handle); } @@ -2496,8 +2492,6 @@ static int dce_v6_0_resume(void *handle) ret = dce_v6_0_hw_init(handle); - amdgpu_atombios_scratch_regs_restore(adev); - /* turn on the BL */ if (adev->mode_info.bl_encoder) { u8 bl_level = amdgpu_display_backlight_get_level(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 5966166ec94c..8c4d808db0f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -3033,10 +3033,6 @@ static int dce_v8_0_hw_fini(void *handle) static int dce_v8_0_suspend(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - amdgpu_atombios_scratch_regs_save(adev); - return dce_v8_0_hw_fini(handle); } @@ -3047,8 +3043,6 @@ static int dce_v8_0_resume(void *handle) ret = dce_v8_0_hw_init(handle); - amdgpu_atombios_scratch_regs_restore(adev); - /* turn on the BL */ if (adev->mode_info.bl_encoder) { u8 bl_level = amdgpu_display_backlight_get_level(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index ee6a48a09214..bb97182dc749 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -640,7 +640,6 @@ static const u32 stoney_mgcg_cgcg_init[] = mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201, mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201, mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200, - mmATC_MISC_CG, 0xffffffff, 0x000c0200, }; static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c22ef140a542..a16b2201d52c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -100,6 +100,7 @@ static const u32 cz_mgcg_cgcg_init[] = static const u32 stoney_mgcg_cgcg_init[] = { + mmATC_MISC_CG, 0xffffffff, 0x000c0200, mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index f8618a3881a8..71d2856222fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -3063,6 +3063,8 @@ static int kv_dpm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + flush_work(&adev->pm.dpm.thermal.work); + mutex_lock(&adev->pm.mutex); amdgpu_pm_sysfs_fini(adev); 
kv_dpm_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 3de7bca5854b..d6f85b1a0b93 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -3477,6 +3477,49 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, int i; struct si_dpm_quirk *p = si_dpm_quirk_list; + /* limit all SI kickers */ + if (adev->asic_type == CHIP_PITCAIRN) { + if ((adev->pdev->revision == 0x81) || + (adev->pdev->device == 0x6810) || + (adev->pdev->device == 0x6811) || + (adev->pdev->device == 0x6816) || + (adev->pdev->device == 0x6817) || + (adev->pdev->device == 0x6806)) + max_mclk = 120000; + } else if (adev->asic_type == CHIP_VERDE) { + if ((adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0x87) || + (adev->pdev->device == 0x6820) || + (adev->pdev->device == 0x6821) || + (adev->pdev->device == 0x6822) || + (adev->pdev->device == 0x6823) || + (adev->pdev->device == 0x682A) || + (adev->pdev->device == 0x682B)) { + max_sclk = 75000; + max_mclk = 80000; + } + } else if (adev->asic_type == CHIP_OLAND) { + if ((adev->pdev->revision == 0xC7) || + (adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83) || + (adev->pdev->device == 0x6604) || + (adev->pdev->device == 0x6605)) { + max_sclk = 75000; + max_mclk = 80000; + } + } else if (adev->asic_type == CHIP_HAINAN) { + if ((adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0xC3) || + (adev->pdev->device == 0x6664) || + (adev->pdev->device == 0x6665) || + (adev->pdev->device == 0x6667)) { + max_sclk = 75000; + max_mclk = 80000; + } + } /* Apply dpm quirks */ while (p && p->chip_device != 0) { if (adev->pdev->vendor == p->chip_vendor && @@ -3489,22 +3532,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, } ++p; } - /* limit mclk on all R7 370 parts for stability */ - if (adev->pdev->device == 0x6811 && - adev->pdev->revision == 0x81) - max_mclk = 120000; - /* limit sclk/mclk on Jet parts for stability */ - if (adev->pdev->device == 0x6665 && - adev->pdev->revision == 0xc3) { - max_sclk = 75000; - max_mclk = 80000; - } - /* Limit clocks for some HD8600 parts */ - if (adev->pdev->device == 0x6660 && - adev->pdev->revision == 0x83) { - max_sclk = 75000; - max_mclk = 80000; - } if (rps->vce_active) { rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; @@ -7777,6 +7804,8 @@ static int si_dpm_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + flush_work(&adev->pm.dpm.thermal.work); + mutex_lock(&adev->pm.mutex); amdgpu_pm_sysfs_fini(adev); si_dpm_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 8533269ec160..6feed726e299 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -52,6 +52,8 @@ #define VCE_V3_0_STACK_SIZE (64 * 1024) #define VCE_V3_0_DATA_SIZE ((16 * 1024 * AMDGPU_MAX_VCE_HANDLES) + (52 * 1024)) +#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) + static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); @@ -382,6 +384,10 @@ static int vce_v3_0_sw_init(void *handle) if (r) return r; + /* 52.8.3 required for 3 ring support */ + if (adev->vce.fw_version < FW_52_8_3) + adev->vce.num_rings = 2; + r = 
amdgpu_vce_resume(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index c0d9aad7126f..f62f1a74f890 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -80,7 +80,9 @@ #include "dce_virtual.h" MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); +MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin"); MODULE_FIRMWARE("amdgpu/tonga_smc.bin"); +MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin"); MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); @@ -1651,7 +1653,7 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCE_MGCG; - adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_PIPELINE | AMD_PG_SUPPORT_UVD | diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index 14f8c1f4da3d..0723758ed065 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -272,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw PHM_FUNC_CHECK(hwmgr); if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) - return -EINVAL; + return false; return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 1167205057b3..e03dcb6ea9c1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -710,13 +710,15 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t vol; int ret = 0; - if (hwmgr->chip_id < CHIP_POLARIS10) { - atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); + if (hwmgr->chip_id < CHIP_TONGA) { + ret = atomctrl_get_voltage_evv(hwmgr, id, voltage); + } else if (hwmgr->chip_id < CHIP_POLARIS10) { + ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage); if (*voltage >= 2000 || *voltage == 0) *voltage = 1150; } else { ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol); - *voltage = (uint16_t)vol/100; + *voltage = (uint16_t)(vol/100); } return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 1126bd4f74dc..0894527d932f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -1320,7 +1320,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ if (0 != result) return result; - *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); + *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *) + (&get_voltage_info_param_space))->ulVoltageLevel); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 7de701d8a450..4477c55a58e3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -1201,12 +1201,15 @@ static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr, static int 
ppt_get_num_of_vce_state_table_entries_v1_0(struct pp_hwmgr *hwmgr) { const ATOM_Tonga_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); - const ATOM_Tonga_VCE_State_Table *vce_state_table = - (ATOM_Tonga_VCE_State_Table *)(((unsigned long)pp_table) + le16_to_cpu(pp_table->usVCEStateTableOffset)); + const ATOM_Tonga_VCE_State_Table *vce_state_table; - if (vce_state_table == NULL) + + if (pp_table == NULL) return 0; + vce_state_table = (void *)pp_table + + le16_to_cpu(pp_table->usVCEStateTableOffset); + return vce_state_table->ucNumEntries; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 609996c84ad5..b0c929dd8beb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1168,8 +1168,8 @@ int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) tmp_result = (!smum_is_dpm_running(hwmgr)) ? 0 : -1; PP_ASSERT_WITH_CODE(tmp_result == 0, - "DPM is already running right now, no need to enable DPM!", - return 0); + "DPM is already running", + ); if (smu7_voltage_control(hwmgr)) { tmp_result = smu7_enable_voltage_control(hwmgr); @@ -1460,19 +1460,19 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL; - if (table_info == NULL) - return -EINVAL; - - sclk_table = table_info->vdd_dep_on_sclk; - for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) { vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) { - if (0 == phm_get_sclk_for_voltage_evv(hwmgr, + if ((hwmgr->pp_table_version == PP_TABLE_V1) + && !phm_get_sclk_for_voltage_evv(hwmgr, table_info->vddgfx_lookup_table, vv_id, &sclk)) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher)) { + if (table_info == NULL) + return -EINVAL; + sclk_table = table_info->vdd_dep_on_sclk; + for (j = 1; j < sclk_table->count; j++) { if (sclk_table->entries[j].clk == sclk && sclk_table->entries[j].cks_enable == 0) { @@ -1498,12 +1498,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) } } } else { - if ((hwmgr->pp_table_version == PP_TABLE_V0) || !phm_get_sclk_for_voltage_evv(hwmgr, table_info->vddc_lookup_table, vv_id, &sclk)) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher)) { + if (table_info == NULL) + return -EINVAL; + sclk_table = table_info->vdd_dep_on_sclk; + for (j = 1; j < sclk_table->count; j++) { if (sclk_table->entries[j].clk == sclk && sclk_table->entries[j].cks_enable == 0) { @@ -2127,15 +2130,20 @@ static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr, } static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr, - struct phm_clock_and_voltage_limits *tab) + struct phm_clock_and_voltage_limits *tab) { + uint32_t vddc, vddci; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); if (tab) { - smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddc, - &data->vddc_leakage); - smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, (uint32_t *)&tab->vddci, - &data->vddci_leakage); + vddc = tab->vddc; + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, + &data->vddc_leakage); + tab->vddc = vddc; + vddci = tab->vddci; + smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci, + &data->vddci_leakage); + tab->vddci = vddci; } return 0; @@ -4225,18 +4233,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) { struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information 
*)hwmgr->pptable; - struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL; + struct phm_clock_voltage_dependency_table *sclk_table; int i; - if (table_info == NULL) - return -EINVAL; - - dep_sclk_table = table_info->vdd_dep_on_sclk; - - for (i = 0; i < dep_sclk_table->count; i++) { - clocks->clock[i] = dep_sclk_table->entries[i].clk; - clocks->count++; + if (hwmgr->pp_table_version == PP_TABLE_V1) { + if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) + return -EINVAL; + dep_sclk_table = table_info->vdd_dep_on_sclk; + for (i = 0; i < dep_sclk_table->count; i++) { + clocks->clock[i] = dep_sclk_table->entries[i].clk; + clocks->count++; + } + } else if (hwmgr->pp_table_version == PP_TABLE_V0) { + sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; + for (i = 0; i < sclk_table->count; i++) { + clocks->clock[i] = sclk_table->entries[i].clk; + clocks->count++; + } } + return 0; } @@ -4258,17 +4274,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) (struct phm_ppt_v1_information *)hwmgr->pptable; struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table; int i; + struct phm_clock_voltage_dependency_table *mclk_table; - if (table_info == NULL) - return -EINVAL; - - dep_mclk_table = table_info->vdd_dep_on_mclk; - - for (i = 0; i < dep_mclk_table->count; i++) { - clocks->clock[i] = dep_mclk_table->entries[i].clk; - clocks->latency[i] = smu7_get_mem_latency(hwmgr, + if (hwmgr->pp_table_version == PP_TABLE_V1) { + if (table_info == NULL) + return -EINVAL; + dep_mclk_table = table_info->vdd_dep_on_mclk; + for (i = 0; i < dep_mclk_table->count; i++) { + clocks->clock[i] = dep_mclk_table->entries[i].clk; + clocks->latency[i] = smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk); - clocks->count++; + clocks->count++; + } + } else if (hwmgr->pp_table_version == PP_TABLE_V0) { + mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; + for (i = 0; i < mclk_table->count; i++) { + clocks->clock[i] = mclk_table->entries[i].clk; + clocks->count++; + } } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index fb6c6f6106d5..29d0319b22e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info) { if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; + return -ENODEV; fan_speed_info->supports_percent_read = true; fan_speed_info->supports_percent_write = true; @@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint64_t tmp64; if (hwmgr->thermal_controller.fanInfo.bNoFan) - return 0; + return -ENODEV; duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100); @@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) if (hwmgr->thermal_controller.fanInfo.bNoFan || (hwmgr->thermal_controller.fanInfo. 
ucTachometerPulsesPerRevolution == 0)) - return 0; + return -ENODEV; tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_TACH_STATUS, TACH_PERIOD); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 963a24d46a93..ffe1f85ce300 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); -struct kmem_cache *sched_fence_slab; -atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); - /* Initialize a given run queue struct */ static void amd_sched_rq_init(struct amd_sched_rq *rq) { @@ -618,13 +615,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched, INIT_LIST_HEAD(&sched->ring_mirror_list); spin_lock_init(&sched->job_list_lock); atomic_set(&sched->hw_rq_count, 0); - if (atomic_inc_return(&sched_fence_slab_ref) == 1) { - sched_fence_slab = kmem_cache_create( - "amd_sched_fence", sizeof(struct amd_sched_fence), 0, - SLAB_HWCACHE_ALIGN, NULL); - if (!sched_fence_slab) - return -ENOMEM; - } /* Each scheduler will run on a seperate kernel thread */ sched->thread = kthread_run(amd_sched_main, sched, sched->name); @@ -645,6 +635,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched) { if (sched->thread) kthread_stop(sched->thread); - if (atomic_dec_and_test(&sched_fence_slab_ref)) - kmem_cache_destroy(sched_fence_slab); } diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 7cbbbfb502ef..51068e6c3d9a 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -30,9 +30,6 @@ struct amd_gpu_scheduler; struct amd_sched_rq; -extern struct kmem_cache *sched_fence_slab; -extern atomic_t sched_fence_slab_ref; - /** * A scheduler entity is a wrapper around a job queue or a group * of other entities. 
Entities take turns emitting jobs from their @@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, struct amd_sched_entity *entity); void amd_sched_entity_push_job(struct amd_sched_job *sched_job); +int amd_sched_fence_slab_init(void); +void amd_sched_fence_slab_fini(void); + struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); void amd_sched_fence_scheduled(struct amd_sched_fence *fence); diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 6b63beaf7574..88fc2d662579 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -27,6 +27,25 @@ #include <drm/drmP.h> #include "gpu_scheduler.h" +static struct kmem_cache *sched_fence_slab; + +int amd_sched_fence_slab_init(void) +{ + sched_fence_slab = kmem_cache_create( + "amd_sched_fence", sizeof(struct amd_sched_fence), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!sched_fence_slab) + return -ENOMEM; + + return 0; +} + +void amd_sched_fence_slab_fini(void) +{ + rcu_barrier(); + kmem_cache_destroy(sched_fence_slab); +} + struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, void *owner) { @@ -103,7 +122,7 @@ static void amd_sched_fence_free(struct rcu_head *rcu) } /** - * amd_sched_fence_release - callback that fence can be freed + * amd_sched_fence_release_scheduled - callback that fence can be freed * * @fence: fence * @@ -118,7 +137,7 @@ static void amd_sched_fence_release_scheduled(struct fence *f) } /** - * amd_sched_fence_release_scheduled - drop extra reference + * amd_sched_fence_release_finished - drop extra reference * * @f: fence * diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 608df4c90520..0743e65cb240 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -267,6 +267,8 @@ int ast_mm_init(struct ast_private *ast) return ret; } + arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0)); @@ -275,11 +277,15 @@ int ast_mm_init(struct ast_private *ast) void ast_mm_fini(struct ast_private *ast) { + struct drm_device *dev = ast->dev; + ttm_bo_device_release(&ast->ttm.bdev); ast_ttm_global_release(ast); arch_phys_wc_del(ast->fb_mtrr); + arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); } void ast_ttm_placement(struct ast_bo *bo, int domain) diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index bb2438dd8733..5e7e63ce7bce 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -267,6 +267,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus) return ret; } + arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0)); @@ -276,6 +279,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus) void cirrus_mm_fini(struct cirrus_device *cirrus) { + struct drm_device *dev = cirrus->dev; + if (!cirrus->mm_inited) return; @@ -285,6 +290,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus) arch_phys_wc_del(cirrus->fb_mtrr); cirrus->fb_mtrr = 0; + arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); } void cirrus_ttm_placement(struct cirrus_bo *bo, int domain) diff --git 
a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 23739609427d..e6862a744210 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -420,18 +420,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc, ssize_t expected_size, bool *replaced) { - struct drm_device *dev = crtc->dev; struct drm_property_blob *new_blob = NULL; if (blob_id != 0) { - new_blob = drm_property_lookup_blob(dev, blob_id); + new_blob = drm_property_lookup_blob(crtc->dev, blob_id); if (new_blob == NULL) return -EINVAL; - if (expected_size > 0 && expected_size != new_blob->length) + + if (expected_size > 0 && expected_size != new_blob->length) { + drm_property_unreference_blob(new_blob); return -EINVAL; + } } drm_atomic_replace_property_blob(blob, new_blob, replaced); + drm_property_unreference_blob(new_blob); return 0; } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index c3f83476f996..21f992605541 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -594,10 +594,6 @@ drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_plane_state *plane_state; int i, ret = 0; - ret = drm_atomic_normalize_zpos(dev, state); - if (ret) - return ret; - for_each_plane_in_state(state, plane, plane_state, i) { const struct drm_plane_helper_funcs *funcs; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 04e457117980..aa644487749c 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref) /* no need to clean up vcpi * as if we have no connector we never setup a vcpi */ drm_dp_port_teardown_pdt(port, port->pdt); + port->pdt = DP_PEER_DEVICE_NONE; } kfree(port); } @@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_put_port(port); goto out; } - if (port->port_num >= DP_MST_LOGICAL_PORT_0) { + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + port->pdt == DP_PEER_DEVICE_SST_SINK) && + port->port_num >= DP_MST_LOGICAL_PORT_0) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); drm_mode_connector_set_tile_property(port->connector); } @@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) mgr->cbs->destroy_connector(mgr, port->connector); drm_dp_port_teardown_pdt(port, port->pdt); + port->pdt = DP_PEER_DEVICE_NONE; if (!port->input && port->vcpi.vcpi > 0) { drm_dp_mst_reset_vcpi_slots(mgr, port); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 03414bde1f15..6c75e62c0b22 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -131,7 +131,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) return 0; fail: for (i = 0; i < fb_helper->connector_count; i++) { - kfree(fb_helper->connector_info[i]); + struct drm_fb_helper_connector *fb_helper_connector = + fb_helper->connector_info[i]; + + drm_connector_unreference(fb_helper_connector->connector); + + kfree(fb_helper_connector); fb_helper->connector_info[i] = NULL; } fb_helper->connector_count = 0; @@ -603,6 +608,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_blank); +static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper, + struct drm_mode_set *modeset) +{ + int i; + + for (i = 0; i < modeset->num_connectors; i++) { + 
drm_connector_unreference(modeset->connectors[i]); + modeset->connectors[i] = NULL; + } + modeset->num_connectors = 0; + + drm_mode_destroy(helper->dev, modeset->mode); + modeset->mode = NULL; + + /* FIXME should hold a ref? */ + modeset->fb = NULL; +} + static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) { int i; @@ -612,10 +635,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper) kfree(helper->connector_info[i]); } kfree(helper->connector_info); + for (i = 0; i < helper->crtc_count; i++) { - kfree(helper->crtc_info[i].mode_set.connectors); - if (helper->crtc_info[i].mode_set.mode) - drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode); + struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set; + + drm_fb_helper_modeset_release(helper, modeset); + kfree(modeset->connectors); } kfree(helper->crtc_info); } @@ -644,7 +669,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work) clip->x2 = clip->y2 = 0; spin_unlock_irqrestore(&helper->dirty_lock, flags); - helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); + /* call dirty callback only when it has been really touched */ + if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) + helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); } /** @@ -2088,7 +2115,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) struct drm_fb_helper_crtc **crtcs; struct drm_display_mode **modes; struct drm_fb_offset *offsets; - struct drm_mode_set *modeset; bool *enabled; int width, height; int i; @@ -2136,45 +2162,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper) /* need to set the modesets up here for use later */ /* fill out the connector<->crtc mappings into the modesets */ - for (i = 0; i < fb_helper->crtc_count; i++) { - modeset = &fb_helper->crtc_info[i].mode_set; - modeset->num_connectors = 0; - modeset->fb = NULL; - } + for (i = 0; i < fb_helper->crtc_count; i++) + drm_fb_helper_modeset_release(fb_helper, + &fb_helper->crtc_info[i].mode_set); for (i = 0; i < fb_helper->connector_count; i++) { struct drm_display_mode *mode = modes[i]; struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; struct drm_fb_offset *offset = &offsets[i]; - modeset = &fb_crtc->mode_set; + struct drm_mode_set *modeset = &fb_crtc->mode_set; if (mode && fb_crtc) { + struct drm_connector *connector = + fb_helper->connector_info[i]->connector; + DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n", mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y); + fb_crtc->desired_mode = mode; fb_crtc->x = offset->x; fb_crtc->y = offset->y; - if (modeset->mode) - drm_mode_destroy(dev, modeset->mode); modeset->mode = drm_mode_duplicate(dev, fb_crtc->desired_mode); - modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; + drm_connector_reference(connector); + modeset->connectors[modeset->num_connectors++] = connector; modeset->fb = fb_helper->fb; modeset->x = offset->x; modeset->y = offset->y; } } - - /* Clear out any old modes if there are no more connected outputs. 
*/ - for (i = 0; i < fb_helper->crtc_count; i++) { - modeset = &fb_helper->crtc_info[i].mode_set; - if (modeset->num_connectors == 0) { - BUG_ON(modeset->fb); - if (modeset->mode) - drm_mode_destroy(dev, modeset->mode); - modeset->mode = NULL; - } - } out: kfree(crtcs); kfree(modes); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index def78c8c1780..f86e7c846678 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -262,6 +262,26 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, return 0; } +int exynos_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int ret; + + ret = drm_atomic_helper_check_modeset(dev, state); + if (ret) + return ret; + + ret = drm_atomic_normalize_zpos(dev, state); + if (ret) + return ret; + + ret = drm_atomic_helper_check_planes(dev, state); + if (ret) + return ret; + + return ret; +} + static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) { struct drm_exynos_file_private *file_priv; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index d215149e737b..80c4d5b81689 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -301,6 +301,7 @@ static inline int exynos_dpi_bind(struct drm_device *dev, int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); +int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); extern struct platform_driver fimd_driver; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 40ce841eb952..23cce0a3f5fc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -190,7 +190,7 @@ dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { .fb_create = exynos_user_fb_create, .output_poll_changed = exynos_drm_output_poll_changed, - .atomic_check = drm_atomic_helper_check, + .atomic_check = exynos_atomic_check, .atomic_commit = exynos_atomic_commit, }; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index bfb2efd8d4d4..18dfdd5c1b3b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1447,8 +1447,6 @@ static int i915_drm_suspend(struct drm_device *dev) dev_priv->suspend_count++; - intel_display_set_init_power(dev_priv, false); - intel_csr_ucode_suspend(dev_priv); out: @@ -1466,6 +1464,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) disable_rpm_wakeref_asserts(dev_priv); + intel_display_set_init_power(dev_priv, false); + fw_csr = !IS_BROXTON(dev_priv) && suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; /* diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8b9ee4e390c0..685e9e065287 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2883,6 +2883,11 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif +extern const struct dev_pm_ops i915_pm_ops; + +extern int i915_driver_load(struct pci_dev *pdev, + const struct pci_device_id *ent); +extern void i915_driver_unload(struct drm_device *dev); extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask); extern 
bool intel_has_gpu_reset(struct drm_i915_private *dev_priv); extern void i915_reset(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 947e82c2b175..91ab7e9d6d2e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1806,7 +1806,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf) /* Use a partial view if it is bigger than available space */ chunk_size = MIN_CHUNK_PAGES; if (i915_gem_object_is_tiled(obj)) - chunk_size = max(chunk_size, tile_row_pages(obj)); + chunk_size = roundup(chunk_size, tile_row_pages(obj)); memset(&view, 0, sizeof(view)); view.type = I915_GGTT_VIEW_PARTIAL; @@ -3543,15 +3543,27 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, if (view->type == I915_GGTT_VIEW_NORMAL) vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, PIN_MAPPABLE | PIN_NONBLOCK); - if (IS_ERR(vma)) - vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0); + if (IS_ERR(vma)) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + unsigned int flags; + + /* Valleyview is definitely limited to scanning out the first + * 512MiB. Lets presume this behaviour was inherited from the + * g4x display engine and that all earlier gen are similarly + * limited. Testing suggests that it is a little more + * complicated than this. For example, Cherryview appears quite + * happy to scanout from anywhere within its global aperture. + */ + flags = 0; + if (HAS_GMCH_DISPLAY(i915)) + flags = PIN_MAPPABLE; + vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); + } if (IS_ERR(vma)) goto err_unpin_display; vma->display_alignment = max_t(u64, vma->display_alignment, alignment); - WARN_ON(obj->pin_display > i915_vma_pin_count(vma)); - i915_gem_object_flush_cpu_write_domain(obj); old_write_domain = obj->base.write_domain; @@ -3588,7 +3600,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) list_move_tail(&vma->vm_link, &vma->vm->inactive_list); i915_vma_unpin(vma); - WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma)); } /** @@ -3745,7 +3756,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) mappable = (vma->node.start + fence_size <= dev_priv->ggtt.mappable_end); - if (mappable && fenceable) + /* + * Explicitly disable for rotated VMA since the display does not + * need the fence and the VMA is not accessible to other users. + */ + if (mappable && fenceable && + vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED) vma->flags |= I915_VMA_CAN_FENCE; else vma->flags &= ~I915_VMA_CAN_FENCE; diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index 8df1fa7234e8..2c7ba0ee127c 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c @@ -290,6 +290,8 @@ i915_vma_put_fence(struct i915_vma *vma) { struct drm_i915_fence_reg *fence = vma->fence; + assert_rpm_wakelock_held(to_i915(vma->vm->dev)); + if (!fence) return 0; @@ -341,6 +343,8 @@ i915_vma_get_fence(struct i915_vma *vma) struct drm_i915_fence_reg *fence; struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; + assert_rpm_wakelock_held(to_i915(vma->vm->dev)); + /* Just update our place in the LRU if our fence is getting reused. */ if (vma->fence) { fence = vma->fence; @@ -371,6 +375,12 @@ void i915_gem_restore_fences(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); int i; + /* Note that this may be called outside of struct_mutex, by + * runtime suspend/resume. 
The barrier we require is enforced by + * rpm itself - all access to fences/GTT are only within an rpm + * wakeref, and to acquire that wakeref you must pass through here. + */ + for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; struct i915_vma *vma = reg->vma; @@ -379,10 +389,17 @@ void i915_gem_restore_fences(struct drm_device *dev) * Commit delayed tiling changes if we have an object still * attached to the fence, otherwise just clear the fence. */ - if (vma && !i915_gem_object_is_tiled(vma->obj)) + if (vma && !i915_gem_object_is_tiled(vma->obj)) { + GEM_BUG_ON(!reg->dirty); + GEM_BUG_ON(vma->obj->fault_mappable); + + list_move(®->link, &dev_priv->mm.fence_list); + vma->fence = NULL; vma = NULL; + } - fence_update(reg, vma); + fence_write(reg, vma); + reg->vma = vma; } } diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 687c768833b3..31e6edd08dd0 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -431,9 +431,6 @@ static const struct pci_device_id pciidlist[] = { }; MODULE_DEVICE_TABLE(pci, pciidlist); -extern int i915_driver_load(struct pci_dev *pdev, - const struct pci_device_id *ent); - static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct intel_device_info *intel_info = @@ -463,8 +460,6 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return i915_driver_load(pdev, ent); } -extern void i915_driver_unload(struct drm_device *dev); - static void i915_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); @@ -473,8 +468,6 @@ static void i915_pci_remove(struct pci_dev *pdev) drm_dev_unref(dev); } -extern const struct dev_pm_ops i915_pm_ops; - static struct pci_driver i915_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index c6e69e4cfa83..1f8af87c6294 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1031,6 +1031,77 @@ static u8 translate_iboost(u8 val) return mapping[val]; } +static void sanitize_ddc_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + const struct ddi_vbt_port_info *info = + &dev_priv->vbt.ddi_port_info[port]; + enum port p; + + if (!info->alternate_ddc_pin) + return; + + for_each_port_masked(p, (1 << port) - 1) { + struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; + + if (info->alternate_ddc_pin != i->alternate_ddc_pin) + continue; + + DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, " + "disabling port %c DVI/HDMI support\n", + port_name(p), i->alternate_ddc_pin, + port_name(port), port_name(p)); + + /* + * If we have multiple ports supposedly sharing the + * pin, then dvi/hdmi couldn't exist on the shared + * port. Otherwise they share the same ddc bin and + * system couldn't communicate with them separately. + * + * Due to parsing the ports in alphabetical order, + * a higher port will always clobber a lower one. 
+ */ + i->supports_dvi = false; + i->supports_hdmi = false; + i->alternate_ddc_pin = 0; + } +} + +static void sanitize_aux_ch(struct drm_i915_private *dev_priv, + enum port port) +{ + const struct ddi_vbt_port_info *info = + &dev_priv->vbt.ddi_port_info[port]; + enum port p; + + if (!info->alternate_aux_channel) + return; + + for_each_port_masked(p, (1 << port) - 1) { + struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p]; + + if (info->alternate_aux_channel != i->alternate_aux_channel) + continue; + + DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, " + "disabling port %c DP support\n", + port_name(p), i->alternate_aux_channel, + port_name(port), port_name(p)); + + /* + * If we have multiple ports supposedlt sharing the + * aux channel, then DP couldn't exist on the shared + * port. Otherwise they share the same aux channel + * and system couldn't communicate with them separately. + * + * Due to parsing the ports in alphabetical order, + * a higher port will always clobber a lower one. + */ + i->supports_dp = false; + i->alternate_aux_channel = 0; + } +} + static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, const struct bdb_header *bdb) { @@ -1105,54 +1176,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); if (is_dvi) { - if (port == PORT_E) { - info->alternate_ddc_pin = ddc_pin; - /* if DDIE share ddc pin with other port, then - * dvi/hdmi couldn't exist on the shared port. - * Otherwise they share the same ddc bin and system - * couldn't communicate with them seperately. */ - if (ddc_pin == DDC_PIN_B) { - dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0; - dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0; - } else if (ddc_pin == DDC_PIN_C) { - dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0; - dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0; - } else if (ddc_pin == DDC_PIN_D) { - dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0; - dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0; - } - } else if (ddc_pin == DDC_PIN_B && port != PORT_B) - DRM_DEBUG_KMS("Unexpected DDC pin for port B\n"); - else if (ddc_pin == DDC_PIN_C && port != PORT_C) - DRM_DEBUG_KMS("Unexpected DDC pin for port C\n"); - else if (ddc_pin == DDC_PIN_D && port != PORT_D) - DRM_DEBUG_KMS("Unexpected DDC pin for port D\n"); + info->alternate_ddc_pin = ddc_pin; + + sanitize_ddc_pin(dev_priv, port); } if (is_dp) { - if (port == PORT_E) { - info->alternate_aux_channel = aux_channel; - /* if DDIE share aux channel with other port, then - * DP couldn't exist on the shared port. Otherwise - * they share the same aux channel and system - * couldn't communicate with them seperately. 
*/ - if (aux_channel == DP_AUX_A) - dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0; - else if (aux_channel == DP_AUX_B) - dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0; - else if (aux_channel == DP_AUX_C) - dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0; - else if (aux_channel == DP_AUX_D) - dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0; - } - else if (aux_channel == DP_AUX_A && port != PORT_A) - DRM_DEBUG_KMS("Unexpected AUX channel for port A\n"); - else if (aux_channel == DP_AUX_B && port != PORT_B) - DRM_DEBUG_KMS("Unexpected AUX channel for port B\n"); - else if (aux_channel == DP_AUX_C && port != PORT_C) - DRM_DEBUG_KMS("Unexpected AUX channel for port C\n"); - else if (aux_channel == DP_AUX_D && port != PORT_D) - DRM_DEBUG_KMS("Unexpected AUX channel for port D\n"); + info->alternate_aux_channel = aux_channel; + + sanitize_aux_ch(dev_priv, port); } if (bdb->version >= 158) { diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 73b6858600ac..1b20e160bc1f 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -192,7 +192,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu; const int s_max = 3, ss_max = 3, eu_max = 8; int s, ss; - u32 fuse2, eu_disable[s_max]; + u32 fuse2, eu_disable[3]; /* s_max */ fuse2 = I915_READ(GEN8_FUSE2); sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index fbcfed63a76e..81c11499bcf0 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2978,7 +2978,8 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) /* Rotate src coordinates to match rotated GTT view */ if (intel_rotation_90_or_270(rotation)) drm_rect_rotate(&plane_state->base.src, - fb->width, fb->height, DRM_ROTATE_270); + fb->width << 16, fb->height << 16, + DRM_ROTATE_270); /* * Handle the AUX surface first since @@ -10242,6 +10243,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state) bxt_set_cdclk(to_i915(dev), req_cdclk); } +static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state, + int pixel_rate) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) + pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); + + /* BSpec says "Do not use DisplayPort with CDCLK less than + * 432 MHz, audio enabled, port width x4, and link rate + * HBR2 (5.4 GHz), or else there may be audio corruption or + * screen corruption." 
+ */ + if (intel_crtc_has_dp_encoder(crtc_state) && + crtc_state->has_audio && + crtc_state->port_clock >= 540000 && + crtc_state->lane_count == 4) + pixel_rate = max(432000, pixel_rate); + + return pixel_rate; +} + /* compute the max rate for new configuration */ static int ilk_max_pixel_rate(struct drm_atomic_state *state) { @@ -10267,9 +10291,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state) pixel_rate = ilk_pipe_pixel_rate(crtc_state); - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) - pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); + if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv)) + pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state, + pixel_rate); intel_state->min_pixclk[i] = pixel_rate; } @@ -14310,7 +14334,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) for_each_plane_in_state(state, plane, plane_state, i) { struct intel_plane_state *intel_plane_state = - to_intel_plane_state(plane_state); + to_intel_plane_state(plane->state); if (!intel_plane_state->wait_req) continue; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 14a3cf0b7213..3581b5a7f716 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1108,6 +1108,44 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) return ret; } +static enum port intel_aux_port(struct drm_i915_private *dev_priv, + enum port port) +{ + const struct ddi_vbt_port_info *info = + &dev_priv->vbt.ddi_port_info[port]; + enum port aux_port; + + if (!info->alternate_aux_channel) { + DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n", + port_name(port), port_name(port)); + return port; + } + + switch (info->alternate_aux_channel) { + case DP_AUX_A: + aux_port = PORT_A; + break; + case DP_AUX_B: + aux_port = PORT_B; + break; + case DP_AUX_C: + aux_port = PORT_C; + break; + case DP_AUX_D: + aux_port = PORT_D; + break; + default: + MISSING_CASE(info->alternate_aux_channel); + aux_port = PORT_A; + break; + } + + DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n", + port_name(aux_port), port_name(port)); + + return aux_port; +} + static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv, enum port port) { @@ -1168,36 +1206,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv, } } -/* - * On SKL we don't have Aux for port E so we rely - * on VBT to set a proper alternate aux channel. 
- */ -static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv) -{ - const struct ddi_vbt_port_info *info = - &dev_priv->vbt.ddi_port_info[PORT_E]; - - switch (info->alternate_aux_channel) { - case DP_AUX_A: - return PORT_A; - case DP_AUX_B: - return PORT_B; - case DP_AUX_C: - return PORT_C; - case DP_AUX_D: - return PORT_D; - default: - MISSING_CASE(info->alternate_aux_channel); - return PORT_A; - } -} - static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv, enum port port) { - if (port == PORT_E) - port = skl_porte_aux_port(dev_priv); - switch (port) { case PORT_A: case PORT_B: @@ -1213,9 +1224,6 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv, static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv, enum port port, int index) { - if (port == PORT_E) - port = skl_porte_aux_port(dev_priv); - switch (port) { case PORT_A: case PORT_B: @@ -1253,7 +1261,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv, static void intel_aux_reg_init(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); - enum port port = dp_to_dig_port(intel_dp)->port; + enum port port = intel_aux_port(dev_priv, + dp_to_dig_port(intel_dp)->port); int i; intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port); @@ -3551,8 +3560,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) /* Read the eDP Display control capabilities registers */ if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, - intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd) == - sizeof(intel_dp->edp_dpcd))) + intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == + sizeof(intel_dp->edp_dpcd)) DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd), intel_dp->edp_dpcd); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index faa67624e1ed..c43dd9abce79 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, int lines; intel_fbc_get_plane_source_size(cache, NULL, &lines); - if (INTEL_INFO(dev_priv)->gen >= 7) + if (INTEL_GEN(dev_priv) == 7) lines = min(lines, 2048); + else if (INTEL_GEN(dev_priv) >= 8) + lines = min(lines, 2560); /* Hardware needs the full buffer stride, not just the active area. 
*/ return lines * cache->fb.stride; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index f40a35f2913a..13c306173f27 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -1799,6 +1799,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; } +static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, + enum port port) +{ + const struct ddi_vbt_port_info *info = + &dev_priv->vbt.ddi_port_info[port]; + u8 ddc_pin; + + if (info->alternate_ddc_pin) { + DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n", + info->alternate_ddc_pin, port_name(port)); + return info->alternate_ddc_pin; + } + + switch (port) { + case PORT_B: + if (IS_BROXTON(dev_priv)) + ddc_pin = GMBUS_PIN_1_BXT; + else + ddc_pin = GMBUS_PIN_DPB; + break; + case PORT_C: + if (IS_BROXTON(dev_priv)) + ddc_pin = GMBUS_PIN_2_BXT; + else + ddc_pin = GMBUS_PIN_DPC; + break; + case PORT_D: + if (IS_CHERRYVIEW(dev_priv)) + ddc_pin = GMBUS_PIN_DPD_CHV; + else + ddc_pin = GMBUS_PIN_DPD; + break; + default: + MISSING_CASE(port); + ddc_pin = GMBUS_PIN_DPB; + break; + } + + DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n", + ddc_pin, port_name(port)); + + return ddc_pin; +} + void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct intel_connector *intel_connector) { @@ -1808,7 +1852,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_dig_port->port; - uint8_t alternate_ddc_pin; DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", port_name(port)); @@ -1826,12 +1869,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, connector->doublescan_allowed = 0; connector->stereo_allowed = 1; + intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); + switch (port) { case PORT_B: - if (IS_BROXTON(dev_priv)) - intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT; - else - intel_hdmi->ddc_bus = GMBUS_PIN_DPB; /* * On BXT A0/A1, sw needs to activate DDIA HPD logic and * interrupts to check the external panel connection. @@ -1842,46 +1883,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_encoder->hpd_pin = HPD_PORT_B; break; case PORT_C: - if (IS_BROXTON(dev_priv)) - intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT; - else - intel_hdmi->ddc_bus = GMBUS_PIN_DPC; intel_encoder->hpd_pin = HPD_PORT_C; break; case PORT_D: - if (WARN_ON(IS_BROXTON(dev_priv))) - intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED; - else if (IS_CHERRYVIEW(dev_priv)) - intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV; - else - intel_hdmi->ddc_bus = GMBUS_PIN_DPD; intel_encoder->hpd_pin = HPD_PORT_D; break; case PORT_E: - /* On SKL PORT E doesn't have seperate GMBUS pin - * We rely on VBT to set a proper alternate GMBUS pin. */ - alternate_ddc_pin = - dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin; - switch (alternate_ddc_pin) { - case DDC_PIN_B: - intel_hdmi->ddc_bus = GMBUS_PIN_DPB; - break; - case DDC_PIN_C: - intel_hdmi->ddc_bus = GMBUS_PIN_DPC; - break; - case DDC_PIN_D: - intel_hdmi->ddc_bus = GMBUS_PIN_DPD; - break; - default: - MISSING_CASE(alternate_ddc_pin); - } intel_encoder->hpd_pin = HPD_PORT_E; break; - case PORT_A: - intel_encoder->hpd_pin = HPD_PORT_A; - /* Internal port only for eDP. 
*/ default: - BUG(); + MISSING_CASE(port); + return; } if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a2f751cd187a..db24f898853c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3362,13 +3362,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, int num_active; int id, i; + /* Clear the partitioning for disabled planes. */ + memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); + memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe])); + if (WARN_ON(!state)) return 0; if (!cstate->base.active) { ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0; - memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); - memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe])); return 0; } @@ -3468,12 +3470,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, return 0; } -static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) -{ - /* TODO: Take into account the scalers once we support them */ - return config->base.adjusted_mode.crtc_clock; -} - /* * The max latency should be 257 (max the punit can code is 255 and we add 2us * for the read latency) and cpp should always be <= 8, so that @@ -3524,7 +3520,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst * Adjusted plane pixel rate is just the pipe's adjusted pixel rate * with additional adjustments for plane-specific scaling. */ - adjusted_pixel_rate = skl_pipe_pixel_rate(cstate); + adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate); downscale_amount = skl_plane_downscale_amount(pstate); pixel_rate = adjusted_pixel_rate * downscale_amount >> 16; @@ -3736,11 +3732,11 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate) if (!cstate->base.active) return 0; - if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0)) + if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0)) return 0; return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, - skl_pipe_pixel_rate(cstate)); + ilk_pipe_pixel_rate(cstate)); } static void skl_compute_transition_wm(struct intel_crtc_state *cstate, @@ -4050,6 +4046,12 @@ skl_compute_ddb(struct drm_atomic_state *state) intel_state->wm_results.dirty_pipes = ~0; } + /* + * We're not recomputing for the pipes not included in the commit, so + * make sure we start with the current state. 
+ */ + memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); + for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) { struct intel_crtc_state *cstate; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6c11168facd6..a38c2fefe85a 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -1139,7 +1139,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) intel_power_sequencer_reset(dev_priv); - intel_hpd_poll_init(dev_priv); + /* Prevent us from re-enabling polling on accident in late suspend */ + if (!dev_priv->drm.dev->power.is_suspended) + intel_hpd_poll_init(dev_priv); } static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 98df09c2b388..9672b579f950 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -357,8 +357,8 @@ static int imx_drm_bind(struct device *dev) int ret; drm = drm_dev_alloc(&imx_drm_driver, dev); - if (!drm) - return -ENOMEM; + if (IS_ERR(drm)) + return PTR_ERR(drm); imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL); if (!imxdrm) { @@ -436,9 +436,11 @@ static int imx_drm_bind(struct device *dev) err_fbhelper: drm_kms_helper_poll_fini(drm); +#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) if (imxdrm->fbhelper) drm_fbdev_cma_fini(imxdrm->fbhelper); err_unbind: +#endif component_unbind_all(drm->dev, drm); err_vblank: drm_vblank_cleanup(drm); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 4e1ae3fc462d..6be515a9fb69 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, ipu_dc_disable_channel(ipu_crtc->dc); ipu_di_disable(ipu_crtc->di); + /* + * Planes must be disabled before DC clock is removed, as otherwise the + * attached IDMACs will be left in undefined state, possibly hanging + * the IPU or even system. 
+ */ + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); ipu_dc_disable(ipu); spin_lock_irq(&crtc->dev->event_lock); @@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc, } spin_unlock_irq(&crtc->dev->event_lock); - /* always disable planes on the CRTC */ - drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); - drm_crtc_vblank_off(crtc); } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index ce22d0a0ddc8..d5864ed4d772 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -103,11 +103,11 @@ drm_plane_state_to_vbo(struct drm_plane_state *state) (state->src_x >> 16) / 2 - eba; } -static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane, - struct drm_plane_state *old_state) +static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane) { struct drm_plane *plane = &ipu_plane->base; struct drm_plane_state *state = plane->state; + struct drm_crtc_state *crtc_state = state->crtc->state; struct drm_framebuffer *fb = state->fb; unsigned long eba, ubo, vbo; int active; @@ -117,7 +117,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane, switch (fb->pixel_format) { case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: - if (old_state->fb) + if (!drm_atomic_crtc_needs_modeset(crtc_state)) break; /* @@ -149,7 +149,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane, break; } - if (old_state->fb) { + if (!drm_atomic_crtc_needs_modeset(crtc_state)) { active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); @@ -259,6 +259,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, struct drm_framebuffer *fb = state->fb; struct drm_framebuffer *old_fb = old_state->fb; unsigned long eba, ubo, vbo, old_ubo, old_vbo; + int hsub, vsub; /* Ok to disable */ if (!fb) @@ -355,7 +356,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) return -EINVAL; - if (old_fb) { + if (old_fb && + (old_fb->pixel_format == DRM_FORMAT_YUV420 || + old_fb->pixel_format == DRM_FORMAT_YVU420)) { old_ubo = drm_plane_state_to_ubo(old_state); old_vbo = drm_plane_state_to_vbo(old_state); if (ubo != old_ubo || vbo != old_vbo) @@ -370,6 +373,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (old_fb && old_fb->pitches[1] != fb->pitches[1]) crtc_state->mode_changed = true; + + /* + * The x/y offsets must be even in case of horizontal/vertical + * chroma subsampling. 
+ */ + hsub = drm_format_horz_chroma_subsampling(fb->pixel_format); + vsub = drm_format_vert_chroma_subsampling(fb->pixel_format); + if (((state->src_x >> 16) & (hsub - 1)) || + ((state->src_y >> 16) & (vsub - 1))) + return -EINVAL; } return 0; @@ -392,7 +405,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, struct drm_crtc_state *crtc_state = state->crtc->state; if (!drm_atomic_crtc_needs_modeset(crtc_state)) { - ipu_plane_atomic_set_base(ipu_plane, old_state); + ipu_plane_atomic_set_base(ipu_plane); return; } } @@ -424,6 +437,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false); break; default: + ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); break; } } @@ -437,7 +451,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]); - ipu_plane_atomic_set_base(ipu_plane, old_state); + ipu_plane_atomic_set_base(ipu_plane); ipu_plane_enable(ipu_plane); } diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 919b35f2ad24..dcf7d11ac380 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -266,6 +266,9 @@ int mgag200_mm_init(struct mga_device *mdev) return ret; } + arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); + mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0)); @@ -274,10 +277,14 @@ int mgag200_mm_init(struct mga_device *mdev) void mgag200_mm_fini(struct mga_device *mdev) { + struct drm_device *dev = mdev->dev; + ttm_bo_device_release(&mdev->ttm.bdev); mgag200_ttm_global_release(mdev); + arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); arch_phys_wc_del(mdev->fb_mtrr); mdev->fb_mtrr = 0; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f05ed0e1f3d6..6f240021705b 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -139,6 +139,7 @@ struct msm_dsi_host { u32 err_work_state; struct work_struct err_work; + struct work_struct hpd_work; struct workqueue_struct *workqueue; /* DSI 6G TX buffer*/ @@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host) wmb(); /* make sure dsi controller enabled again */ } +static void dsi_hpd_worker(struct work_struct *work) +{ + struct msm_dsi_host *msm_host = + container_of(work, struct msm_dsi_host, hpd_work); + + drm_helper_hpd_irq_event(msm_host->dev); +} + static void dsi_err_worker(struct work_struct *work) { struct msm_dsi_host *msm_host = @@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host, DBG("id=%d", msm_host->id); if (msm_host->dev) - drm_helper_hpd_irq_event(msm_host->dev); + queue_work(msm_host->workqueue, &msm_host->hpd_work); return 0; } @@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host, DBG("id=%d", msm_host->id); if (msm_host->dev) - drm_helper_hpd_irq_event(msm_host->dev); + queue_work(msm_host->workqueue, &msm_host->hpd_work); return 0; } @@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi) /* setup workqueue */ msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0); INIT_WORK(&msm_host->err_work, dsi_err_worker); + INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker); msm_dsi->host = 
&msm_host->base; msm_dsi->id = msm_host->id; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c index 598fdaff0a41..26e3a01a99c2 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c @@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm) .parent_names = (const char *[]){ "xo" }, .num_parents = 1, .name = vco_name, + .flags = CLK_IGNORE_UNUSED, .ops = &clk_ops_dsi_pll_28nm_vco, }; struct device *dev = &pll_28nm->pdev->dev; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c index 38c90e1eb002..49008451085b 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c @@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm) struct clk_init_data vco_init = { .parent_names = (const char *[]){ "pxo" }, .num_parents = 1, + .flags = CLK_IGNORE_UNUSED, .ops = &clk_ops_dsi_pll_28nm_vco, }; struct device *dev = &pll_28nm->pdev->dev; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c index aa94a553794f..143eab46ba68 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c @@ -702,6 +702,7 @@ static struct clk_init_data pll_init = { .ops = &hdmi_8996_pll_ops, .parent_names = hdmi_pll_parents, .num_parents = ARRAY_SIZE(hdmi_pll_parents), + .flags = CLK_IGNORE_UNUSED, }; int msm_hdmi_pll_8996_init(struct platform_device *pdev) diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c index 92da69aa6187..99590758c68b 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c @@ -424,6 +424,7 @@ static struct clk_init_data pll_init = { .ops = &hdmi_pll_ops, .parent_names = hdmi_pll_parents, .num_parents = ARRAY_SIZE(hdmi_pll_parents), + .flags = CLK_IGNORE_UNUSED, }; int msm_hdmi_pll_8960_init(struct platform_device *pdev) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index ac9e4cde1380..8b4e3004f451 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = { .count = 2, .base = { 0x14000, 0x16000 }, .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | - MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, + MDP_PIPE_CAP_DECIMATION, }, .pipe_dma = { .count = 1, @@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = { .lm = { .count = 2, /* LM0 and LM3 */ .base = { 0x44000, 0x47000 }, - .nb_stages = 5, + .nb_stages = 8, .max_width = 2048, .max_height = 0xFFFF, }, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index fa2be7ce9468..c205c360e16d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -223,12 +223,7 @@ static void blend_setup(struct drm_crtc *crtc) plane_cnt++; } - /* - * If there is no base layer, enable border color. - * Although it's not possbile in current blend logic, - * put it here as a reminder. - */ - if (!pstates[STAGE_BASE] && plane_cnt) { + if (!pstates[STAGE_BASE]) { ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; DBG("Border Color is enabled"); } @@ -365,6 +360,15 @@ static int pstate_cmp(const void *a, const void *b) return pa->state->zpos - pb->state->zpos; } +/* is there a helper for this? 
*/ +static bool is_fullscreen(struct drm_crtc_state *cstate, + struct drm_plane_state *pstate) +{ + return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) && + ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) && + ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay); +} + static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state) { @@ -375,21 +379,11 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; const struct drm_plane_state *pstate; - int cnt = 0, i; + int cnt = 0, base = 0, i; DBG("%s: check", mdp5_crtc->name); - /* verify that there are not too many planes attached to crtc - * and that we don't have conflicting mixer stages: - */ - hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) { - if (cnt >= (hw_cfg->lm.nb_stages)) { - dev_err(dev->dev, "too many planes!\n"); - return -EINVAL; - } - - pstates[cnt].plane = plane; pstates[cnt].state = to_mdp5_plane_state(pstate); @@ -399,8 +393,24 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, /* assign a stage based on sorted zpos property */ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL); + /* if the bottom-most layer is not fullscreen, we need to use + * it for solid-color: + */ + if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) + base++; + + /* verify that there are not too many planes attached to crtc + * and that we don't have conflicting mixer stages: + */ + hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); + + if ((cnt + base) >= hw_cfg->lm.nb_stages) { + dev_err(dev->dev, "too many planes!\n"); + return -EINVAL; + } + for (i = 0; i < cnt; i++) { - pstates[i].state->stage = STAGE_BASE + i; + pstates[i].state->stage = STAGE_BASE + i + base; DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name, pipe2name(mdp5_plane_pipe(pstates[i].plane)), pstates[i].state->stage); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 951c002b05df..83bf997dda03 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -292,8 +292,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, format = to_mdp_format(msm_framebuffer_format(state->fb)); if (MDP_FORMAT_IS_YUV(format) && !pipe_supports_yuv(mdp5_plane->caps)) { - dev_err(plane->dev->dev, - "Pipe doesn't support YUV\n"); + DBG("Pipe doesn't support YUV\n"); return -EINVAL; } @@ -301,8 +300,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) && (((state->src_w >> 16) != state->crtc_w) || ((state->src_h >> 16) != state->crtc_h))) { - dev_err(plane->dev->dev, - "Pipe doesn't support scaling (%dx%d -> %dx%d)\n", + DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n", state->src_w >> 16, state->src_h >> 16, state->crtc_w, state->crtc_h); @@ -313,8 +311,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, vflip = !!(state->rotation & DRM_REFLECT_Y); if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) || (hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) { - dev_err(plane->dev->dev, - "Pipe doesn't support flip\n"); + DBG("Pipe doesn't support flip\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index fb5c0b0a7594..46568fc80848 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -228,7 +228,7 @@ static int 
msm_drm_uninit(struct device *dev) flush_workqueue(priv->atomic_wq); destroy_workqueue(priv->atomic_wq); - if (kms) + if (kms && kms->funcs) kms->funcs->destroy(kms); if (gpu) { diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 283d2841ba58..192b2d3a79cb 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev) void msm_gem_shrinker_cleanup(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; - WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); - unregister_shrinker(&priv->shrinker); + + if (priv->shrinker.nr_deferred) { + WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier)); + unregister_shrinker(&priv->shrinker); + } } diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index dc57b628e074..193573d191e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev) if (!parent_adev) return false; - return acpi_has_method(parent_adev->handle, "_PR3"); + return parent_adev->power.flags.power_resources && + acpi_has_method(parent_adev->handle, "_PR3"); } static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out, diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 1825dbc33192..a6dbe8258040 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -398,6 +398,9 @@ nouveau_ttm_init(struct nouveau_drm *drm) /* VRAM init */ drm->gem.vram_available = drm->device.info.ram_user; + arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1), + device->func->resource_size(device, 1)); + ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM, drm->gem.vram_available >> PAGE_SHIFT); if (ret) { @@ -430,6 +433,8 @@ nouveau_ttm_init(struct nouveau_drm *drm) void nouveau_ttm_fini(struct nouveau_drm *drm) { + struct nvkm_device *device = nvxx_device(&drm->device); + ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM); ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT); @@ -439,4 +444,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm) arch_phys_wc_del(drm->ttm.mtrr); drm->ttm.mtrr = 0; + arch_io_free_memtype_wc(device->func->resource_addr(device, 1), + device->func->resource_size(device, 1)); + } diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 103fc8650197..a0d4a0522fdc 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev) void cayman_cp_int_cntl_setup(struct radeon_device *rdev, int ring, u32 cp_int_cntl) { - u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3; - - WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3)); + WREG32(SRBM_GFX_CNTL, RINGID(ring)); WREG32(CP_INT_CNTL, cp_int_cntl); } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index e18839d52e3e..27affbde058c 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); - if (radeon_connector->ddc_bus->has_aux) { + if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) { 
drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux); radeon_connector->ddc_bus->has_aux = false; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index eb92aef46e3c..621af069a3d2 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = { "LAST", }; +#if defined(CONFIG_VGA_SWITCHEROO) +bool radeon_has_atpx_dgpu_power_cntl(void); +bool radeon_is_atpx_hybrid(void); +#else +static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } +static inline bool radeon_is_atpx_hybrid(void) { return false; } +#endif + #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) @@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev) if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX) rdev->flags &= ~RADEON_IS_PX; + + /* disable PX is the system doesn't support dGPU power control or hybrid gfx */ + if (!radeon_is_atpx_hybrid() && + !radeon_has_atpx_dgpu_power_cntl()) + rdev->flags &= ~RADEON_IS_PX; } /** diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c index 2d465648856a..474a8a1886f7 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c @@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg tmp &= AUX_HPD_SEL(0x7); tmp |= AUX_HPD_SEL(chan->rec.hpd); - tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1); + tmp |= AUX_EN | AUX_LS_READ_EN; WREG32(AUX_CONTROL + aux_offset[instance], tmp); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index be30861afae9..41b72ce6613f 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -446,6 +446,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev) int radeon_bo_init(struct radeon_device *rdev) { + /* reserve PAT memory space to WC for VRAM */ + arch_io_reserve_memtype_wc(rdev->mc.aper_base, + rdev->mc.aper_size); + /* Add an MTRR for the VRAM */ if (!rdev->fastfb_working) { rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base, @@ -463,6 +467,7 @@ void radeon_bo_fini(struct radeon_device *rdev) { radeon_ttm_fini(rdev); arch_phys_wc_del(rdev->mc.vram_mtrr); + arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size); } /* Returns how many bytes TTM can move per IB. 
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 89bdf20344ae..c49934527a87 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, int i; struct si_dpm_quirk *p = si_dpm_quirk_list; + /* limit all SI kickers */ + if (rdev->family == CHIP_PITCAIRN) { + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->device == 0x6810) || + (rdev->pdev->device == 0x6811) || + (rdev->pdev->device == 0x6816) || + (rdev->pdev->device == 0x6817) || + (rdev->pdev->device == 0x6806)) + max_mclk = 120000; + } else if (rdev->family == CHIP_VERDE) { + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87) || + (rdev->pdev->device == 0x6820) || + (rdev->pdev->device == 0x6821) || + (rdev->pdev->device == 0x6822) || + (rdev->pdev->device == 0x6823) || + (rdev->pdev->device == 0x682A) || + (rdev->pdev->device == 0x682B)) { + max_sclk = 75000; + max_mclk = 80000; + } + } else if (rdev->family == CHIP_OLAND) { + if ((rdev->pdev->revision == 0xC7) || + (rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605)) { + max_sclk = 75000; + max_mclk = 80000; + } + } else if (rdev->family == CHIP_HAINAN) { + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0xC3) || + (rdev->pdev->device == 0x6664) || + (rdev->pdev->device == 0x6665) || + (rdev->pdev->device == 0x6667)) { + max_sclk = 75000; + max_mclk = 80000; + } + } /* Apply dpm quirks */ while (p && p->chip_device != 0) { if (rdev->pdev->vendor == p->chip_vendor && @@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, } ++p; } - /* limit mclk on all R7 370 parts for stability */ - if (rdev->pdev->device == 0x6811 && - rdev->pdev->revision == 0x81) - max_mclk = 120000; - /* limit sclk/mclk on Jet parts for stability */ - if (rdev->pdev->device == 0x6665 && - rdev->pdev->revision == 0xc3) { - max_sclk = 75000; - max_mclk = 80000; - } if (rps->vce_active) { rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index bd9c3bb9252c..392c7e6de042 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -231,8 +231,16 @@ static int rcar_du_atomic_check(struct drm_device *dev, struct rcar_du_device *rcdu = dev->dev_private; int ret; - ret = drm_atomic_helper_check(dev, state); - if (ret < 0) + ret = drm_atomic_helper_check_modeset(dev, state); + if (ret) + return ret; + + ret = drm_atomic_normalize_zpos(dev, state); + if (ret) + return ret; + + ret = drm_atomic_helper_check_planes(dev, state); + if (ret) return ret; if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index 2784919a7366..9df308565f6c 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -195,6 +195,26 @@ static void sti_atomic_work(struct work_struct *work) sti_atomic_complete(private, private->commit.state); } +static int sti_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + int ret; + + ret = drm_atomic_helper_check_modeset(dev, state); + if (ret) + return ret; + + ret = drm_atomic_normalize_zpos(dev, state); + if (ret) + return ret; + + 
ret = drm_atomic_helper_check_planes(dev, state); + if (ret) + return ret; + + return ret; +} + static int sti_atomic_commit(struct drm_device *drm, struct drm_atomic_state *state, bool nonblock) { @@ -248,7 +268,7 @@ static void sti_output_poll_changed(struct drm_device *ddev) static const struct drm_mode_config_funcs sti_mode_config_funcs = { .fb_create = drm_fb_cma_create, .output_poll_changed = sti_output_poll_changed, - .atomic_check = drm_atomic_helper_check, + .atomic_check = sti_atomic_check, .atomic_commit = sti_atomic_commit, }; diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 29f0207fa677..873f010d9616 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -98,17 +98,23 @@ success: static int udl_select_std_channel(struct udl_device *udl) { int ret; - u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, - 0x1C, 0x88, 0x5E, 0x15, - 0x60, 0xFE, 0xC6, 0x97, - 0x16, 0x3D, 0x47, 0xF2}; + static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7, + 0x1C, 0x88, 0x5E, 0x15, + 0x60, 0xFE, 0xC6, 0x97, + 0x16, 0x3D, 0x47, 0xF2}; + void *sendbuf; + + sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); + if (!sendbuf) + return -ENOMEM; ret = usb_control_msg(udl->udev, usb_sndctrlpipe(udl->udev, 0), NR_USB_REQUEST_CHANNEL, (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, - set_def_chn, sizeof(set_def_chn), + sendbuf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); + kfree(sendbuf); return ret < 0 ? ret : 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 7cf3678623c3..58048709c34e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -338,8 +338,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_modeset_enables(dev, state); - drm_atomic_helper_commit_planes(dev, state, - DRM_PLANE_COMMIT_ACTIVE_ONLY); + drm_atomic_helper_commit_planes(dev, state, 0); drm_atomic_helper_commit_hw_done(state); diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c index 2ba7d437a2af..805b6fa7b5f4 100644 --- a/drivers/gpu/ipu-v3/ipu-image-convert.c +++ b/drivers/gpu/ipu-v3/ipu-image-convert.c @@ -1617,7 +1617,7 @@ ipu_image_convert(struct ipu_soc *ipu, enum ipu_ic_task ic_task, ctx = ipu_image_convert_prepare(ipu, ic_task, in, out, rot_mode, complete, complete_context); if (IS_ERR(ctx)) - return ERR_PTR(PTR_ERR(ctx)); + return ERR_CAST(ctx); run = kzalloc(sizeof(*run), GFP_KERNEL); if (!run) { |