Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 26
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_drv.c | 5
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_component.c | 42
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_crtc.c | 89
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_dev.c | 5
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_drv.c | 8
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 5
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.h | 4
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c | 19
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h | 6
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c | 2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_plane.c | 4
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 6
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 5
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 3
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 3
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 5
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.h | 3
-rw-r--r--  drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 3
-rw-r--r--  drivers/gpu/drm/ast/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_dp501.c | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 22
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 46
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 346
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 77
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 60
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 7
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 10
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 18
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 20
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 3
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 12
-rw-r--r--  drivers/gpu/drm/bochs/bochs.h | 6
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 17
-rw-r--r--  drivers/gpu/drm/bochs/bochs_hw.c | 4
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 12
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 292
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.h | 2
-rw-r--r--  drivers/gpu/drm/bridge/dumb-vga-dac.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 44
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 51
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 47
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 673
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 46
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 6
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus.c | 2
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 45
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 4
-rw-r--r--  drivers/gpu/drm/drm_client.c | 1
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 107
-rw-r--r--  drivers/gpu/drm/drm_debugfs_crc.c | 15
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 2
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c | 18
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 31
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 142
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_file.c | 9
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 18
-rw-r--r--  drivers/gpu/drm/drm_gem_framebuffer_helper.c | 72
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 94
-rw-r--r--  drivers/gpu/drm/drm_hdcp.c | 77
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 13
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 139
-rw-r--r--  drivers/gpu/drm/drm_legacy_misc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 2
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mipi_dbi.c (renamed from drivers/gpu/drm/tinydrm/mipi-dbi.c) | 499
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mode_object.c | 4
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 17
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 848
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 2
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 11
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 43
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 25
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 28
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 14
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 21
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c | 1
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 6
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 10
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 9
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 8
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 8
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 10
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_priv.h | 1
-rw-r--r--  drivers/gpu/drm/i2c/sil164_drv.c | 3
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 17
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 8
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 15
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 81
-rw-r--r--  drivers/gpu/drm/i915/Makefile.header-test | 22
-rw-r--r--  drivers/gpu/drm/i915/display/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/display/Makefile.header-test | 16
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 185
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 56
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 81
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 84
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 193
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 442
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 1245
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 658
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 292
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 652
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi.h | 12
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 65
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 75
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 59
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 314
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 335
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 537
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.h | 35
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/Makefile.header-test | 16
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 83
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 97
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_throttle.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_wait.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 69
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 85
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 59
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/Makefile.header-test | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/gen6_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen6.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen7.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen8.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/gen9_renderstate.c (renamed from drivers/gpu/drm/i915/intel_renderstate_gen9.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 123
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 75
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 203
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 74
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 250
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 60
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 62
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_types.h | 96
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_hangcheck.c | 71
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 1017
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 62
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.c (renamed from drivers/gpu/drm/i915/i915_gem_render_state.c) | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_renderstate.h (renamed from drivers/gpu/drm/i915/intel_renderstate.h) | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 624
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.h | 75
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset_types.h | 50
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 218
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c (renamed from drivers/gpu/drm/i915/i915_timeline.c) | 230
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.h | 93
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline_types.h (renamed from drivers/gpu/drm/i915/i915_timeline_types.h) | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 227
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 511
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 391
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 133
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_timeline.c (renamed from drivers/gpu/drm/i915/selftests/i915_timeline.c) | 131
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c | 155
-rw-r--r--  drivers/gpu/drm/i915/gt/selftests/mock_timeline.c (renamed from drivers/gpu/drm/i915/selftests/mock_timeline.c) | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/selftests/mock_timeline.h (renamed from drivers/gpu/drm/i915/selftests/mock_timeline.h) | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/Makefile | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c (renamed from drivers/gpu/drm/i915/intel_guc.c) | 290
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h (renamed from drivers/gpu/drm/i915/intel_guc.h) | 33
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c (renamed from drivers/gpu/drm/i915/intel_guc_ads.c) | 29
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h (renamed from drivers/gpu/drm/i915/intel_guc_ads.h) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c (renamed from drivers/gpu/drm/i915/intel_guc_ct.c) | 22
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h (renamed from drivers/gpu/drm/i915/intel_guc_ct.h) | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c | 181
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h (renamed from drivers/gpu/drm/i915/intel_guc_fw.h) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h (renamed from drivers/gpu/drm/i915/intel_guc_fwif.h) | 81
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c (renamed from drivers/gpu/drm/i915/intel_guc_log.c) | 48
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.h (renamed from drivers/gpu/drm/i915/intel_guc_log.h) | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h (renamed from drivers/gpu/drm/i915/intel_guc_reg.h) | 38
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c (renamed from drivers/gpu/drm/i915/intel_guc_submission.c) | 496
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h (renamed from drivers/gpu/drm/i915/intel_guc_submission.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c (renamed from drivers/gpu/drm/i915/intel_huc.c) | 69
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.h (renamed from drivers/gpu/drm/i915/intel_huc.h) | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 53
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h (renamed from drivers/gpu/drm/i915/intel_huc_fw.h) | 0
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 570
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.h (renamed from drivers/gpu/drm/i915/intel_uc.h) | 39
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 540
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h (renamed from drivers/gpu/drm/i915/intel_uc_fw.h) | 102
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h | 82
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c (renamed from drivers/gpu/drm/i915/selftests/intel_guc.c) | 49
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 321
-rw-r--r--  drivers/gpu/drm/i915/i915_active.h | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_active_types.h | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 217
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 208
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 248
-rw-r--r--  drivers/gpu/drm/i915/i915_fixed.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 318
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_batch_pool.c | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.h | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2043
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 200
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.h | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_globals.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 588
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 803
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.h | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 43
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 304
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_priolist_types.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_pvinfo.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 231
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 126
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_selftest.h | 27
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h | 94
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c | 63
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 73
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 37
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 308
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_huc_fw.c | 215
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 441
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 561
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.c | 357
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 465
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.c | 40
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.h | 4
-rw-r--r--  drivers/gpu/drm/i915/oa/Makefile | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_bdw.c (renamed from drivers/gpu/drm/i915/i915_oa_bdw.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_bdw.h (renamed from drivers/gpu/drm/i915/i915_oa_bdw.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_bxt.c (renamed from drivers/gpu/drm/i915/i915_oa_bxt.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_bxt.h (renamed from drivers/gpu/drm/i915/i915_oa_bxt.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c (renamed from drivers/gpu/drm/i915/i915_oa_cflgt2.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h (renamed from drivers/gpu/drm/i915/i915_oa_cflgt2.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c (renamed from drivers/gpu/drm/i915/i915_oa_cflgt3.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h (renamed from drivers/gpu/drm/i915/i915_oa_cflgt3.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_chv.c (renamed from drivers/gpu/drm/i915/i915_oa_chv.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_chv.h (renamed from drivers/gpu/drm/i915/i915_oa_chv.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cnl.c (renamed from drivers/gpu/drm/i915/i915_oa_cnl.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_cnl.h (renamed from drivers/gpu/drm/i915/i915_oa_cnl.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_glk.c (renamed from drivers/gpu/drm/i915/i915_oa_glk.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_glk.h (renamed from drivers/gpu/drm/i915/i915_oa_glk.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_hsw.c (renamed from drivers/gpu/drm/i915/i915_oa_hsw.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_hsw.h (renamed from drivers/gpu/drm/i915/i915_oa_hsw.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_icl.c (renamed from drivers/gpu/drm/i915/i915_oa_icl.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_icl.h (renamed from drivers/gpu/drm/i915/i915_oa_icl.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c (renamed from drivers/gpu/drm/i915/i915_oa_kblgt2.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h (renamed from drivers/gpu/drm/i915/i915_oa_kblgt2.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c (renamed from drivers/gpu/drm/i915/i915_oa_kblgt3.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h (renamed from drivers/gpu/drm/i915/i915_oa_kblgt3.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c (renamed from drivers/gpu/drm/i915/i915_oa_sklgt2.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h (renamed from drivers/gpu/drm/i915/i915_oa_sklgt2.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c (renamed from drivers/gpu/drm/i915/i915_oa_sklgt3.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h (renamed from drivers/gpu/drm/i915/i915_oa_sklgt3.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c (renamed from drivers/gpu/drm/i915/i915_oa_sklgt4.c) | 0
-rw-r--r--  drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h (renamed from drivers/gpu/drm/i915/i915_oa_sklgt4.h) | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c | 124
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 43
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c | 65
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.c | 38
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.h | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.h | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_wedge_me.h | 58
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 18
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_uncore.c | 4
-rw-r--r--  drivers/gpu/drm/imx/dw_hdmi-imx.c | 14
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 13
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 29
-rw-r--r--  drivers/gpu/drm/imx/imx-tve.c | 16
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 8
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 5
-rw-r--r--  drivers/gpu/drm/imx/parallel-display.c | 8
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm.c | 75
-rw-r--r--  drivers/gpu/drm/lima/lima_device.c | 41
-rw-r--r--  drivers/gpu/drm/lima/lima_drv.c | 20
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 2
-rw-r--r--  drivers/gpu/drm/lima/lima_gem_prime.c | 3
-rw-r--r--  drivers/gpu/drm/lima/lima_object.c | 9
-rw-r--r--  drivers/gpu/drm/lima/lima_object.h | 3
-rw-r--r--  drivers/gpu/drm/lima/lima_vm.h | 4
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drv.c | 10
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_color.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dpi.c | 18
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 33
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_fb.c | 35
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_fb.h | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.c | 7
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_plane.c | 4
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 14
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_hdmi.c | 14
-rw-r--r--  drivers/gpu/drm/meson/meson_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 33
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.h | 11
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 19
-rw-r--r--  drivers/gpu/drm/meson/meson_overlay.c | 13
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c | 16
-rw-r--r--  drivers/gpu/drm/meson/meson_registers.h | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.c | 7
-rw-r--r--  drivers/gpu/drm/meson/meson_vclk.h | 4
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c | 10
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.h | 2
-rw-r--r--  drivers/gpu/drm/meson/meson_venc_cvbs.c | 11
-rw-r--r--  drivers/gpu/drm/meson/meson_viu.c | 7
-rw-r--r--  drivers/gpu/drm/meson/meson_vpp.c | 7
-rw-r--r--  drivers/gpu/drm/meson/meson_vpp.h | 3
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 13
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 7
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h | 27
-rw-r--r--  drivers/gpu/drm/mga/mga_ioc32.c | 3
-rw-r--r--  drivers/gpu/drm/mga/mga_irq.c | 12
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c | 8
-rw-r--r--  drivers/gpu/drm/mga/mga_warp.c | 4
-rw-r--r--  drivers/gpu/drm/mgag200/Makefile | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_cursor.c | 11
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 10
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 40
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 315
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c | 6
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 96
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 59
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 25
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_drv.c | 16
-rw-r--r--  drivers/gpu/drm/mxsfb/mxsfb_out.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/wndw.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 15
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 22
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 5
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.h | 3
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c | 2
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-lvds.c | 5
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt39016.c | 359
-rw-r--r--  drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c | 13
-rw-r--r--  drivers/gpu/drm/panel/panel-raydium-rm67191.c | 668
-rw-r--r--  drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c | 75
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 343
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.c | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h | 1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 41
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c | 2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/pl111/pl111_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/pl111/pl111_display.c | 23
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drm.h | 11
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drv.c | 13
-rw-r--r--  drivers/gpu/drm/pl111/pl111_nomadik.h | 3
-rw-r--r--  drivers/gpu/drm/pl111/pl111_versatile.c | 9
-rw-r--r--  drivers/gpu/drm/pl111/pl111_versatile.h | 3
-rw-r--r--  drivers/gpu/drm/pl111/pl111_vexpress.c | 1
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_debugfs.c | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 11
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 21
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 13
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_irq.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 9
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c | 20
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 14
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 20
-rw-r--r--  drivers/gpu/drm/r128/r128_ioc32.c | 3
-rw-r--r--  drivers/gpu/drm/r128/r128_irq.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 6
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/Makefile | 3
-rw-r--r--  drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 116
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.c | 17
-rw-r--r--  drivers/gpu/drm/rockchip/cdn-dp-core.h | 2
-rw-r--r--  drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c | 9
-rw-r--r--  drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/inno_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 17
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 29
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 8
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_psr.c | 282
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_psr.h | 22
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 117
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_lvds.c | 16
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_rgb.c | 9
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 11
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler_trace.h | 2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 3
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c | 6
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 3
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_framebuffer.c | 7
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.h | 4
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_kms.c | 1
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_plane.h | 1
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_regs.h | 3
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c | 6
-rw-r--r--  drivers/gpu/drm/sti/sti_dvo.c | 8
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 16
-rw-r--r--  drivers/gpu/drm/stm/drv.c | 5
-rw-r--r--  drivers/gpu/drm/stm/dw_mipi_dsi-stm.c | 10
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 16
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.c | 13
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_framebuffer.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_frontend.c | 10
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 24
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_layer.c | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 28
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tv.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c | 9
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_csc.c | 157
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_csc.h | 6
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.c | 14
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_tcon_top.c | 6
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_ui_layer.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_vi_layer.c | 22
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 11
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 30
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 7
-rw-r--r--  drivers/gpu/drm/tegra/gem.h | 3
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 46
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 25
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.h | 33
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_external.c | 89
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_external.h | 1
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.c | 20
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_plane.c | 4
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 17
-rw-r--r--  drivers/gpu/drm/tiny/Kconfig (renamed from drivers/gpu/drm/tinydrm/Kconfig) | 64
-rw-r--r--  drivers/gpu/drm/tiny/Makefile (renamed from drivers/gpu/drm/tinydrm/Makefile) | 6
-rw-r--r--  drivers/gpu/drm/tiny/gm12u320.c | 814
-rw-r--r--  drivers/gpu/drm/tiny/hx8357d.c (renamed from drivers/gpu/drm/tinydrm/hx8357d.c) | 64
-rw-r--r--  drivers/gpu/drm/tiny/ili9225.c (renamed from drivers/gpu/drm/tinydrm/ili9225.c) | 185
-rw-r--r--  drivers/gpu/drm/tiny/ili9341.c (renamed from drivers/gpu/drm/tinydrm/ili9341.c) | 86
-rw-r--r--  drivers/gpu/drm/tiny/mi0283qt.c (renamed from drivers/gpu/drm/tinydrm/mi0283qt.c) | 93
-rw-r--r--  drivers/gpu/drm/tiny/repaper.c (renamed from drivers/gpu/drm/tinydrm/repaper.c) | 61
-rw-r--r--  drivers/gpu/drm/tiny/st7586.c (renamed from drivers/gpu/drm/tinydrm/st7586.c) | 134
-rw-r--r--  drivers/gpu/drm/tiny/st7735r.c (renamed from drivers/gpu/drm/tinydrm/st7735r.c) | 81
-rw-r--r--  drivers/gpu/drm/tinydrm/core/Makefile | 4
-rw-r--r--  drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c | 207
-rw-r--r--  drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c | 179
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 141
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 18
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 15
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 22
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 2
-rw-r--r--  drivers/gpu/drm/tve200/tve200_display.c | 8
-rw-r--r--  drivers/gpu/drm/tve200/tve200_drm.h | 15
-rw-r--r--  drivers/gpu/drm/tve200/tve200_drv.c | 8
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 4
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_dmabuf.c | 11
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.c | 9
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 11
-rw-r--r--  drivers/gpu/drm/udl/udl_encoder.c | 6
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 15
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 9
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 6
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 6
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 6
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h | 13
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 12
-rw-r--r--  drivers/gpu/drm/v3d/v3d_irq.c | 2
-rw-r--r--  drivers/gpu/drm/vboxvideo/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.c | 13
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.h | 12
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_main.c | 2
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_prime.c | 56
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c | 7
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c | 11
-rw-r--r--  drivers/gpu/drm/vc4/vc4_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 20
-rw-r--r--  drivers/gpu/drm/vc4/vc4_dsi.c | 17
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hvs.c | 5
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_txp.c | 14
-rw-r--r--  drivers/gpu/drm/vc4/vc4_v3d.c | 4
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 21
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.h | 1
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c | 24
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 43
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 41
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 7
-rw-r--r--  drivers/gpu/drm/via/via_drv.h | 75
-rw-r--r--  drivers/gpu/drm/via/via_irq.c | 54
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 6
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 7
-rw-r--r--  drivers/gpu/drm/via/via_verifier.c | 22
-rw-r--r--  drivers/gpu/drm/via/via_video.c | 5
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 7
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 9
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 30
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c | 5
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c | 13
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 7
-rw-r--r--  drivers/gpu/drm/vkms/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vkms/vkms_composer.c (renamed from drivers/gpu/drm/vkms/vkms_crc.c) | 169
-rw-r--r--  drivers/gpu/drm/vkms/vkms_crtc.c | 100
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c | 50
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.h | 44
-rw-r--r--  drivers/gpu/drm/vkms/vkms_gem.c | 1
-rw-r--r--  drivers/gpu/drm/vkms/vkms_output.c | 6
-rw-r--r--  drivers/gpu/drm/vkms/vkms_plane.c | 46
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 52
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 4
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.c | 16
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.h | 11
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_cfg.c | 4
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_conn.c | 1
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_conn.h | 7
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_evtchnl.c | 4
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_gem.c | 11
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_gem.h | 7
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_kms.c | 7
-rw-r--r--  drivers/gpu/drm/zte/zx_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/zte/zx_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/zte/zx_plane.c | 2
-rw-r--r--  drivers/gpu/drm/zte/zx_tvenc.c | 4
-rw-r--r--  drivers/gpu/drm/zte/zx_vga.c | 4
-rw-r--r--  drivers/gpu/drm/zte/zx_vou.c | 5
693 files changed, 20497 insertions, 16006 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 3c88420e3497..e67c194c2aca 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -24,6 +24,10 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support if it is available for your platform.
+config DRM_MIPI_DBI
+ tristate
+ depends on DRM
+
config DRM_MIPI_DSI
bool
depends on DRM
@@ -336,7 +340,7 @@ source "drivers/gpu/drm/mxsfb/Kconfig"
source "drivers/gpu/drm/meson/Kconfig"
-source "drivers/gpu/drm/tinydrm/Kconfig"
+source "drivers/gpu/drm/tiny/Kconfig"
source "drivers/gpu/drm/pl111/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9f0d2ee35794..10f8329a8b71 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
obj-$(CONFIG_DRM_DEBUG_SELFTEST) += selftests/
obj-$(CONFIG_DRM) += drm.o
+obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
obj-y += arm/
@@ -111,7 +112,7 @@ obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
-obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
+obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
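
Note: these two build hunks dissolve the monolithic tinydrm stack: the build now always descends into drivers/gpu/drm/tiny/, and each small driver selects the new DRM_MIPI_DBI tristate for itself instead of depending on DRM_TINYDRM. A minimal C sketch of a converted driver's view of the relocated helper library, assuming the header moved alongside drm_mipi_dbi.c (header renames fall outside this drivers/gpu/drm diffstat):

#include <video/mipi_display.h>
#include <drm/drm_mipi_dbi.h>	/* assumed new home; was drm/tinydrm/mipi-dbi.h */

/* The helpers keep their mipi_dbi_* names after the move, so a panel
 * enable path still issues DCS commands the same way:
 */
static void my_panel_enable(struct mipi_dbi *mipi)
{
	mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
}
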
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 14d9c250b3d3..e0c47ae52fc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -218,7 +218,7 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct amdgpu_amdkfd_fence *ef)
{
- struct reservation_object *resv = bo->tbo.resv;
+ struct reservation_object *resv = bo->tbo.base.resv;
struct reservation_object_list *old, *new;
unsigned int i, j, k;
@@ -812,7 +812,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
struct amdgpu_bo *pd = peer_vm->root.base.bo;
ret = amdgpu_sync_resv(NULL,
- sync, pd->tbo.resv,
+ sync, pd->tbo.base.resv,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
return ret;
@@ -887,7 +887,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
- ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
+ ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(vm->root.base.bo,
@@ -2133,7 +2133,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
* Add process eviction fence to bo so they can
* evict each other.
*/
- ret = reservation_object_reserve_shared(gws_bo->tbo.resv, 1);
+ ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
if (ret)
goto reserve_shared_fail;
amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
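
Note: this file shows the first of many applications of one mechanical change in this series: struct ttm_buffer_object now embeds a struct drm_gem_object at tbo.base, and the reservation object lives there, so every bo->tbo.resv access becomes bo->tbo.base.resv. A minimal sketch of the pattern (amdgpu_bo_resv() below is a hypothetical wrapper, not an existing helper):

static inline struct reservation_object *
amdgpu_bo_resv(struct amdgpu_bo *bo)
{
	return bo->tbo.base.resv;	/* was: bo->tbo.resv */
}
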
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 73b2ede773d3..ece55c8fa673 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1505,6 +1505,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ struct i2c_adapter *ddc = NULL;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
bool is_dp_bridge = false;
@@ -1574,17 +1575,21 @@ amdgpu_connector_add(struct amdgpu_device *adev,
amdgpu_connector->con_priv = amdgpu_dig_connector;
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
- drm_connector_init(dev, &amdgpu_connector->base,
- &amdgpu_connector_dp_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dp_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
connector->interlace_allowed = true;
@@ -1602,8 +1607,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
- drm_connector_init(dev, &amdgpu_connector->base,
- &amdgpu_connector_dp_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dp_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
@@ -1644,8 +1651,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- drm_connector_init(dev, &amdgpu_connector->base,
- &amdgpu_connector_edp_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_edp_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
@@ -1659,13 +1668,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
} else {
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_vga_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
@@ -1679,13 +1693,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_vga_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
@@ -1704,13 +1723,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dvi_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
@@ -1754,13 +1778,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dvi_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
1);
@@ -1796,15 +1825,20 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dp_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dp_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
@@ -1838,15 +1872,20 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_edp_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_edp_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1859,13 +1898,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_lvds_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_lvds_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
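
Note: every connector case above follows the same shape: resolve the DDC i2c adapter first, then register the connector with drm_connector_init_with_ddc() so the DRM core knows which i2c bus backs the connector. A hedged sketch with placeholder names (my_register_connector, my_connector_funcs); ddc may legitimately be NULL when the lookup fails, matching the fallback in the hunks above:

static const struct drm_connector_funcs my_connector_funcs;

static int my_register_connector(struct drm_device *dev,
				 struct drm_connector *connector,
				 struct i2c_adapter *ddc)
{
	return drm_connector_init_with_ddc(dev, connector,
					   &my_connector_funcs,
					   DRM_MODE_CONNECTOR_HDMIA,
					   ddc);	/* NULL is allowed */
}
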
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e4094f842e7..9ccf32c5456a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -402,7 +402,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
struct ttm_operation_ctx ctx = {
.interruptible = true,
.no_wait_gpu = false,
- .resv = bo->tbo.resv,
+ .resv = bo->tbo.base.resv,
.flags = 0
};
uint32_t domain;
@@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
- struct reservation_object *resv = bo->tbo.resv;
+ struct reservation_object *resv = bo->tbo.base.resv;
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
amdgpu_bo_explicit_sync(bo));
@@ -1727,7 +1727,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
*map = mapping;
/* Double check that the BO is reserved by this CS */
- if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+ if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
return -EINVAL;
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
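
Note: the last hunk above swaps a raw READ_ONCE() on the ww_mutex context for the reservation_object_locking_ctx() accessor; comparing its result against the parser's ticket answers "did this CS reserve the BO?" without reaching into the lock internals. Sketch (my_bo_reserved_by() is a hypothetical helper):

static bool my_bo_reserved_by(struct reservation_object *resv,
			      struct ww_acquire_ctx *ticket)
{
	return reservation_object_locking_ctx(resv) == ticket;
}
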
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index dc65592f41b4..f453e277ed24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -205,7 +205,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto unpin;
}
- r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
+ r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
&work->shared_count,
&work->shared);
if (unlikely(r != 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 974472fdfc6d..f0db7ddcb61b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
* fences on the reservation object into a single exclusive
* fence.
*/
- r = __reservation_object_make_exclusive(bo->tbo.resv);
+ r = __reservation_object_make_exclusive(bo->tbo.base.resv);
if (r)
goto error_unreserve;
}
@@ -268,20 +268,6 @@ error:
}
/**
- * amdgpu_gem_prime_res_obj - &drm_driver.gem_prime_res_obj implementation
- * @obj: GEM BO
- *
- * Returns:
- * The BO's reservation object.
- */
-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
- return bo->tbo.resv;
-}
-
-/**
* amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
* @dma_buf: Shared DMA buffer
* @direction: Direction of DMA transfer
@@ -339,14 +325,12 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = {
* @gobj: GEM BO
* @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
*
- * The main work is done by the &drm_gem_prime_export helper, which in turn
- * uses &amdgpu_gem_prime_res_obj.
+ * The main work is done by the &drm_gem_prime_export helper.
*
* Returns:
* Shared DMA buffer representing the GEM BO from the given device.
*/
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
@@ -356,9 +340,9 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
- buf = drm_gem_prime_export(dev, gobj, flags);
+ buf = drm_gem_prime_export(gobj, flags);
if (!IS_ERR(buf)) {
- buf->file->f_mapping = dev->anon_inode->i_mapping;
+ buf->file->f_mapping = gobj->dev->anon_inode->i_mapping;
buf->ops = &amdgpu_dmabuf_ops;
}
@@ -396,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
bp.flags = 0;
bp.type = ttm_bo_type_sg;
bp.resv = resv;
- ww_mutex_lock(&resv->lock, NULL);
+ reservation_object_lock(resv, NULL);
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret)
goto error;
@@ -408,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
- ww_mutex_unlock(&resv->lock);
- return &bo->gem_base;
+ reservation_object_unlock(resv);
+ return &bo->tbo.base;
error:
- ww_mutex_unlock(&resv->lock);
+ reservation_object_unlock(resv);
return ERR_PTR(ret);
}
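
Note: two API updates meet in this file: drm_gem_prime_export() loses its struct drm_device argument (the object already carries the device as gobj->dev), and the open-coded ww_mutex_lock()/ww_mutex_unlock() on the reservation object give way to the reservation_object_lock()/reservation_object_unlock() wrappers. A sketch of an export hook under the new signature, mirroring the amdgpu version above:

static struct dma_buf *my_gem_prime_export(struct drm_gem_object *gobj,
					   int flags)
{
	struct dma_buf *buf = drm_gem_prime_export(gobj, flags);

	if (!IS_ERR(buf))
		buf->file->f_mapping = gobj->dev->anon_inode->i_mapping;
	return buf;
}
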
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
index c7056cbe8685..5012e6ab58f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -30,12 +30,10 @@ struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg);
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index e3e09e6d7f42..e9046922fe94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1372,7 +1372,7 @@ static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_ATOMIC |
DRIVER_GEM |
- DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
.load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
@@ -1396,7 +1396,6 @@ static struct drm_driver kms_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
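
Note: DRIVER_PRIME disappears from the feature mask because PRIME support no longer needs opting into, and .gem_prime_res_obj is dropped because the core now takes the reservation object straight from the GEM object. A sketch of the slimmed-down driver struct this leaves behind (field set abbreviated, my_driver is a placeholder):

static struct drm_driver my_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	/* no .gem_prime_res_obj: the core reads the GEM object's resv */
};
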
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index ddb07e9a71ca..d532e3d647ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -85,7 +85,7 @@ retry:
}
return r;
}
- *obj = &bo->gem_base;
+ *obj = &bo->tbo.base;
return 0;
}
@@ -134,7 +134,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return -EPERM;
if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
- abo->tbo.resv != vm->root.base.bo->tbo.resv)
+ abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
return -EPERM;
r = amdgpu_bo_reserve(abo, false);
@@ -252,7 +252,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (r)
return r;
- resv = vm->root.base.bo->tbo.resv;
+ resv = vm->root.base.bo->tbo.base.resv;
}
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
@@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+ ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true,
timeout);
/* ret == 0 means not signaled,
@@ -689,7 +689,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_amdgpu_gem_create_in info;
void __user *out = u64_to_user_ptr(args->value);
- info.bo_size = robj->gem_base.size;
+ info.bo_size = robj->tbo.base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
@@ -820,8 +820,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
- dma_buf = READ_ONCE(bo->gem_base.dma_buf);
- attachment = READ_ONCE(bo->gem_base.import_attach);
+ dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
+ attachment = READ_ONCE(bo->tbo.base.import_attach);
if (attachment)
seq_printf(m, " imported from %p", dma_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index b8ba6e27c61f..2f17150e26e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -31,7 +31,7 @@
*/
#define AMDGPU_GEM_DOMAIN_MAX 0x3
-#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
+#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
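Since the drm_gem_object now lives inside the TTM buffer object, the updated macro recovers the amdgpu_bo by offset arithmetic; roughly:

/* gem_to_amdgpu_bo(gobj) expands to container_of(gobj, struct amdgpu_bo,
 * tbo.base), i.e. approximately:
 *
 *   (struct amdgpu_bo *)((char *)gobj - offsetof(struct amdgpu_bo, tbo.base))
 *
 * which is why the separate gem_base member can be dropped from
 * struct amdgpu_bo further down in this series.
 */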
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 60ad1a9d56bb..0e2ec608530b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1103,7 +1103,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
- amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
+ amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3971c201f320..50022acc8a81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
continue;
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0c0a8e83ab83..2d07f16f1789 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -82,9 +82,9 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_kunmap(bo);
- if (bo->gem_base.import_attach)
- drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
- drm_gem_object_release(&bo->gem_base);
+ if (bo->tbo.base.import_attach)
+ drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+ drm_gem_object_release(&bo->tbo.base);
	/* in case amdgpu_device_recover_vram() saw a NULL bo->parent */
if (!list_empty(&bo->shadow_list)) {
mutex_lock(&adev->shadow_list_lock);
@@ -485,7 +485,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+ drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
INIT_LIST_HEAD(&bo->shadow_list);
bo->vm_bo = NULL;
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
@@ -527,7 +527,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
@@ -550,7 +550,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
fail_unreserve:
if (!bp->resv)
- ww_mutex_unlock(&bo->tbo.resv->lock);
+ reservation_object_unlock(bo->tbo.base.resv);
amdgpu_bo_unref(&bo);
return r;
}
@@ -571,7 +571,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW;
bp.type = ttm_bo_type_kernel;
- bp.resv = bo->tbo.resv;
+ bp.resv = bo->tbo.base.resv;
r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
if (!r) {
@@ -612,13 +612,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
if (!bp->resv)
- WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+ WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv,
NULL));
r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
if (!bp->resv)
- reservation_object_unlock((*bo_ptr)->tbo.resv);
+ reservation_object_unlock((*bo_ptr)->tbo.base.resv);
if (r)
amdgpu_bo_unref(bo_ptr);
@@ -715,7 +715,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+ r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false,
MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
@@ -1093,7 +1093,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
*/
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ reservation_object_assert_held(bo->tbo.base.resv);
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
@@ -1242,15 +1242,15 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
- reservation_object_lock(bo->resv, NULL);
+ reservation_object_lock(bo->base.resv, NULL);
- r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->resv, &fence);
+ r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
if (!WARN_ON(r)) {
amdgpu_bo_fence(abo, fence, false);
dma_fence_put(fence);
}
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
}
/**
@@ -1325,7 +1325,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
- struct reservation_object *resv = bo->tbo.resv;
+ struct reservation_object *resv = bo->tbo.base.resv;
if (shared)
reservation_object_add_shared_fence(resv, fence);
@@ -1350,7 +1350,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+ amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
@@ -1370,7 +1370,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
- WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
+ WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) &&
!bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5a3c1779e200..05dde0dd04ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -94,7 +94,6 @@ struct amdgpu_bo {
/* per VM structure for page tables and with virtual addresses */
struct amdgpu_vm_bo_base *vm_bo;
/* Constant after initialization */
- struct drm_gem_object gem_base;
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
@@ -192,7 +191,7 @@ static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
*/
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ab92b24ac4ff..8f8b7a350b8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -227,7 +227,7 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&abo->gem_base.vma_node,
+ return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
filp->private_data);
}
@@ -440,7 +440,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
new_mem->num_pages << PAGE_SHIFT,
- bo->resv, &fence);
+ bo->base.resv, &fence);
if (r)
goto error;
@@ -1494,18 +1494,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* cleanly handle page faults.
*/
if (bo->type == ttm_bo_type_kernel &&
- !reservation_object_test_signaled_rcu(bo->resv, true))
+ !reservation_object_test_signaled_rcu(bo->base.resv, true))
return false;
/* If bo is a KFD BO, check if the bo belongs to the current process.
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- flist = reservation_object_get_list(bo->resv);
+ flist = reservation_object_get_list(bo->base.resv);
if (flist) {
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(bo->resv));
+ reservation_object_held(bo->base.resv));
if (amdkfd_fence_check_mm(f, current->mm))
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 5b2fea3b4a2c..f858607b17a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
true, false,
msecs_to_jiffies(10));
if (r == 0)
@@ -1085,7 +1085,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (r)
goto err_free;
} else {
- r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+ r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9ab31ea3cc60..c8244ce184e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -302,7 +302,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
- if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+ if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
return;
vm->bulk_moveable = false;
@@ -583,7 +583,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
+ if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
vm->bulk_moveable = false;
}
@@ -834,7 +834,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
bp->type = ttm_bo_type_kernel;
if (vm->root.base.bo)
- bp->resv = vm->root.base.bo->tbo.resv;
+ bp->resv = vm->root.base.bo->tbo.base.resv;
}
/**
@@ -1702,7 +1702,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = reservation_object_get_excl(bo->tbo.resv);
+ exclusive = reservation_object_get_excl(bo->tbo.base.resv);
}
if (bo) {
@@ -1712,7 +1712,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
flags = 0x0;
}
- if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+ if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
last_update = &vm->last_update;
else
last_update = &bo_va->last_pt_update;
@@ -1743,7 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
* the evicted list so that it gets validated again on the
* next command submission.
*/
- if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
uint32_t mem_type = bo->tbo.mem.mem_type;
if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
@@ -1879,7 +1879,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
*/
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- struct reservation_object *resv = vm->root.base.bo->tbo.resv;
+ struct reservation_object *resv = vm->root.base.bo->tbo.base.resv;
struct dma_fence *excl, **shared;
unsigned i, shared_count;
int r;
@@ -1993,7 +1993,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
- resv = bo_va->base.bo->tbo.resv;
+ resv = bo_va->base.bo->tbo.base.resv;
spin_unlock(&vm->invalidated_lock);
/* Try to reserve the BO to avoid clearing its ptes */
@@ -2084,7 +2084,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
if (mapping->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
- if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+ if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
!bo_va->base.moved) {
list_move(&bo_va->base.vm_status, &vm->moved);
}
@@ -2416,7 +2416,8 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
struct amdgpu_bo *bo;
bo = mapping->bo_va->base.bo;
- if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+ if (reservation_object_locking_ctx(bo->tbo.base.resv) !=
+ ticket)
continue;
}
@@ -2443,7 +2444,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base **base;
if (bo) {
- if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+ if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
vm->bulk_moveable = false;
for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2507,7 +2508,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
amdgpu_vm_bo_evicted(bo_base);
continue;
}
@@ -2518,7 +2519,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
if (bo->tbo.type == ttm_bo_type_kernel)
amdgpu_vm_bo_relocated(bo_base);
- else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+ else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
amdgpu_vm_bo_moved(bo_base);
else
amdgpu_vm_bo_invalidated(bo_base);
@@ -2648,7 +2649,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
*/
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.resv,
+ return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
true, true, timeout);
}
@@ -2723,7 +2724,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_free_root;
- r = reservation_object_reserve_shared(root->tbo.resv, 1);
+ r = reservation_object_reserve_shared(root->tbo.base.resv, 1);
if (r)
goto error_unreserve;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index ddd181f5ed37..61fc584cbb1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -72,7 +72,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
if (r)
return r;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
owner, false);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 0bcb7662e2d9..722c70d40d3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -5695,7 +5695,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* deadlock during GPU reset when this fence will not signal
* but we hold reservation lock for the BO.
*/
- r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true,
+ r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true,
false,
msecs_to_jiffies(5000));
if (unlikely(r <= 0))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6e205ee36ac3..16218a202b59 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -156,6 +156,26 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(amdgpu_dm_connector);
}
+static int
+amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+
+ return drm_dp_mst_connector_late_register(connector, port);
+}
+
+static void
+amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+
+ drm_dp_mst_connector_early_unregister(connector, port);
+}
+
static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.detect = dm_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -164,7 +184,9 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
- .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_mst_connector_late_register,
+ .early_unregister = amdgpu_dm_mst_connector_early_unregister,
};
static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -388,7 +410,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
aconnector->dm_dp_aux.aux.name = "dmdc";
- aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
+ aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index af60c6d7a5f4..6b7f791685ec 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -135,8 +135,7 @@ static int arcpgu_debugfs_init(struct drm_minor *minor)
#endif
static struct drm_driver arcpgu_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
.date = "20160219",
@@ -150,8 +149,6 @@ static struct drm_driver arcpgu_drm_driver = {
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_print_info = drm_gem_cma_print_info,
.gem_vm_ops = &drm_gem_cma_vm_ops,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
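With DRIVER_PRIME gone, PRIME support is implied by DRIVER_GEM, and the core falls back to drm_gem_prime_export()/drm_gem_prime_import() when the hooks are left NULL — which is why the explicit assignments are dropped here and in the drivers below. A minimal sketch of what a CMA driver declaration reduces to; the driver name is hypothetical:

static struct drm_driver example_cma_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	/* .gem_prime_export/.gem_prime_import omitted: the core now
	 * defaults to drm_gem_prime_export()/drm_gem_prime_import(). */
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
};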
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 4073a452e24a..55a8cc94808a 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -4,8 +4,6 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
-
-#include <drm/drm_print.h>
#include "d71_dev.h"
#include "komeda_kms.h"
#include "malidp_io.h"
@@ -804,7 +802,7 @@ static int d71_downscaling_clk_check(struct komeda_pipeline *pipe,
denominator = (mode->htotal - 1) * v_out - 2 * v_in;
}
- return aclk_rate * denominator >= mode->clock * 1000 * fraction ?
+ return aclk_rate * denominator >= mode->crtc_clock * 1000 * fraction ?
0 : -EINVAL;
}
@@ -1032,21 +1030,31 @@ static void d71_timing_ctrlr_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct drm_crtc_state *crtc_st = state->crtc->state;
+ struct drm_display_mode *mode = &crtc_st->adjusted_mode;
u32 __iomem *reg = c->reg;
- struct videomode vm;
+ u32 hactive, hfront_porch, hback_porch, hsync_len;
+ u32 vactive, vfront_porch, vback_porch, vsync_len;
u32 value;
- drm_display_mode_to_videomode(&crtc_st->adjusted_mode, &vm);
-
- malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(vm.hactive, vm.vactive));
- malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(vm.hfront_porch,
- vm.hback_porch));
- malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vm.vfront_porch,
- vm.vback_porch));
-
- value = BS_SYNC_VSW(vm.vsync_len) | BS_SYNC_HSW(vm.hsync_len);
- value |= vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ? BS_SYNC_VSP : 0;
- value |= vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ? BS_SYNC_HSP : 0;
+ hactive = mode->crtc_hdisplay;
+ hfront_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
+ hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hback_porch = mode->crtc_htotal - mode->crtc_hsync_end;
+
+ vactive = mode->crtc_vdisplay;
+ vfront_porch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vback_porch = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(hactive, vactive));
+ malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(hfront_porch,
+ hback_porch));
+ malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vfront_porch,
+ vback_porch));
+
+ value = BS_SYNC_VSW(vsync_len) | BS_SYNC_HSW(hsync_len);
+ value |= mode->flags & DRM_MODE_FLAG_PVSYNC ? BS_SYNC_VSP : 0;
+ value |= mode->flags & DRM_MODE_FLAG_PHSYNC ? BS_SYNC_HSP : 0;
malidp_write32(reg, BS_SYNC, value);
malidp_write32(reg, BS_PROG_LINE, D71_DEFAULT_PREPRETCH_LINE - 1);
@@ -1054,6 +1062,10 @@ static void d71_timing_ctrlr_update(struct komeda_component *c,
/* configure bs control register */
value = BS_CTRL_EN | BS_CTRL_VM;
+ if (c->pipeline->dual_link) {
+ malidp_write32(reg, BS_DRIFT_TO, hfront_porch + 16);
+ value |= BS_CTRL_DL;
+ }
malidp_write32(reg, BLK_CONTROL, value);
}
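A worked example of the porch arithmetic above, assuming the canonical CEA 1080p60 timings (1920/2008/2052/2200 horizontal, 1080/1084/1089/1125 vertical):

/*
 * hactive      = 1920
 * hfront_porch = 2008 - 1920 =  88   (crtc_hsync_start - crtc_hdisplay)
 * hsync_len    = 2052 - 2008 =  44   (crtc_hsync_end - crtc_hsync_start)
 * hback_porch  = 2200 - 2052 = 148   (crtc_htotal - crtc_hsync_end)
 *
 * vactive      = 1080
 * vfront_porch = 1084 - 1080 =   4
 * vsync_len    = 1089 - 1084 =   5
 * vback_porch  = 1125 - 1089 =  36
 */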
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index f4400788ab94..fa9a4593bb37 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -27,8 +27,8 @@ static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
return;
}
- pxlclk = kcrtc_st->base.adjusted_mode.clock * 1000;
- aclk = komeda_calc_aclk(kcrtc_st);
+ pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000;
+ aclk = komeda_crtc_get_aclk(kcrtc_st);
kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk);
}
@@ -74,14 +74,6 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-unsigned long komeda_calc_aclk(struct komeda_crtc_state *kcrtc_st)
-{
- struct komeda_dev *mdev = kcrtc_st->base.crtc->dev->dev_private;
- unsigned long pxlclk = kcrtc_st->base.adjusted_mode.clock;
-
- return clk_round_rate(mdev->aclk, pxlclk * 1000);
-}
-
/* To activate a CRTC, we mainly need two parts of preparation:
* 1. adjust display operation mode.
* 2. enable needed clk
@@ -92,7 +84,7 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state);
- unsigned long pxlclk_rate = kcrtc_st->base.adjusted_mode.clock * 1000;
+ struct drm_display_mode *mode = &kcrtc_st->base.adjusted_mode;
u32 new_mode;
int err;
@@ -118,7 +110,7 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
* to enable it again.
*/
if (new_mode != KOMEDA_MODE_DUAL_DISP) {
- err = clk_set_rate(mdev->aclk, komeda_calc_aclk(kcrtc_st));
+ err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st));
if (err)
DRM_ERROR("failed to set aclk.\n");
err = clk_prepare_enable(mdev->aclk);
@@ -126,7 +118,7 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
DRM_ERROR("failed to enable aclk.\n");
}
- err = clk_set_rate(master->pxlclk, pxlclk_rate);
+ err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000);
if (err)
DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
err = clk_prepare_enable(master->pxlclk);
@@ -342,29 +334,58 @@ komeda_crtc_atomic_flush(struct drm_crtc *crtc,
komeda_crtc_do_flush(crtc, old);
}
+/* Returns the minimum rate of the aclk (main engine clock), in Hz */
+static unsigned long
+komeda_calc_min_aclk_rate(struct komeda_crtc *kcrtc,
+ unsigned long pxlclk)
+{
+ /* With dual-link, one display pipeline drives two display outputs,
+ * so the aclk needs to run at double the pxlclk rate.
+ */
+ if (kcrtc->master->dual_link)
+ return pxlclk * 2;
+ else
+ return pxlclk;
+}
+
+/* Get the aclk rate specified by the given state */
+unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st)
+{
+ struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ unsigned long pxlclk = kcrtc_st->base.adjusted_mode.crtc_clock * 1000;
+ unsigned long min_aclk;
+
+ min_aclk = komeda_calc_min_aclk_rate(to_kcrtc(crtc), pxlclk);
+
+ return clk_round_rate(mdev->aclk, min_aclk);
+}
+
static enum drm_mode_status
komeda_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *m)
{
struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_pipeline *master = kcrtc->master;
- long mode_clk, pxlclk;
+ unsigned long min_pxlclk, min_aclk;
if (m->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
- mode_clk = m->clock * 1000;
- pxlclk = clk_round_rate(master->pxlclk, mode_clk);
- if (pxlclk != mode_clk) {
- DRM_DEBUG_ATOMIC("pxlclk doesn't support %ld Hz\n", mode_clk);
+ min_pxlclk = m->clock * 1000;
+ if (master->dual_link)
+ min_pxlclk /= 2;
+
+ if (min_pxlclk != clk_round_rate(master->pxlclk, min_pxlclk)) {
+ DRM_DEBUG_ATOMIC("pxlclk doesn't support %lu Hz\n", min_pxlclk);
return MODE_NOCLOCK;
}
- /* main engine clock must be faster than pxlclk*/
- if (clk_round_rate(mdev->aclk, mode_clk) < pxlclk) {
- DRM_DEBUG_ATOMIC("engine clk can't satisfy the requirement of %s-clk: %ld.\n",
- m->name, pxlclk);
+ min_aclk = komeda_calc_min_aclk_rate(to_kcrtc(crtc), min_pxlclk);
+ if (clk_round_rate(mdev->aclk, min_aclk) < min_aclk) {
+ DRM_DEBUG_ATOMIC("engine clk can't satisfy the requirement of %s-clk: %lu.\n",
+ m->name, min_pxlclk);
return MODE_CLOCK_HIGH;
}
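To make the dual-link arithmetic concrete: for a 4K60 mode with m->clock = 594000 (kHz) on a dual-link pipeline, each link carries half the pixels, so:

/*
 * min_pxlclk = 594000 * 1000 / 2 = 297 MHz   (per-link pixel clock)
 * min_aclk   = 297 MHz * 2      = 594 MHz   (engine processes both links)
 */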
@@ -377,10 +398,22 @@ static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
- struct komeda_pipeline *master = kcrtc->master;
- long mode_clk = m->clock * 1000;
+ unsigned long clk_rate;
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ /* In dual link half the horizontal settings */
+ if (kcrtc->master->dual_link) {
+ adjusted_mode->crtc_clock /= 2;
+ adjusted_mode->crtc_hdisplay /= 2;
+ adjusted_mode->crtc_hsync_start /= 2;
+ adjusted_mode->crtc_hsync_end /= 2;
+ adjusted_mode->crtc_htotal /= 2;
+ }
- adjusted_mode->clock = clk_round_rate(master->pxlclk, mode_clk) / 1000;
+ clk_rate = adjusted_mode->crtc_clock * 1000;
+ /* crtc_clock will be used as the komeda output pixel clock */
+ adjusted_mode->crtc_clock = clk_round_rate(kcrtc->master->pxlclk,
+ clk_rate) / 1000;
return true;
}
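Correspondingly, the fixup halves only the horizontal crtc_ timings; e.g. the same 4K60 CEA mode (3840/4016/4104/4400) becomes, per link:

/*
 * crtc_clock:      594000 -> 297000 kHz
 * crtc_hdisplay:     3840 -> 1920
 * crtc_hsync_start:  4016 -> 2008
 * crtc_hsync_end:    4104 -> 2052
 * crtc_htotal:       4400 -> 2200
 *
 * Vertical timings are untouched; clk_round_rate() then snaps crtc_clock
 * to the nearest rate the pxlclk can actually generate.
 */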
@@ -488,10 +521,8 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
else
sprintf(str, "None");
- DRM_INFO("crtc%d: master(pipe-%d) slave(%s) output: %s.\n",
- kms->n_crtcs, master->id, str,
- master->of_output_dev ?
- master->of_output_dev->full_name : "None");
+ DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n",
+ kms->n_crtcs, master->id, str);
kms->n_crtcs++;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 5a118984de33..1ff7f4b2c620 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -121,11 +121,14 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
pipe->pxlclk = clk;
/* enum ports */
- pipe->of_output_dev =
+ pipe->of_output_links[0] =
of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 0);
+ pipe->of_output_links[1] =
+ of_graph_get_remote_node(np, KOMEDA_OF_PORT_OUTPUT, 1);
pipe->of_output_port =
of_graph_get_port_by_id(np, KOMEDA_OF_PORT_OUTPUT);
+ pipe->dual_link = pipe->of_output_links[0] && pipe->of_output_links[1];
pipe->of_node = np;
return 0;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index cfa5068d9d1e..69ace6f9055d 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -83,11 +83,12 @@ static int compare_of(struct device *dev, void *data)
static void komeda_add_slave(struct device *master,
struct component_match **match,
- struct device_node *np, int port)
+ struct device_node *np,
+ u32 port, u32 endpoint)
{
struct device_node *remote;
- remote = of_graph_get_remote_node(np, port, 0);
+ remote = of_graph_get_remote_node(np, port, endpoint);
if (remote) {
drm_of_component_match_add(master, match, compare_of, remote);
of_node_put(remote);
@@ -108,7 +109,8 @@ static int komeda_platform_probe(struct platform_device *pdev)
continue;
/* add connector */
- komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT);
+ komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 0);
+ komeda_add_slave(dev, &match, child, KOMEDA_OF_PORT_OUTPUT, 1);
}
return component_master_add_with_match(dev, &komeda_master_ops, match);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 419a8b0e5de8..89191a555c84 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -55,16 +55,13 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
}
static struct drm_driver komeda_kms_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
- DRIVER_PRIME | DRIVER_HAVE_IRQ,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = komeda_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 8c89fc245b83..45c498e15e7a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -14,8 +14,6 @@
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
#include <drm/drm_print.h>
-#include <video/videomode.h>
-#include <video/display_timing.h>
/**
* struct komeda_plane - komeda instance of drm_plane
@@ -168,7 +166,7 @@ static inline bool has_flip_h(u32 rot)
return !!(rotation & DRM_MODE_REFLECT_X);
}
-unsigned long komeda_calc_aclk(struct komeda_crtc_state *kcrtc_st);
+unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st);
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index 78e44d9e1520..452e505a1fd3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -54,7 +54,8 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
clk_put(pipe->pxlclk);
- of_node_put(pipe->of_output_dev);
+ of_node_put(pipe->of_output_links[0]);
+ of_node_put(pipe->of_output_links[1]);
of_node_put(pipe->of_output_port);
of_node_put(pipe->of_node);
@@ -246,9 +247,15 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
struct komeda_component *c;
int id;
- DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s\n",
+ DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
pipe->id, pipe->n_layers, pipe->n_scalers,
- pipe->of_output_dev ? pipe->of_output_dev->full_name : "none");
+ pipe->dual_link ? "dual-link" : "single-link");
+ DRM_INFO(" output_link[0]: %s.\n",
+ pipe->of_output_links[0] ?
+ pipe->of_output_links[0]->full_name : "none");
+ DRM_INFO(" output_link[1]: %s.\n",
+ pipe->of_output_links[1] ?
+ pipe->of_output_links[1]->full_name : "none");
dp_for_each_set_bit(id, pipe->avail_comps) {
c = komeda_pipeline_get_component(pipe, id);
@@ -305,6 +312,12 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
layer->right = komeda_get_layer_split_right_layer(pipe, layer);
}
+
+ if (pipe->dual_link && !pipe->ctrlr->supports_dual_link) {
+ pipe->dual_link = false;
+ DRM_WARN("PIPE-%d doesn't support dual-link, ignore DT dual-link configuration.\n",
+ pipe->id);
+ }
}
/* if pipeline_A accept another pipeline_B's component as input, treat
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index a90bcbb3cb23..a7a84e66549d 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -416,8 +416,10 @@ struct komeda_pipeline {
struct device_node *of_node;
/** @of_output_port: pipeline output port */
struct device_node *of_output_port;
- /** @of_output_dev: output connector device node */
- struct device_node *of_output_dev;
+ /** @of_output_links: output connector device nodes */
+ struct device_node *of_output_links[2];
+ /** @dual_link: true if of_output_links[0] and [1] are both valid */
+ bool dual_link;
};
/**
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index 950235af1e79..ea26bc9c2d00 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -473,7 +473,7 @@ komeda_scaler_check_cfg(struct komeda_scaler *scaler,
err = pipe->funcs->downscaling_clk_check(pipe,
&kcrtc_st->base.adjusted_mode,
- komeda_calc_aclk(kcrtc_st), dflow);
+ komeda_crtc_get_aclk(kcrtc_st), dflow);
if (err) {
DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n");
return err;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index c095af154216..98e915e325dd 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -158,7 +158,7 @@ static void komeda_plane_reset(struct drm_plane *plane)
static struct drm_plane_state *
komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
{
- struct komeda_plane_state *new, *old;
+ struct komeda_plane_state *new;
if (WARN_ON(!plane->state))
return NULL;
@@ -169,8 +169,6 @@ komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
- old = to_kplane_st(plane->state);
-
return &new->base;
}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 8fc0b884c428..27c46a2838c5 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -229,9 +229,7 @@ static int hdlcd_debugfs_init(struct drm_minor *minor)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver hdlcd_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = hdlcd_irq,
.irq_preinstall = hdlcd_irq_preinstall,
.irq_postinstall = hdlcd_irq_postinstall,
@@ -242,8 +240,6 @@ static struct drm_driver hdlcd_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index f25ec4382277..c27ff456eddc 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -561,15 +561,12 @@ static int malidp_debugfs_init(struct drm_minor *minor)
#endif //CONFIG_DEBUG_FS
static struct drm_driver malidp_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
- DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = malidp_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 50af399d7f6f..380be66d4c6e 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -385,6 +385,7 @@ int malidp_format_get_bpp(u32 fmt)
switch (fmt) {
case DRM_FORMAT_VUY101010:
bpp = 30;
+ break;
case DRM_FORMAT_YUV420_10BIT:
bpp = 15;
break;
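The added break fixes a fall-through bug: previously DRM_FORMAT_VUY101010 set bpp = 30 and then fell into the DRM_FORMAT_YUV420_10BIT case, overwriting the result with 15.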
@@ -1309,7 +1310,7 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
break;
case MW_RESTART:
drm_writeback_signal_completion(&malidp->mw_connector, 0);
- /* fall through to a new start */
+ /* fall through - to a new start */
case MW_START:
/* writeback started, need to emulate one-shot mode */
hw->disable_memwrite(hwdev);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 521464f08ccd..055c92bc88bf 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -40,8 +40,7 @@ static struct drm_driver armada_drm_driver = {
.name = "armada-drm",
.desc = "Armada SoC DRM",
.date = "20120730",
- .driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 874b2968a866..60c509784fa3 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -482,8 +482,7 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
};
struct dma_buf *
-armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
- int flags)
+armada_gem_prime_export(struct drm_gem_object *obj, int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -492,7 +491,7 @@ armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
exp_info.flags = O_RDWR;
exp_info.priv = obj;
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
struct drm_gem_object *
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index 1dd80540b8ce..de04cc2c8f0e 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -32,8 +32,7 @@ struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
size_t);
int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *);
-struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+struct dma_buf *armada_gem_prime_export(struct drm_gem_object *obj, int flags);
struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
struct dma_buf *);
int armada_gem_map_import(struct armada_gem_object *);
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index eeb22eccd1fc..ada2f6aca906 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -194,8 +194,7 @@ static void aspeed_gfx_unload(struct drm_device *drm)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver aspeed_gfx_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.gem_create_object = drm_cma_gem_create_object_default_funcs,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index b086dae17013..561f7c4199e4 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -3,6 +3,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ast-y := ast_drv.o ast_main.o ast_mode.o ast_fb.o ast_ttm.o ast_post.o ast_dp501.o
+ast-y := ast_drv.o ast_main.o ast_mode.o ast_ttm.o ast_post.o ast_dp501.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 4c7375b45281..98cd69269263 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -1,8 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/delay.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
#include "ast_drv.h"
+
MODULE_FIRMWARE("ast_dp501_fw.bin");
static int ast_load_dp501_microcode(struct drm_device *dev)
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 3811997e78c4..6ed6ff49efc0 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -25,12 +25,17 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <linux/module.h>
+
#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_pci.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
@@ -100,28 +105,21 @@ ast_pci_remove(struct pci_dev *pdev)
static int ast_drm_freeze(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
-
pci_save_state(dev->pdev);
+ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true);
- console_lock();
- ast_fbdev_set_suspend(dev, 1);
- console_unlock();
return 0;
}
static int ast_drm_thaw(struct drm_device *dev)
{
- int error = 0;
-
ast_post_gpu(dev);
drm_mode_config_reset(dev);
drm_helper_resume_force_mode(dev);
+ drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false);
- console_lock();
- ast_fbdev_set_suspend(dev, 0);
- console_unlock();
- return error;
+ return 0;
}
static int ast_drm_resume(struct drm_device *dev)
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 684e15e64a62..244cc7c382af 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -28,17 +28,18 @@
#ifndef __AST_DRV_H__
#define __AST_DRV_H__
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
-
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_vram_helper.h>
-
-#include <drm/drm_vram_mm_helper.h>
-
+#include <linux/types.h>
+#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_fb_helper.h>
+
#define DRIVER_AUTHOR "Dave Airlie"
#define DRIVER_NAME "ast"
@@ -81,8 +82,6 @@ enum ast_tx_chip {
#define AST_DRAM_4Gx16 7
#define AST_DRAM_8Gx16 8
-struct ast_fbdev;
-
struct ast_private {
struct drm_device *dev;
@@ -96,8 +95,6 @@ struct ast_private {
uint32_t mclk;
uint32_t vram_size;
- struct ast_fbdev *fbdev;
-
int fb_mtrr;
struct drm_gem_object *cursor_cache;
@@ -239,24 +236,9 @@ struct ast_encoder {
struct drm_encoder base;
};
-struct ast_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
-struct ast_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct ast_framebuffer afb;
- void *sysram;
- int size;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
-
#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
#define to_ast_connector(x) container_of(x, struct ast_connector, base)
#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
-#define to_ast_framebuffer(x) container_of(x, struct ast_framebuffer, base)
struct ast_vbios_stdtable {
u8 misc;
@@ -296,16 +278,6 @@ struct ast_vbios_mode_info {
extern int ast_mode_init(struct drm_device *dev);
extern void ast_mode_fini(struct drm_device *dev);
-int ast_framebuffer_init(struct drm_device *dev,
- struct ast_framebuffer *ast_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-int ast_fbdev_init(struct drm_device *dev);
-void ast_fbdev_fini(struct drm_device *dev);
-void ast_fbdev_set_suspend(struct drm_device *dev, int state);
-void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr);
-
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
deleted file mode 100644
index 8200b25dad16..000000000000
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ /dev/null
@@ -1,346 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors: Dave Airlie <airlied@redhat.com>
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_util.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "ast_drv.h"
-
-static void ast_dirty_update(struct ast_fbdev *afbdev,
- int x, int y, int width, int height)
-{
- int i;
- struct drm_gem_vram_object *gbo;
- int src_offset, dst_offset;
- int bpp = afbdev->afb.base.format->cpp[0];
- int ret;
- u8 *dst;
- bool unmap = false;
- bool store_for_later = false;
- int x2, y2;
- unsigned long flags;
-
- gbo = drm_gem_vram_of_gem(afbdev->afb.obj);
-
- if (drm_can_sleep()) {
- /* We pin the BO so it won't be moved during the
- * update. The actual location, video RAM or system
- * memory, is not important.
- */
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret) {
- if (ret != -EBUSY)
- return;
- store_for_later = true;
- }
- } else {
- store_for_later = true;
- }
-
- x2 = x + width - 1;
- y2 = y + height - 1;
- spin_lock_irqsave(&afbdev->dirty_lock, flags);
-
- if (afbdev->y1 < y)
- y = afbdev->y1;
- if (afbdev->y2 > y2)
- y2 = afbdev->y2;
- if (afbdev->x1 < x)
- x = afbdev->x1;
- if (afbdev->x2 > x2)
- x2 = afbdev->x2;
-
- if (store_for_later) {
- afbdev->x1 = x;
- afbdev->x2 = x2;
- afbdev->y1 = y;
- afbdev->y2 = y2;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
- return;
- }
-
- afbdev->x1 = afbdev->y1 = INT_MAX;
- afbdev->x2 = afbdev->y2 = 0;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
-
- dst = drm_gem_vram_kmap(gbo, false, NULL);
- if (IS_ERR(dst)) {
- DRM_ERROR("failed to kmap fb updates\n");
- goto out;
- } else if (!dst) {
- dst = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(dst)) {
- DRM_ERROR("failed to kmap fb updates\n");
- goto out;
- }
- unmap = true;
- }
-
- for (i = y; i <= y2; i++) {
- /* assume equal stride for now */
- src_offset = dst_offset =
- i * afbdev->afb.base.pitches[0] + (x * bpp);
- memcpy_toio(dst + dst_offset, afbdev->sysram + src_offset,
- (x2 - x + 1) * bpp);
- }
-
- if (unmap)
- drm_gem_vram_kunmap(gbo);
-
-out:
- drm_gem_vram_unpin(gbo);
-}
-
-static void ast_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct ast_fbdev *afbdev = info->par;
- drm_fb_helper_sys_fillrect(info, rect);
- ast_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
- rect->height);
-}
-
-static void ast_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct ast_fbdev *afbdev = info->par;
- drm_fb_helper_sys_copyarea(info, area);
- ast_dirty_update(afbdev, area->dx, area->dy, area->width,
- area->height);
-}
-
-static void ast_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct ast_fbdev *afbdev = info->par;
- drm_fb_helper_sys_imageblit(info, image);
- ast_dirty_update(afbdev, image->dx, image->dy, image->width,
- image->height);
-}
-
-static struct fb_ops astfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = ast_fillrect,
- .fb_copyarea = ast_copyarea,
- .fb_imageblit = ast_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int astfb_create_object(struct ast_fbdev *afbdev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct drm_device *dev = afbdev->helper.dev;
- u32 size;
- struct drm_gem_object *gobj;
- int ret = 0;
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- ret = ast_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
- return ret;
-}
-
-static int astfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct ast_fbdev *afbdev =
- container_of(helper, struct ast_fbdev, helper);
- struct drm_device *dev = afbdev->helper.dev;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_framebuffer *fb;
- struct fb_info *info;
- int size, ret;
- void *sysram;
- struct drm_gem_object *gobj = NULL;
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
-
- ret = astfb_create_object(afbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- sysram = vmalloc(size);
- if (!sysram)
- return -ENOMEM;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out;
- }
- ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
- if (ret)
- goto out;
-
- afbdev->sysram = sysram;
- afbdev->size = size;
-
- fb = &afbdev->afb.base;
- afbdev->helper.fb = fb;
-
- info->fbops = &astfb_ops;
-
- info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
-
- drm_fb_helper_fill_info(info, &afbdev->helper, sizes);
-
- info->screen_base = sysram;
- info->screen_size = size;
-
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- DRM_DEBUG_KMS("allocated %dx%d\n",
- fb->width, fb->height);
-
- return 0;
-
-out:
- vfree(sysram);
- return ret;
-}
-
-static const struct drm_fb_helper_funcs ast_fb_helper_funcs = {
- .fb_probe = astfb_create,
-};
-
-static void ast_fbdev_destroy(struct drm_device *dev,
- struct ast_fbdev *afbdev)
-{
- struct ast_framebuffer *afb = &afbdev->afb;
-
- drm_helper_force_disable_all(dev);
- drm_fb_helper_unregister_fbi(&afbdev->helper);
-
- if (afb->obj) {
- drm_gem_object_put_unlocked(afb->obj);
- afb->obj = NULL;
- }
- drm_fb_helper_fini(&afbdev->helper);
-
- vfree(afbdev->sysram);
- drm_framebuffer_unregister_private(&afb->base);
- drm_framebuffer_cleanup(&afb->base);
-}
-
-int ast_fbdev_init(struct drm_device *dev)
-{
- struct ast_private *ast = dev->dev_private;
- struct ast_fbdev *afbdev;
- int ret;
-
- afbdev = kzalloc(sizeof(struct ast_fbdev), GFP_KERNEL);
- if (!afbdev)
- return -ENOMEM;
-
- ast->fbdev = afbdev;
- spin_lock_init(&afbdev->dirty_lock);
-
- drm_fb_helper_prepare(dev, &afbdev->helper, &ast_fb_helper_funcs);
-
- ret = drm_fb_helper_init(dev, &afbdev->helper, 1);
- if (ret)
- goto free;
-
- ret = drm_fb_helper_single_add_all_connectors(&afbdev->helper);
- if (ret)
- goto fini;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(dev);
-
- ret = drm_fb_helper_initial_config(&afbdev->helper, 32);
- if (ret)
- goto fini;
-
- return 0;
-
-fini:
- drm_fb_helper_fini(&afbdev->helper);
-free:
- kfree(afbdev);
- return ret;
-}
-
-void ast_fbdev_fini(struct drm_device *dev)
-{
- struct ast_private *ast = dev->dev_private;
-
- if (!ast->fbdev)
- return;
-
- ast_fbdev_destroy(dev, ast->fbdev);
- kfree(ast->fbdev);
- ast->fbdev = NULL;
-}
-
-void ast_fbdev_set_suspend(struct drm_device *dev, int state)
-{
- struct ast_private *ast = dev->dev_private;
-
- if (!ast->fbdev)
- return;
-
- drm_fb_helper_set_suspend(&ast->fbdev->helper, state);
-}
-
-void ast_fbdev_set_base(struct ast_private *ast, unsigned long gpu_addr)
-{
- ast->fbdev->helper.fbdev->fix.smem_start =
- ast->fbdev->helper.fbdev->apertures->ranges[0].base + gpu_addr;
- ast->fbdev->helper.fbdev->fix.smem_len = ast->vram_size - gpu_addr;
-}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 4c7e31cb45ff..dab77b2bc8ac 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -25,12 +25,17 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
-#include "ast_drv.h"
+#include <linux/pci.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_vram_mm_helper.h>
+
+#include "ast_drv.h"
void ast_set_index_reg_mask(struct ast_private *ast,
uint32_t base, uint8_t index,
@@ -383,67 +388,8 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
-
- drm_gem_object_put_unlocked(ast_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(ast_fb);
-}
-
-static const struct drm_framebuffer_funcs ast_fb_funcs = {
- .destroy = ast_user_framebuffer_destroy,
-};
-
-
-int ast_framebuffer_init(struct drm_device *dev,
- struct ast_framebuffer *ast_fb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(dev, &ast_fb->base, mode_cmd);
- ast_fb->obj = obj;
- ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
- if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static struct drm_framebuffer *
-ast_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct ast_framebuffer *ast_fb;
- int ret;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- ast_fb = kzalloc(sizeof(*ast_fb), GFP_KERNEL);
- if (!ast_fb) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = ast_framebuffer_init(dev, ast_fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_put_unlocked(obj);
- kfree(ast_fb);
- return ERR_PTR(ret);
- }
- return &ast_fb->base;
-}
-
static const struct drm_mode_config_funcs ast_mode_funcs = {
- .fb_create = ast_user_framebuffer_create,
+	.fb_create = drm_gem_fb_create,
};
static u32 ast_get_vram_info(struct drm_device *dev)
@@ -561,7 +507,7 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_free;
- ret = ast_fbdev_init(dev);
+ ret = drm_fbdev_generic_setup(dev, 32);
if (ret)
goto out_free;
@@ -579,7 +525,6 @@ void ast_driver_unload(struct drm_device *dev)
ast_release_firmware(dev);
kfree(ast->dp501_fw_addr);
ast_mode_fini(dev);
- ast_fbdev_fini(dev);
drm_mode_config_cleanup(dev);
ast_mm_fini(ast);
@@ -609,6 +554,6 @@ int ast_gem_create(struct drm_device *dev,
DRM_ERROR("failed to allocate GEM object\n");
return ret;
}
- *obj = &gbo->gem;
+ *obj = &gbo->bo.base;
return 0;
}
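The net effect of the ast_main.c and ast_fb.c changes is the stock generic-fbdev pattern: route .fb_create through the GEM framebuffer helper and let the DRM core emulate the console. A minimal sketch of that shape, using a hypothetical "foo" driver rather than code from this series:

#include <drm/drm_device.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>

static const struct drm_mode_config_funcs foo_mode_funcs = {
	/* generic helper wraps the GEM object(s) in a drm_framebuffer */
	.fb_create = drm_gem_fb_create,
};

static int foo_load(struct drm_device *dev)
{
	dev->mode_config.funcs = &foo_mode_funcs;

	/* fbdev emulation on top of the KMS paths, 32 bpp console */
	return drm_fbdev_generic_setup(dev, 32);
}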
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index ffccbef962a4..1c899a6e87b7 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -27,14 +27,18 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
+
#include <linux/export.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include "ast_drv.h"
+#include "ast_drv.h"
#include "ast_tables.h"
static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
@@ -525,28 +529,16 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
{
- struct ast_private *ast = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct ast_framebuffer *ast_fb;
struct drm_gem_vram_object *gbo;
int ret;
s64 gpu_addr;
- void *base;
if (!atomic && fb) {
- ast_fb = to_ast_framebuffer(fb);
- obj = ast_fb->obj;
- gbo = drm_gem_vram_of_gem(obj);
-
- /* unmap if console */
- if (&ast->fbdev->afb == ast_fb)
- drm_gem_vram_kunmap(gbo);
+ gbo = drm_gem_vram_of_gem(fb->obj[0]);
drm_gem_vram_unpin(gbo);
}
- ast_fb = to_ast_framebuffer(crtc->primary->fb);
- obj = ast_fb->obj;
- gbo = drm_gem_vram_of_gem(obj);
+ gbo = drm_gem_vram_of_gem(crtc->primary->fb->obj[0]);
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
@@ -557,17 +549,6 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
goto err_drm_gem_vram_unpin;
}
- if (&ast->fbdev->afb == ast_fb) {
- /* if pushing console in kmap it */
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- DRM_ERROR("failed to kmap fbcon\n");
- } else {
- ast_fbdev_set_base(ast, gpu_addr);
- }
- }
-
ast_set_offset_reg(crtc);
ast_set_start_address_crt1(crtc, (u32)gpu_addr);
@@ -624,14 +605,10 @@ static void ast_crtc_disable(struct drm_crtc *crtc)
DRM_DEBUG_KMS("\n");
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
- struct ast_private *ast = crtc->dev->dev_private;
- struct ast_framebuffer *ast_fb = to_ast_framebuffer(crtc->primary->fb);
- struct drm_gem_object *obj = ast_fb->obj;
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
-
- /* unmap if console */
- if (&ast->fbdev->afb == ast_fb)
- drm_gem_vram_kunmap(gbo);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_gem_vram_object *gbo =
+ drm_gem_vram_of_gem(fb->obj[0]);
+
drm_gem_vram_unpin(gbo);
}
crtc->primary->fb = NULL;
@@ -890,7 +867,14 @@ static int ast_connector_init(struct drm_device *dev)
return -ENOMEM;
connector = &ast_connector->base;
- drm_connector_init(dev, connector, &ast_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+ ast_connector->i2c = ast_i2c_create(dev);
+ if (!ast_connector->i2c)
+ DRM_ERROR("failed to add ddc bus for connector\n");
+
+ drm_connector_init_with_ddc(dev, connector,
+ &ast_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &ast_connector->i2c->adapter);
drm_connector_helper_add(connector, &ast_connector_helper_funcs);
@@ -904,10 +888,6 @@ static int ast_connector_init(struct drm_device *dev)
encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
drm_connector_attach_encoder(connector, encoder);
- ast_connector->i2c = ast_i2c_create(dev);
- if (!ast_connector->i2c)
- DRM_ERROR("failed to add ddc bus for connector\n");
-
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index f7d421359d56..e1d9cdf6ec1d 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -26,10 +26,13 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
-#include "ast_drv.h"
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_print.h>
#include "ast_dram_tables.h"
+#include "ast_drv.h"
static void ast_post_chip_2300(struct drm_device *dev);
static void ast_post_chip_2500(struct drm_device *dev);
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 779c53efee8e..c52d92294171 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -25,7 +25,12 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+
+#include <drm/drm_print.h>
+#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_vram_mm_helper.h>
#include "ast_drv.h"
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 2a413e291a60..580aa2676358 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -35,7 +35,6 @@
#include <drm/ati_pcigart.h>
#include <drm/drm_device.h>
-#include <drm/drm_os_linux.h>
#include <drm/drm_pci.h>
#include <drm/drm_print.h>
@@ -169,6 +168,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
page_base = (u32) entry->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+ u32 offset;
u32 val;
switch(gart_info->gart_reg_if) {
@@ -184,10 +184,12 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
break;
}
if (gart_info->gart_table_location ==
- DRM_ATI_GART_MAIN)
+ DRM_ATI_GART_MAIN) {
pci_gart[gart_idx] = cpu_to_le32(val);
- else
- DRM_WRITE32(map, gart_idx * sizeof(u32), val);
+ } else {
+ offset = gart_idx * sizeof(u32);
+ writel(val, (void __iomem *)map->handle + offset);
+ }
gart_idx++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
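The ati_pcigart hunk swaps the legacy DRM_WRITE32() macro from <drm/drm_os_linux.h> for a plain writel(), making the __iomem annotation explicit. A roughly equivalent accessor, as an illustration only:

#include <linux/io.h>
#include <linux/types.h>

/* Write one little-endian 32-bit GART entry at index idx; this is
 * approximately what DRM_WRITE32(map, idx * sizeof(u32), val) did. */
static void gart_write_entry(void __iomem *mmio, unsigned int idx, u32 val)
{
	writel(val, mmio + idx * sizeof(u32));
}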
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 6c6c7cf3c3e8..f2e73e6d46b8 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -8,15 +8,19 @@
*/
#include <linux/clk.h>
+#include <linux/mfd/atmel-hlcdc.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/pinctrl/consumer.h>
+#include <video/videomode.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
-
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "atmel_hlcdc_dc.h"
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 9bab6e5ba76b..92640298ad41 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -11,8 +11,20 @@
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/mfd/atmel-hlcdc.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "atmel_hlcdc_dc.h"
@@ -823,9 +835,7 @@ static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver atmel_hlcdc_dc_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = atmel_hlcdc_dc_irq_handler,
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
.irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
@@ -834,8 +844,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 7300e3fd273e..469d4507e576 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -11,23 +11,9 @@
#ifndef DRM_ATMEL_HLCDC_H
#define DRM_ATMEL_HLCDC_H
-#include <linux/clk.h>
-#include <linux/dmapool.h>
-#include <linux/irqdomain.h>
-#include <linux/mfd/atmel-hlcdc.h>
-#include <linux/pwm.h>
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drmP.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_plane.h>
#define ATMEL_HLCDC_LAYER_CHER 0x0
#define ATMEL_HLCDC_LAYER_CHDR 0x4
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 7e08318b262e..375fa84c548b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -8,9 +8,10 @@
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*/
+#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 4127aca212bb..89f5a756fa37 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -6,6 +6,16 @@
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*/
+#include <linux/dmapool.h>
+#include <linux/mfd/atmel-hlcdc.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+
#include "atmel_hlcdc_dc.h"
/**
@@ -361,7 +371,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
atmel_hlcdc_layer_write_cfg(&plane->layer, ATMEL_HLCDC_LAYER_DMA_CFG,
cfg);
- cfg = ATMEL_HLCDC_LAYER_DMA;
+ cfg = ATMEL_HLCDC_LAYER_DMA | ATMEL_HLCDC_LAYER_REP;
if (plane->base.type != DRM_PLANE_TYPE_PRIMARY) {
cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 2a65434500ee..68483a2fc12c 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -1,17 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
#include <linux/io.h>
#include <linux/console.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vram_mm_helper.h>
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 8f3a5bda9d03..770e1625d05e 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -2,11 +2,10 @@
/*
*/
-#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/slab.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_probe_helper.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
#include <drm/drm_atomic_helper.h>
#include "bochs.h"
@@ -65,8 +64,7 @@ static const struct file_operations bochs_fops = {
};
static struct drm_driver bochs_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
- DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &bochs_fops,
.name = "bochs-drm",
.desc = "bochs dispi vga interface (qemu stdvga)",
@@ -74,7 +72,6 @@ static struct drm_driver bochs_driver = {
.major = 1,
.minor = 0,
DRM_GEM_VRAM_DRIVER,
- DRM_GEM_VRAM_DRIVER_PRIME,
};
/* ---------------------------------------------------------------------- */
@@ -83,16 +80,14 @@ static struct drm_driver bochs_driver = {
#ifdef CONFIG_PM_SLEEP
static int bochs_pm_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_suspend(drm_dev);
}
static int bochs_pm_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return drm_mode_config_helper_resume(drm_dev);
}
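The bochs PM callbacks shrink because pci_set_drvdata() stores into the embedded struct device, so dev_get_drvdata() already yields the drm_device without the to_pci_dev() round trip. Sketched with a hypothetical "foo" driver:

#include <linux/device.h>
#include <linux/pm.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_helper.h>

static int foo_pm_suspend(struct device *dev)
{
	/* drvdata was set to the drm_device by pci_set_drvdata() in probe */
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int foo_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_resume(drm);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);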
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index ebfea8744fe6..e567bdfa2ab8 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -2,6 +2,10 @@
/*
*/
+#include <linux/pci.h>
+
+#include <drm/drm_fourcc.h>
+
#include "bochs.h"
/* ---------------------------------------------------------------------- */
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 359030d5d818..02a9c1ed165b 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -2,12 +2,14 @@
/*
*/
-#include "bochs.h"
+#include <linux/moduleparam.h>
+
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "bochs.h"
static int defx = 1024;
static int defy = 768;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index e4e22bbae2a7..1cc9f502c1f2 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -21,7 +21,7 @@ config DRM_ANALOGIX_ANX78XX
select DRM_KMS_HELPER
select REGMAP_I2C
---help---
- ANX78XX is an ultra-low Full-HD SlimPort transmitter
+ ANX78XX is an ultra-low power Full-HD SlimPort transmitter
designed for portable devices. The ANX78XX transforms
the HDMI output of an application processor to MyDP
or DisplayPort.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index f6d2681f6927..98bccace8c1c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -874,9 +874,6 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
&adv7511_connector_helper_funcs);
drm_connector_attach_encoder(&adv->connector, bridge->encoder);
- if (adv->type == ADV7533)
- ret = adv7533_attach_dsi(adv);
-
if (adv->i2c_main->irq)
regmap_write(adv->regmap, ADV7511_REG_INT_ENABLE(0),
ADV7511_INT0_HPD);
@@ -1222,8 +1219,17 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
drm_bridge_add(&adv7511->bridge);
adv7511_audio_init(dev, adv7511);
+
+ if (adv7511->type == ADV7533) {
+ ret = adv7533_attach_dsi(adv7511);
+ if (ret)
+ goto err_remove_bridge;
+ }
+
return 0;
+err_remove_bridge:
+ drm_bridge_remove(&adv7511->bridge);
err_unregister_cec:
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
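The adv7511 fix moves the DSI attach out of bridge attach into probe and adds an unwind label, so a failure after drm_bridge_add() takes the bridge back out. The ordering pattern, with hypothetical names (foo, foo_attach_dsi) standing in for the real ones:

#include <drm/drm_bridge.h>

struct foo {
	struct drm_bridge bridge;
};

int foo_attach_dsi(struct foo *foo);	/* assumed helper; may fail */

static int foo_probe_tail(struct foo *foo)
{
	int ret;

	drm_bridge_add(&foo->bridge);

	/* any step that can fail after drm_bridge_add() must unwind it */
	ret = foo_attach_dsi(foo);
	if (ret)
		goto err_remove_bridge;

	return 0;

err_remove_bridge:
	drm_bridge_remove(&foo->bridge);
	return ret;
}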
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 3f7f4880be09..f2f7f69d6cc3 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <drm/bridge/analogix_dp.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -101,63 +102,7 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
return 0;
}
-int analogix_dp_psr_enabled(struct analogix_dp_device *dp)
-{
-
- return dp->psr_enable;
-}
-EXPORT_SYMBOL_GPL(analogix_dp_psr_enabled);
-
-int analogix_dp_enable_psr(struct analogix_dp_device *dp)
-{
- struct dp_sdp psr_vsc;
-
- if (!dp->psr_enable)
- return 0;
-
- /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
-
- psr_vsc.db[0] = 0;
- psr_vsc.db[1] = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
-
- return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
-}
-EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
-
-int analogix_dp_disable_psr(struct analogix_dp_device *dp)
-{
- struct dp_sdp psr_vsc;
- int ret;
-
- if (!dp->psr_enable)
- return 0;
-
- /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
-
- psr_vsc.db[0] = 0;
- psr_vsc.db[1] = 0;
-
- ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
- if (ret != 1) {
- dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret);
- return ret;
- }
-
- return analogix_dp_send_psr_spd(dp, &psr_vsc, false);
-}
-EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
-
-static int analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
+static bool analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
{
unsigned char psr_version;
int ret;
@@ -165,14 +110,11 @@ static int analogix_dp_detect_sink_psr(struct analogix_dp_device *dp)
ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_SUPPORT, &psr_version);
if (ret != 1) {
dev_err(dp->dev, "failed to get PSR version, disable it\n");
- return ret;
+ return false;
}
dev_dbg(dp->dev, "Panel PSR version : %x\n", psr_version);
-
- dp->psr_enable = (psr_version & DP_PSR_IS_SUPPORTED) ? true : false;
-
- return 0;
+ return psr_version & DP_PSR_IS_SUPPORTED;
}
static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
@@ -195,7 +137,7 @@ static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
}
/* Main-Link transmitter remains active during PSR active states */
- psr_en = DP_PSR_MAIN_LINK_ACTIVE | DP_PSR_CRC_VERIFICATION;
+ psr_en = DP_PSR_CRC_VERIFICATION;
ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
if (ret != 1) {
dev_err(dp->dev, "failed to set panel psr\n");
@@ -203,8 +145,7 @@ static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
}
/* Enable psr function */
- psr_en = DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE |
- DP_PSR_CRC_VERIFICATION;
+ psr_en = DP_PSR_ENABLE | DP_PSR_CRC_VERIFICATION;
ret = drm_dp_dpcd_writeb(&dp->aux, DP_PSR_EN_CFG, psr_en);
if (ret != 1) {
dev_err(dp->dev, "failed to set panel psr\n");
@@ -213,10 +154,11 @@ static int analogix_dp_enable_sink_psr(struct analogix_dp_device *dp)
analogix_dp_enable_psr_crc(dp);
+ dp->psr_supported = true;
+
return 0;
end:
dev_err(dp->dev, "enable psr fail, force to disable psr\n");
- dp->psr_enable = false;
return ret;
}
@@ -1031,24 +973,90 @@ static int analogix_dp_commit(struct analogix_dp_device *dp)
}
}
- ret = analogix_dp_detect_sink_psr(dp);
+ /* Check whether panel supports fast training */
+ ret = analogix_dp_fast_link_train_detection(dp);
if (ret)
return ret;
- if (dp->psr_enable) {
+ if (analogix_dp_detect_sink_psr(dp)) {
ret = analogix_dp_enable_sink_psr(dp);
if (ret)
return ret;
}
- /* Check whether panel supports fast training */
- ret = analogix_dp_fast_link_train_detection(dp);
- if (ret)
- dp->psr_enable = false;
+ return ret;
+}
+
+static int analogix_dp_enable_psr(struct analogix_dp_device *dp)
+{
+ struct dp_sdp psr_vsc;
+ int ret;
+ u8 sink;
+
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_STATUS, &sink);
+ if (ret != 1)
+ DRM_DEV_ERROR(dp->dev, "Failed to read psr status %d\n", ret);
+ else if (sink == DP_PSR_SINK_ACTIVE_RFB)
+ return 0;
+
+ /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+ psr_vsc.db[0] = 0;
+ psr_vsc.db[1] = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
+
+ ret = analogix_dp_send_psr_spd(dp, &psr_vsc, true);
+ if (!ret)
+ analogix_dp_set_analog_power_down(dp, POWER_ALL, true);
return ret;
}
+static int analogix_dp_disable_psr(struct analogix_dp_device *dp)
+{
+ struct dp_sdp psr_vsc;
+ int ret;
+ u8 sink;
+
+ analogix_dp_set_analog_power_down(dp, POWER_ALL, false);
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+ if (ret != 1) {
+ DRM_DEV_ERROR(dp->dev, "Failed to set DP Power0 %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_PSR_STATUS, &sink);
+ if (ret != 1) {
+ DRM_DEV_ERROR(dp->dev, "Failed to read psr status %d\n", ret);
+ return ret;
+ } else if (sink == DP_PSR_SINK_INACTIVE) {
+		DRM_DEV_ERROR(dp->dev, "sink inactive, skip disable psr\n");
+ return 0;
+ }
+
+ ret = analogix_dp_train_link(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Failed to train the link %d\n", ret);
+ return ret;
+ }
+
+ /* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+
+ psr_vsc.db[0] = 0;
+ psr_vsc.db[1] = 0;
+
+ return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
+}
+
/*
* This function is a bit of a catch-all for panel preparation, hopefully
* simplifying the logic of functions that need to prepare/unprepare the panel
@@ -1139,9 +1147,37 @@ analogix_dp_best_encoder(struct drm_connector *connector)
return dp->encoder;
}
+
+static int analogix_dp_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct analogix_dp_device *dp = to_dp(connector);
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return -ENODEV;
+
+ conn_state->self_refresh_aware = true;
+
+ if (!conn_state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (!crtc_state)
+ return 0;
+
+ if (crtc_state->self_refresh_active && !dp->psr_supported)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct drm_connector_helper_funcs analogix_dp_connector_helper_funcs = {
.get_modes = analogix_dp_get_modes,
.best_encoder = analogix_dp_best_encoder,
+ .atomic_check = analogix_dp_atomic_check,
};
static enum drm_connector_status
@@ -1233,11 +1269,42 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
return 0;
}
-static void analogix_dp_bridge_pre_enable(struct drm_bridge *bridge)
+static
+struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
+ struct drm_atomic_state *state)
+{
+ struct drm_encoder *encoder = dp->encoder;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+
+static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int ret;
+ crtc = analogix_dp_get_new_crtc(dp, state);
+ if (!crtc)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ /* Don't touch the panel if we're coming back from PSR */
+ if (old_crtc_state && old_crtc_state->self_refresh_active)
+ return;
+
ret = analogix_dp_prepare_panel(dp, true, true);
if (ret)
DRM_ERROR("failed to setup the panel ret = %d\n", ret);
@@ -1298,10 +1365,27 @@ out_dp_clk_pre:
return ret;
}
-static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
+static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
+ int ret;
+
+ crtc = analogix_dp_get_new_crtc(dp, state);
+ if (!crtc)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ /* Not a full enable, just disable PSR and continue */
+ if (old_crtc_state && old_crtc_state->self_refresh_active) {
+ ret = analogix_dp_disable_psr(dp);
+ if (ret)
+ DRM_ERROR("Failed to disable psr %d\n", ret);
+ return;
+ }
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
@@ -1350,11 +1434,56 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
if (ret)
DRM_ERROR("failed to setup the panel ret = %d\n", ret);
- dp->psr_enable = false;
dp->fast_train_enable = false;
+ dp->psr_supported = false;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
+static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+
+ crtc = analogix_dp_get_new_crtc(dp, state);
+ if (!crtc)
+ goto out;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state)
+ goto out;
+
+ /* Don't do a full disable on PSR transitions */
+ if (new_crtc_state->self_refresh_active)
+ return;
+
+out:
+ analogix_dp_bridge_disable(bridge);
+}
+
+static
+void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct analogix_dp_device *dp = bridge->driver_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ int ret;
+
+ crtc = analogix_dp_get_new_crtc(dp, state);
+ if (!crtc)
+ return;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state || !new_crtc_state->self_refresh_active)
+ return;
+
+ ret = analogix_dp_enable_psr(dp);
+ if (ret)
+ DRM_ERROR("Failed to enable psr (%d)\n", ret);
+}
+
static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *orig_mode,
const struct drm_display_mode *mode)
@@ -1432,16 +1561,11 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
video->interlaced = true;
}
-static void analogix_dp_bridge_nop(struct drm_bridge *bridge)
-{
- /* do nothing */
-}
-
static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
- .pre_enable = analogix_dp_bridge_pre_enable,
- .enable = analogix_dp_bridge_enable,
- .disable = analogix_dp_bridge_disable,
- .post_disable = analogix_dp_bridge_nop,
+ .atomic_pre_enable = analogix_dp_bridge_atomic_pre_enable,
+ .atomic_enable = analogix_dp_bridge_atomic_enable,
+ .atomic_disable = analogix_dp_bridge_atomic_disable,
+ .atomic_post_disable = analogix_dp_bridge_atomic_post_disable,
.mode_set = analogix_dp_bridge_mode_set,
.attach = analogix_dp_bridge_attach,
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index da058252dcaf..c051502d7fbf 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -171,8 +171,8 @@ struct analogix_dp_device {
int dpms_mode;
struct gpio_desc *hpd_gpiod;
bool force_hpd;
- bool psr_enable;
bool fast_train_enable;
+ bool psr_supported;
struct mutex panel_lock;
bool panel_is_modeset;
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index d32885b906ae..8ef6539ae78a 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -111,8 +111,10 @@ static int dumb_vga_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&vga->connector,
&dumb_vga_con_helper_funcs);
- ret = drm_connector_init(bridge->dev, &vga->connector,
- &dumb_vga_con_funcs, DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init_with_ddc(bridge->dev, &vga->connector,
+ &dumb_vga_con_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ vga->ddc);
if (ret) {
DRM_ERROR("Failed to initialize connector\n");
return ret;
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 79311f8354bd..6e81e5db57f2 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -19,7 +19,6 @@
* Host -> LVDS|--(STDP4028)--|DP -> DP|--(STDP2690)--|DP++ -> Video output
*/
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 98bc650b8c95..d4a1cc5052c3 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -6,13 +6,10 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
-
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 2d88146e4836..93c68e2e9484 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -8,7 +8,6 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index dd7aa466b280..38f75ac580df 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -158,6 +158,8 @@
#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500
+#define SII902X_AUDIO_PORT_INDEX 3
+
struct sii902x {
struct i2c_client *i2c;
struct regmap *regmap;
@@ -568,13 +570,14 @@ static int sii902x_audio_hw_params(struct device *dev, void *data,
return ret;
}
- mclk_rate = clk_get_rate(sii902x->audio.mclk);
-
- ret = sii902x_select_mclk_div(&i2s_config_reg, params->sample_rate,
- mclk_rate);
- if (mclk_rate != ret * params->sample_rate)
- dev_dbg(dev, "Inaccurate reference clock (%ld/%d != %u)\n",
- mclk_rate, ret, params->sample_rate);
+ if (sii902x->audio.mclk) {
+ mclk_rate = clk_get_rate(sii902x->audio.mclk);
+ ret = sii902x_select_mclk_div(&i2s_config_reg,
+ params->sample_rate, mclk_rate);
+ if (mclk_rate != ret * params->sample_rate)
+ dev_dbg(dev, "Inaccurate reference clock (%ld/%d != %u)\n",
+ mclk_rate, ret, params->sample_rate);
+ }
mutex_lock(&sii902x->mutex);
@@ -662,7 +665,8 @@ static void sii902x_audio_shutdown(struct device *dev, void *data)
clk_disable_unprepare(sii902x->audio.mclk);
}
-int sii902x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int sii902x_audio_digital_mute(struct device *dev,
+ void *data, bool enable)
{
struct sii902x *sii902x = dev_get_drvdata(dev);
@@ -690,11 +694,32 @@ static int sii902x_audio_get_eld(struct device *dev, void *data,
return 0;
}
+static int sii902x_audio_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
+{
+ struct of_endpoint of_ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+ /*
+	 * HDMI sound should be located at reg = <3>;
+	 * return the expected DAI index 0.
+ */
+ if (of_ep.port == SII902X_AUDIO_PORT_INDEX)
+ return 0;
+
+ return -EINVAL;
+}
+
static const struct hdmi_codec_ops sii902x_audio_codec_ops = {
.hw_params = sii902x_audio_hw_params,
.audio_shutdown = sii902x_audio_shutdown,
.digital_mute = sii902x_audio_digital_mute,
.get_eld = sii902x_audio_get_eld,
+ .get_dai_id = sii902x_audio_get_dai_id,
};
static int sii902x_audio_codec_init(struct sii902x *sii902x,
@@ -750,10 +775,11 @@ static int sii902x_audio_codec_init(struct sii902x *sii902x,
sii902x->audio.i2s_fifo_sequence[i] |= audio_fifo_id[i] |
i2s_lane_id[lanes[i]] | SII902X_TPI_I2S_FIFO_ENABLE;
+ sii902x->audio.mclk = devm_clk_get_optional(dev, "mclk");
if (IS_ERR(sii902x->audio.mclk)) {
dev_err(dev, "%s: No clock (audio mclk) found: %ld\n",
__func__, PTR_ERR(sii902x->audio.mclk));
- return 0;
+ return PTR_ERR(sii902x->audio.mclk);
}
sii902x->audio.pdev = platform_device_register_data(
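The sii902x fix switches to devm_clk_get_optional(): a genuinely absent "mclk" now yields NULL and is treated as "no clock", while real lookup failures (including -EPROBE_DEFER) propagate instead of being swallowed. The pattern in isolation, as a sketch:

#include <linux/clk.h>
#include <linux/device.h>

static int foo_get_mclk(struct device *dev, struct clk **mclk)
{
	/* NULL (not an error) when the device describes no "mclk" */
	*mclk = devm_clk_get_optional(dev, "mclk");
	if (IS_ERR(*mclk))
		return PTR_ERR(*mclk);	/* real failure, incl. probe defer */

	if (*mclk)
		dev_dbg(dev, "mclk running at %lu Hz\n", clk_get_rate(*mclk));

	return 0;
}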
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index c6490949d9db..83b94b66e464 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -508,8 +508,14 @@ static void hdmi_set_cts_n(struct dw_hdmi *hdmi, unsigned int cts,
/* nshift factor = 0 */
hdmi_modb(hdmi, 0, HDMI_AUD_CTS3_N_SHIFT_MASK, HDMI_AUD_CTS3);
- hdmi_writeb(hdmi, ((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
- HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
+ /* Use automatic CTS generation mode when CTS is not set */
+ if (cts)
+ hdmi_writeb(hdmi, ((cts >> 16) &
+ HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
+ HDMI_AUD_CTS3_CTS_MANUAL,
+ HDMI_AUD_CTS3);
+ else
+ hdmi_writeb(hdmi, 0, HDMI_AUD_CTS3);
hdmi_writeb(hdmi, (cts >> 8) & 0xff, HDMI_AUD_CTS2);
hdmi_writeb(hdmi, cts & 0xff, HDMI_AUD_CTS1);
@@ -579,24 +585,33 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,
{
unsigned long ftdms = pixel_clk;
unsigned int n, cts;
+ u8 config3;
u64 tmp;
n = hdmi_compute_n(sample_rate, pixel_clk);
- /*
- * Compute the CTS value from the N value. Note that CTS and N
- * can be up to 20 bits in total, so we need 64-bit math. Also
- * note that our TDMS clock is not fully accurate; it is accurate
- * to kHz. This can introduce an unnecessary remainder in the
- * calculation below, so we don't try to warn about that.
- */
- tmp = (u64)ftdms * n;
- do_div(tmp, 128 * sample_rate);
- cts = tmp;
+ config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID);
- dev_dbg(hdmi->dev, "%s: fs=%uHz ftdms=%lu.%03luMHz N=%d cts=%d\n",
- __func__, sample_rate, ftdms / 1000000, (ftdms / 1000) % 1000,
- n, cts);
+ /* Only compute CTS when using internal AHB audio */
+ if (config3 & HDMI_CONFIG3_AHBAUDDMA) {
+ /*
+ * Compute the CTS value from the N value. Note that CTS and N
+ * can be up to 20 bits in total, so we need 64-bit math. Also
+ * note that our TDMS clock is not fully accurate; it is
+ * accurate to kHz. This can introduce an unnecessary remainder
+ * in the calculation below, so we don't try to warn about that.
+ */
+ tmp = (u64)ftdms * n;
+ do_div(tmp, 128 * sample_rate);
+ cts = tmp;
+
+ dev_dbg(hdmi->dev, "%s: fs=%uHz ftdms=%lu.%03luMHz N=%d cts=%d\n",
+ __func__, sample_rate,
+ ftdms / 1000000, (ftdms / 1000) % 1000,
+ n, cts);
+ } else {
+ cts = 0;
+ }
spin_lock_irq(&hdmi->audio_lock);
hdmi->audio_n = n;
@@ -2185,8 +2200,10 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
drm_connector_helper_add(connector, &dw_hdmi_connector_helper_funcs);
- drm_connector_init(bridge->dev, connector, &dw_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(bridge->dev, connector,
+ &dw_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc);
drm_connector_attach_encoder(connector, encoder);
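The dw-hdmi hunk keeps the CTS derivation but only computes it for the internal AHB audio DMA, writing 0 (automatic CTS generation) otherwise. With assumed figures of a 74.25 MHz TMDS clock, N = 6144 and 48 kHz audio: CTS = f_TMDS * N / (128 * f_s) = 74250000 * 6144 / (128 * 48000) = 74250; the intermediate product exceeds 32 bits, hence the u64 and do_div(). A sketch of just the arithmetic:

#include <asm/div64.h>
#include <linux/types.h>

static u32 hdmi_compute_cts(unsigned long ftdms, unsigned int n,
			    unsigned int sample_rate)
{
	/* CTS = f_TMDS * N / (128 * f_s); needs a 64-bit intermediate */
	u64 tmp = (u64)ftdms * n;

	do_div(tmp, 128 * sample_rate);
	return tmp;
}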
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index 281c58bab1a1..675442bfc1bd 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -89,6 +90,8 @@
#define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1
#define VID_MODE_TYPE_BURST 0x2
#define VID_MODE_TYPE_MASK 0x3
+#define VID_MODE_VPG_ENABLE BIT(16)
+#define VID_MODE_VPG_HORIZONTAL BIT(24)
#define DSI_VID_PKT_SIZE 0x3c
#define VID_PKT_SIZE(p) ((p) & 0x3fff)
@@ -233,6 +236,13 @@ struct dw_mipi_dsi {
u32 format;
unsigned long mode_flags;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+
+ bool vpg;
+ bool vpg_horizontal;
+#endif /* CONFIG_DEBUG_FS */
+
struct dw_mipi_dsi *master; /* dual-dsi master ptr */
struct dw_mipi_dsi *slave; /* dual-dsi slave ptr */
@@ -518,6 +528,13 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
else
val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
+#ifdef CONFIG_DEBUG_FS
+ if (dsi->vpg) {
+ val |= VID_MODE_VPG_ENABLE;
+ val |= dsi->vpg_horizontal ? VID_MODE_VPG_HORIZONTAL : 0;
+ }
+#endif /* CONFIG_DEBUG_FS */
+
dsi_write(dsi, DSI_VID_MODE_CFG, val);
}
@@ -930,6 +947,33 @@ static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
.attach = dw_mipi_dsi_bridge_attach,
};
+#ifdef CONFIG_DEBUG_FS
+
+static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi)
+{
+ dsi->debugfs = debugfs_create_dir(dev_name(dsi->dev), NULL);
+ if (IS_ERR(dsi->debugfs)) {
+ dev_err(dsi->dev, "failed to create debugfs root\n");
+ return;
+ }
+
+ debugfs_create_bool("vpg", 0660, dsi->debugfs, &dsi->vpg);
+ debugfs_create_bool("vpg_horizontal", 0660, dsi->debugfs,
+ &dsi->vpg_horizontal);
+}
+
+static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi)
+{
+ debugfs_remove_recursive(dsi->debugfs);
+}
+
+#else
+
+static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi) { }
+static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi) { }
+
+#endif /* CONFIG_DEBUG_FS */
+
static struct dw_mipi_dsi *
__dw_mipi_dsi_probe(struct platform_device *pdev,
const struct dw_mipi_dsi_plat_data *plat_data)
@@ -1000,6 +1044,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
clk_disable_unprepare(dsi->pclk);
}
+ dw_mipi_dsi_debugfs_init(dsi);
pm_runtime_enable(dev);
dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
@@ -1007,6 +1052,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
ret = mipi_dsi_host_register(&dsi->dsi_host);
if (ret) {
dev_err(dev, "Failed to register MIPI host: %d\n", ret);
+ dw_mipi_dsi_debugfs_remove(dsi);
return ERR_PTR(ret);
}
@@ -1024,6 +1070,7 @@ static void __dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
mipi_dsi_host_unregister(&dsi->dsi_host);
pm_runtime_disable(dsi->dev);
+ dw_mipi_dsi_debugfs_remove(dsi);
}
void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave)
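The dw-mipi-dsi change wires two debugfs booleans straight into the state that dw_mipi_dsi_video_mode_config() samples when it programs VID_MODE_CFG. The generic knob pattern, sketched with hypothetical names:

#include <linux/debugfs.h>

struct foo_debug {
	struct dentry *root;
	bool vpg;		/* enable the video pattern generator */
	bool vpg_horizontal;	/* horizontal rather than vertical pattern */
};

static void foo_debugfs_init(struct foo_debug *dbg, const char *name)
{
	dbg->root = debugfs_create_dir(name, NULL);
	/* the files read/write the bools directly; no driver callback */
	debugfs_create_bool("vpg", 0660, dbg->root, &dbg->vpg);
	debugfs_create_bool("vpg_horizontal", 0660, dbg->root,
			    &dbg->vpg_horizontal);
}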
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 13ade28a36a8..42f03a985ac0 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -15,6 +15,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
@@ -47,6 +48,7 @@
/* Video Path */
#define VPCTRL0 0x0450
+#define VSDELAY GENMASK(31, 20)
#define OPXLFMT_RGB666 (0 << 8)
#define OPXLFMT_RGB888 (1 << 8)
#define FRMSYNC_DISABLED (0 << 4) /* Video Timing Gen Disabled */
@@ -54,9 +56,17 @@
#define MSF_DISABLED (0 << 0) /* Magic Square FRC disabled */
#define MSF_ENABLED (1 << 0) /* Magic Square FRC enabled */
#define HTIM01 0x0454
+#define HPW GENMASK(8, 0)
+#define HBPR GENMASK(24, 16)
#define HTIM02 0x0458
+#define HDISPR GENMASK(10, 0)
+#define HFPR GENMASK(24, 16)
#define VTIM01 0x045c
+#define VSPR GENMASK(7, 0)
+#define VBPR GENMASK(23, 16)
#define VTIM02 0x0460
+#define VFPR GENMASK(23, 16)
+#define VDISPR GENMASK(10, 0)
#define VFUEN0 0x0464
#define VFUEN BIT(0) /* Video Frame Timing Upload */
@@ -70,6 +80,13 @@
#define DP0_VIDSRC_DSI_RX (1 << 0)
#define DP0_VIDSRC_DPI_RX (2 << 0)
#define DP0_VIDSRC_COLOR_BAR (3 << 0)
+#define SYSRSTENB 0x050c
+#define ENBI2C (1 << 0)
+#define ENBLCD0 (1 << 2)
+#define ENBBM (1 << 3)
+#define ENBDSIRX (1 << 4)
+#define ENBREG (1 << 5)
+#define ENBHDCP (1 << 8)
#define GPIOM 0x0540
#define GPIOC 0x0544
#define GPIOO 0x0548
@@ -99,19 +116,35 @@
/* Main Channel */
#define DP0_SECSAMPLE 0x0640
#define DP0_VIDSYNCDELAY 0x0644
+#define VID_SYNC_DLY GENMASK(15, 0)
+#define THRESH_DLY GENMASK(31, 16)
+
#define DP0_TOTALVAL 0x0648
+#define H_TOTAL GENMASK(15, 0)
+#define V_TOTAL GENMASK(31, 16)
#define DP0_STARTVAL 0x064c
+#define H_START GENMASK(15, 0)
+#define V_START GENMASK(31, 16)
#define DP0_ACTIVEVAL 0x0650
+#define H_ACT GENMASK(15, 0)
+#define V_ACT GENMASK(31, 16)
+
#define DP0_SYNCVAL 0x0654
+#define VS_WIDTH GENMASK(30, 16)
+#define HS_WIDTH GENMASK(14, 0)
#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
#define DP0_MISC 0x0658
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
+#define MAX_TU_SYMBOL GENMASK(28, 23)
+#define TU_SIZE GENMASK(21, 16)
#define BPC_6 (0 << 5)
#define BPC_8 (1 << 5)
/* AUX channel */
#define DP0_AUXCFG0 0x0660
+#define DP0_AUXCFG0_BSIZE GENMASK(11, 8)
+#define DP0_AUXCFG0_ADDR_ONLY BIT(4)
#define DP0_AUXCFG1 0x0664
#define AUX_RX_FILTER_EN BIT(16)
@@ -119,10 +152,10 @@
#define DP0_AUXWDATA(i) (0x066c + (i) * 4)
#define DP0_AUXRDATA(i) (0x067c + (i) * 4)
#define DP0_AUXSTATUS 0x068c
-#define AUX_STATUS_MASK 0xf0
-#define AUX_STATUS_SHIFT 4
-#define AUX_TIMEOUT BIT(1)
-#define AUX_BUSY BIT(0)
+#define AUX_BYTES GENMASK(15, 8)
+#define AUX_STATUS GENMASK(7, 4)
+#define AUX_TIMEOUT BIT(1)
+#define AUX_BUSY BIT(0)
#define DP0_AUXI2CADR 0x0698
/* Link Training */
@@ -183,6 +216,12 @@
/* Test & Debug */
#define TSTCTL 0x0a00
+#define COLOR_R GENMASK(31, 24)
+#define COLOR_G GENMASK(23, 16)
+#define COLOR_B GENMASK(15, 8)
+#define ENI2CFILTER BIT(4)
+#define COLOR_BAR_MODE GENMASK(1, 0)
+#define COLOR_BAR_MODE_BARS 2
#define PLL_DBG 0x0a04
static bool tc_test_pattern;
@@ -241,137 +280,131 @@ static inline struct tc_data *connector_to_tc(struct drm_connector *c)
return container_of(c, struct tc_data, connector);
}
-/* Simple macros to avoid repeated error checks */
-#define tc_write(reg, var) \
- do { \
- ret = regmap_write(tc->regmap, reg, var); \
- if (ret) \
- goto err; \
- } while (0)
-#define tc_read(reg, var) \
- do { \
- ret = regmap_read(tc->regmap, reg, var); \
- if (ret) \
- goto err; \
- } while (0)
-
-static inline int tc_poll_timeout(struct regmap *map, unsigned int addr,
+static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
unsigned int cond_mask,
unsigned int cond_value,
unsigned long sleep_us, u64 timeout_us)
{
- ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
unsigned int val;
- int ret;
- for (;;) {
- ret = regmap_read(map, addr, &val);
- if (ret)
- break;
- if ((val & cond_mask) == cond_value)
- break;
- if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) {
- ret = regmap_read(map, addr, &val);
- break;
- }
- if (sleep_us)
- usleep_range((sleep_us >> 2) + 1, sleep_us);
- }
- return ret ?: (((val & cond_mask) == cond_value) ? 0 : -ETIMEDOUT);
+ return regmap_read_poll_timeout(tc->regmap, addr, val,
+ (val & cond_mask) == cond_value,
+ sleep_us, timeout_us);
}
-static int tc_aux_wait_busy(struct tc_data *tc, unsigned int timeout_ms)
+static int tc_aux_wait_busy(struct tc_data *tc)
{
- return tc_poll_timeout(tc->regmap, DP0_AUXSTATUS, AUX_BUSY, 0,
- 1000, 1000 * timeout_ms);
+ return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
}
-static int tc_aux_get_status(struct tc_data *tc, u8 *reply)
+static int tc_aux_write_data(struct tc_data *tc, const void *data,
+ size_t size)
{
- int ret;
- u32 value;
+ u32 auxwdata[DP_AUX_MAX_PAYLOAD_BYTES / sizeof(u32)] = { 0 };
+ int ret, count = ALIGN(size, sizeof(u32));
- ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &value);
- if (ret < 0)
+ memcpy(auxwdata, data, size);
+
+ ret = regmap_raw_write(tc->regmap, DP0_AUXWDATA(0), auxwdata, count);
+ if (ret)
return ret;
- if (value & AUX_BUSY) {
- dev_err(tc->dev, "aux busy!\n");
- return -EBUSY;
- }
+ return size;
+}
- if (value & AUX_TIMEOUT) {
- dev_err(tc->dev, "aux access timeout!\n");
- return -ETIMEDOUT;
- }
+static int tc_aux_read_data(struct tc_data *tc, void *data, size_t size)
+{
+ u32 auxrdata[DP_AUX_MAX_PAYLOAD_BYTES / sizeof(u32)];
+ int ret, count = ALIGN(size, sizeof(u32));
- *reply = (value & AUX_STATUS_MASK) >> AUX_STATUS_SHIFT;
- return 0;
+ ret = regmap_raw_read(tc->regmap, DP0_AUXRDATA(0), auxrdata, count);
+ if (ret)
+ return ret;
+
+ memcpy(data, auxrdata, size);
+
+ return size;
+}
+
+static u32 tc_auxcfg0(struct drm_dp_aux_msg *msg, size_t size)
+{
+ u32 auxcfg0 = msg->request;
+
+ if (size)
+ auxcfg0 |= FIELD_PREP(DP0_AUXCFG0_BSIZE, size - 1);
+ else
+ auxcfg0 |= DP0_AUXCFG0_ADDR_ONLY;
+
+ return auxcfg0;
}
static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
struct tc_data *tc = aux_to_tc(aux);
- size_t size = min_t(size_t, 8, msg->size);
+ size_t size = min_t(size_t, DP_AUX_MAX_PAYLOAD_BYTES - 1, msg->size);
u8 request = msg->request & ~DP_AUX_I2C_MOT;
- u8 *buf = msg->buffer;
- u32 tmp = 0;
- int i = 0;
+ u32 auxstatus;
int ret;
- if (size == 0)
- return 0;
-
- ret = tc_aux_wait_busy(tc, 100);
+ ret = tc_aux_wait_busy(tc);
if (ret)
- goto err;
+ return ret;
- if (request == DP_AUX_I2C_WRITE || request == DP_AUX_NATIVE_WRITE) {
- /* Store data */
- while (i < size) {
- if (request == DP_AUX_NATIVE_WRITE)
- tmp = tmp | (buf[i] << (8 * (i & 0x3)));
- else
- tmp = (tmp << 8) | buf[i];
- i++;
- if (((i % 4) == 0) || (i == size)) {
- tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
- tmp = 0;
- }
+ switch (request) {
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ break;
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ if (size) {
+ ret = tc_aux_write_data(tc, msg->buffer, size);
+ if (ret < 0)
+ return ret;
}
- } else if (request != DP_AUX_I2C_READ &&
- request != DP_AUX_NATIVE_READ) {
+ break;
+ default:
return -EINVAL;
}
/* Store address */
- tc_write(DP0_AUXADDR, msg->address);
+ ret = regmap_write(tc->regmap, DP0_AUXADDR, msg->address);
+ if (ret)
+ return ret;
/* Start transfer */
- tc_write(DP0_AUXCFG0, ((size - 1) << 8) | request);
+ ret = regmap_write(tc->regmap, DP0_AUXCFG0, tc_auxcfg0(msg, size));
+ if (ret)
+ return ret;
- ret = tc_aux_wait_busy(tc, 100);
+ ret = tc_aux_wait_busy(tc);
if (ret)
- goto err;
+ return ret;
- ret = tc_aux_get_status(tc, &msg->reply);
+ ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &auxstatus);
if (ret)
- goto err;
+ return ret;
- if (request == DP_AUX_I2C_READ || request == DP_AUX_NATIVE_READ) {
- /* Read data */
- while (i < size) {
- if ((i % 4) == 0)
- tc_read(DP0_AUXRDATA(i >> 2), &tmp);
- buf[i] = tmp & 0xff;
- tmp = tmp >> 8;
- i++;
- }
+ if (auxstatus & AUX_TIMEOUT)
+ return -ETIMEDOUT;
+	/*
+	 * For some reason an address-only DP_AUX_I2C_WRITE (MOT) still
+	 * reports 1 byte transferred in its status. To deal with that,
+	 * we ignore the aux_bytes field if we know that this was an
+	 * address-only transfer.
+	 */
+ if (size)
+ size = FIELD_GET(AUX_BYTES, auxstatus);
+ msg->reply = FIELD_GET(AUX_STATUS, auxstatus);
+
+ switch (request) {
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ if (size)
+ return tc_aux_read_data(tc, msg->buffer, size);
+ break;
}
return size;
-err:
- return ret;
}
static const char * const training_pattern1_errors[] = {
@@ -411,10 +444,18 @@ static u32 tc_srcctrl(struct tc_data *tc)
return reg;
}
-static void tc_wait_pll_lock(struct tc_data *tc)
+static int tc_pllupdate(struct tc_data *tc, unsigned int pllctrl)
{
+ int ret;
+
+ ret = regmap_write(tc->regmap, pllctrl, PLLUPDATE | PLLEN);
+ if (ret)
+ return ret;
+
/* Wait for PLL to lock: up to 2.09 ms, depending on refclk */
usleep_range(3000, 6000);
+
+ return 0;
}
static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
@@ -428,6 +469,7 @@ static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
int ext_div[] = {1, 2, 3, 5, 7};
int best_pixelclock = 0;
int vco_hi = 0;
+ u32 pxl_pllparam;
dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
refclk);
@@ -497,24 +539,23 @@ static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
best_mul = 0;
/* Power up PLL and switch to bypass */
- tc_write(PXL_PLLCTRL, PLLBYP | PLLEN);
-
- tc_write(PXL_PLLPARAM,
- (vco_hi << 24) | /* For PLL VCO >= 300 MHz = 1 */
- (ext_div[best_pre] << 20) | /* External Pre-divider */
- (ext_div[best_post] << 16) | /* External Post-divider */
- IN_SEL_REFCLK | /* Use RefClk as PLL input */
- (best_div << 8) | /* Divider for PLL RefClk */
- (best_mul << 0)); /* Multiplier for PLL */
+ ret = regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP | PLLEN);
+ if (ret)
+ return ret;
- /* Force PLL parameter update and disable bypass */
- tc_write(PXL_PLLCTRL, PLLUPDATE | PLLEN);
+ pxl_pllparam = vco_hi << 24; /* For PLL VCO >= 300 MHz = 1 */
+ pxl_pllparam |= ext_div[best_pre] << 20; /* External Pre-divider */
+ pxl_pllparam |= ext_div[best_post] << 16; /* External Post-divider */
+ pxl_pllparam |= IN_SEL_REFCLK; /* Use RefClk as PLL input */
+ pxl_pllparam |= best_div << 8; /* Divider for PLL RefClk */
+ pxl_pllparam |= best_mul; /* Multiplier for PLL */
- tc_wait_pll_lock(tc);
+ ret = regmap_write(tc->regmap, PXL_PLLPARAM, pxl_pllparam);
+ if (ret)
+ return ret;
- return 0;
-err:
- return ret;
+ /* Force PLL parameter update and disable bypass */
+ return tc_pllupdate(tc, PXL_PLLCTRL);
}
static int tc_pxl_pll_dis(struct tc_data *tc)
@@ -525,7 +566,6 @@ static int tc_pxl_pll_dis(struct tc_data *tc)
static int tc_stream_clock_calc(struct tc_data *tc)
{
- int ret;
/*
* If the Stream clock and Link Symbol clock are
* asynchronous with each other, the value of M changes over
@@ -541,56 +581,63 @@ static int tc_stream_clock_calc(struct tc_data *tc)
* M/N = f_STRMCLK / f_LSCLK
*
*/
- tc_write(DP0_VIDMNGEN1, 32768);
-
- return 0;
-err:
- return ret;
+ return regmap_write(tc->regmap, DP0_VIDMNGEN1, 32768);
}
-static int tc_aux_link_setup(struct tc_data *tc)
+static int tc_set_syspllparam(struct tc_data *tc)
{
unsigned long rate;
- u32 value;
- int ret;
+ u32 pllparam = SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
rate = clk_get_rate(tc->refclk);
switch (rate) {
case 38400000:
- value = REF_FREQ_38M4;
+ pllparam |= REF_FREQ_38M4;
break;
case 26000000:
- value = REF_FREQ_26M;
+ pllparam |= REF_FREQ_26M;
break;
case 19200000:
- value = REF_FREQ_19M2;
+ pllparam |= REF_FREQ_19M2;
break;
case 13000000:
- value = REF_FREQ_13M;
+ pllparam |= REF_FREQ_13M;
break;
default:
dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate);
return -EINVAL;
}
- /* Setup DP-PHY / PLL */
- value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
- tc_write(SYS_PLLPARAM, value);
+ return regmap_write(tc->regmap, SYS_PLLPARAM, pllparam);
+}
+
+static int tc_aux_link_setup(struct tc_data *tc)
+{
+ int ret;
+ u32 dp0_auxcfg1;
- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | PHY_A0_EN);
+ /* Setup DP-PHY / PLL */
+ ret = tc_set_syspllparam(tc);
+ if (ret)
+ goto err;
+ ret = regmap_write(tc->regmap, DP_PHY_CTRL,
+ BGREN | PWR_SW_EN | PHY_A0_EN);
+ if (ret)
+ goto err;
/*
* Initially PLLs are in bypass. Force PLL parameter update,
* disable PLL bypass, enable PLL
*/
- tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
- tc_wait_pll_lock(tc);
+ ret = tc_pllupdate(tc, DP0_PLLCTRL);
+ if (ret)
+ goto err;
- tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
- tc_wait_pll_lock(tc);
+ ret = tc_pllupdate(tc, DP1_PLLCTRL);
+ if (ret)
+ goto err;
- ret = tc_poll_timeout(tc->regmap, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1,
- 1000);
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
if (ret == -ETIMEDOUT) {
dev_err(tc->dev, "Timeout waiting for PHY to become ready");
return ret;
@@ -599,9 +646,13 @@ static int tc_aux_link_setup(struct tc_data *tc)
}
/* Setup AUX link */
- tc_write(DP0_AUXCFG1, AUX_RX_FILTER_EN |
- (0x06 << 8) | /* Aux Bit Period Calculator Threshold */
- (0x3f << 0)); /* Aux Response Timeout Timer */
+ dp0_auxcfg1 = AUX_RX_FILTER_EN;
+ dp0_auxcfg1 |= 0x06 << 8; /* Aux Bit Period Calculator Threshold */
+ dp0_auxcfg1 |= 0x3f << 0; /* Aux Response Timeout Timer */
+
+ ret = regmap_write(tc->regmap, DP0_AUXCFG1, dp0_auxcfg1);
+ if (ret)
+ goto err;
return 0;
err:
@@ -612,8 +663,7 @@ err:
static int tc_get_display_props(struct tc_data *tc)
{
int ret;
- /* temp buffer */
- u8 tmp[8];
+ u8 reg;
/* Read DP Rx Link Capability */
ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
@@ -629,21 +679,21 @@ static int tc_get_display_props(struct tc_data *tc)
tc->link.base.num_lanes = 2;
}
- ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, &reg);
if (ret < 0)
goto err_dpcd_read;
- tc->link.spread = tmp[0] & DP_MAX_DOWNSPREAD_0_5;
+ tc->link.spread = reg & DP_MAX_DOWNSPREAD_0_5;
- ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, tmp);
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, &reg);
if (ret < 0)
goto err_dpcd_read;
tc->link.scrambler_dis = false;
/* read assr */
- ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, tmp);
+ ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, &reg);
if (ret < 0)
goto err_dpcd_read;
- tc->link.assr = tmp[0] & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+ tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
@@ -677,6 +727,7 @@ static int tc_set_video_mode(struct tc_data *tc,
int upper_margin = mode->vtotal - mode->vsync_end;
int lower_margin = mode->vsync_start - mode->vdisplay;
int vsync_len = mode->vsync_end - mode->vsync_start;
+ u32 dp0_syncval;
/*
* Recommended maximum number of symbols transferred in a transfer unit:
@@ -701,156 +752,193 @@ static int tc_set_video_mode(struct tc_data *tc,
* assume we do not need any delay when DPI is a source of
* sync signals
*/
- tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
- OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
- tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
- (ALIGN(hsync_len, 2) << 0)); /* Hsync */
- tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
- (ALIGN(mode->hdisplay, 2) << 0)); /* width */
- tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
- (vsync_len << 0)); /* Vsync */
- tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
- (mode->vdisplay << 0)); /* height */
- tc_write(VFUEN0, VFUEN); /* update settings */
+ ret = regmap_write(tc->regmap, VPCTRL0,
+ FIELD_PREP(VSDELAY, 0) |
+ OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, HTIM01,
+ FIELD_PREP(HBPR, ALIGN(left_margin, 2)) |
+ FIELD_PREP(HPW, ALIGN(hsync_len, 2)));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, HTIM02,
+ FIELD_PREP(HDISPR, ALIGN(mode->hdisplay, 2)) |
+ FIELD_PREP(HFPR, ALIGN(right_margin, 2)));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, VTIM01,
+ FIELD_PREP(VBPR, upper_margin) |
+ FIELD_PREP(VSPR, vsync_len));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, VTIM02,
+ FIELD_PREP(VFPR, lower_margin) |
+ FIELD_PREP(VDISPR, mode->vdisplay));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, VFUEN0, VFUEN); /* update settings */
+ if (ret)
+ return ret;
/* Test pattern settings */
- tc_write(TSTCTL,
- (120 << 24) | /* Red Color component value */
- (20 << 16) | /* Green Color component value */
- (99 << 8) | /* Blue Color component value */
- (1 << 4) | /* Enable I2C Filter */
- (2 << 0) | /* Color bar Mode */
- 0);
+ ret = regmap_write(tc->regmap, TSTCTL,
+ FIELD_PREP(COLOR_R, 120) |
+ FIELD_PREP(COLOR_G, 20) |
+ FIELD_PREP(COLOR_B, 99) |
+ ENI2CFILTER |
+ FIELD_PREP(COLOR_BAR_MODE, COLOR_BAR_MODE_BARS));
+ if (ret)
+ return ret;
/* DP Main Stream Attributes */
vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
- tc_write(DP0_VIDSYNCDELAY,
- (max_tu_symbol << 16) | /* thresh_dly */
- (vid_sync_dly << 0));
+ ret = regmap_write(tc->regmap, DP0_VIDSYNCDELAY,
+ FIELD_PREP(THRESH_DLY, max_tu_symbol) |
+ FIELD_PREP(VID_SYNC_DLY, vid_sync_dly));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, DP0_TOTALVAL,
+ FIELD_PREP(H_TOTAL, mode->htotal) |
+ FIELD_PREP(V_TOTAL, mode->vtotal));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, DP0_STARTVAL,
+ FIELD_PREP(H_START, left_margin + hsync_len) |
+ FIELD_PREP(V_START, upper_margin + vsync_len));
+ if (ret)
+ return ret;
- tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
+ ret = regmap_write(tc->regmap, DP0_ACTIVEVAL,
+ FIELD_PREP(V_ACT, mode->vdisplay) |
+ FIELD_PREP(H_ACT, mode->hdisplay));
+ if (ret)
+ return ret;
- tc_write(DP0_STARTVAL,
- ((upper_margin + vsync_len) << 16) |
- ((left_margin + hsync_len) << 0));
+ dp0_syncval = FIELD_PREP(VS_WIDTH, vsync_len) |
+ FIELD_PREP(HS_WIDTH, hsync_len);
- tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ dp0_syncval |= SYNCVAL_VS_POL_ACTIVE_LOW;
- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
- ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
- ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ dp0_syncval |= SYNCVAL_HS_POL_ACTIVE_LOW;
- tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
- DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
+ ret = regmap_write(tc->regmap, DP0_SYNCVAL, dp0_syncval);
+ if (ret)
+ return ret;
- tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+ ret = regmap_write(tc->regmap, DPIPXLFMT,
+ VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
+ DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 |
+ DPI_BPP_RGB888);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(tc->regmap, DP0_MISC,
+ FIELD_PREP(MAX_TU_SYMBOL, max_tu_symbol) |
+ FIELD_PREP(TU_SIZE, TU_SIZE_RECOMMENDED) |
BPC_8);
+ if (ret)
+ return ret;
return 0;
-err:
- return ret;
}
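The FIELD_PREP() conversions above come from <linux/bitfield.h>: given a field mask, FIELD_PREP() shifts a value into that field with compile-time width checks. A minimal sketch; the GENMASK() layouts below are assumptions for illustration, not necessarily the driver's actual register definitions:

#include <linux/bitfield.h>

#define HBPR	GENMASK(24, 16)	/* assumed: horizontal back porch field */
#define HPW	GENMASK(8, 0)	/* assumed: hsync pulse width field */

static u32 example_htim01(u32 back_porch, u32 hsync_len)
{
	/* FIELD_PREP(HBPR, x) == x << 16 for this mask, with a
	 * compile-time check that x fits in the field */
	return FIELD_PREP(HBPR, back_porch) | FIELD_PREP(HPW, hsync_len);
}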
static int tc_wait_link_training(struct tc_data *tc)
{
- u32 timeout = 1000;
u32 value;
int ret;
- do {
- udelay(1);
- tc_read(DP0_LTSTAT, &value);
- } while ((!(value & LT_LOOPDONE)) && (--timeout));
-
- if (timeout == 0) {
+ ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
+ LT_LOOPDONE, 1, 1000);
+ if (ret) {
dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
- return -ETIMEDOUT;
+ return ret;
}
- return (value >> 8) & 0x7;
+ ret = regmap_read(tc->regmap, DP0_LTSTAT, &value);
+ if (ret)
+ return ret;
-err:
- return ret;
+ return (value >> 8) & 0x7;
}
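tc_poll_timeout() is introduced earlier in this patch; a minimal sketch, assuming it is a thin wrapper around regmap_read_poll_timeout() from <linux/regmap.h>:

static int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
			   unsigned int cond_mask, unsigned int cond_value,
			   unsigned long sleep_us, u64 timeout_us)
{
	unsigned int val;

	/* polls addr until (val & cond_mask) == cond_value or timeout */
	return regmap_read_poll_timeout(tc->regmap, addr, val,
					(val & cond_mask) == cond_value,
					sleep_us, timeout_us);
}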
static int tc_main_link_enable(struct tc_data *tc)
{
struct drm_dp_aux *aux = &tc->aux;
struct device *dev = tc->dev;
- unsigned int rate;
u32 dp_phy_ctrl;
- int timeout;
u32 value;
int ret;
- u8 tmp[8];
+ u8 tmp[DP_LINK_STATUS_SIZE];
dev_dbg(tc->dev, "link enable\n");
- tc_read(DP0CTL, &value);
- if (WARN_ON(value & DP_EN))
- tc_write(DP0CTL, 0);
+ ret = regmap_read(tc->regmap, DP0CTL, &value);
+ if (ret)
+ return ret;
+
+ if (WARN_ON(value & DP_EN)) {
+ ret = regmap_write(tc->regmap, DP0CTL, 0);
+ if (ret)
+ return ret;
+ }
- tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc));
+ if (ret)
+ return ret;
/* SSCG and BW27 on DP1 must be set to the same as on DP0 */
- tc_write(DP1_SRCCTRL,
+ ret = regmap_write(tc->regmap, DP1_SRCCTRL,
(tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
+ if (ret)
+ return ret;
- rate = clk_get_rate(tc->refclk);
- switch (rate) {
- case 38400000:
- value = REF_FREQ_38M4;
- break;
- case 26000000:
- value = REF_FREQ_26M;
- break;
- case 19200000:
- value = REF_FREQ_19M2;
- break;
- case 13000000:
- value = REF_FREQ_13M;
- break;
- default:
- return -EINVAL;
- }
- value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
- tc_write(SYS_PLLPARAM, value);
+ ret = tc_set_syspllparam(tc);
+ if (ret)
+ return ret;
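tc_set_syspllparam() is another helper introduced earlier in this patch; a sketch reconstructed from the switch statement removed above, assuming the helper simply folds the old logic behind a regmap_write():

static int tc_set_syspllparam(struct tc_data *tc)
{
	unsigned long rate;
	u32 pllparam = SYSCLK_SEL_LSCLK | LSCLK_DIV_2;

	rate = clk_get_rate(tc->refclk);
	switch (rate) {
	case 38400000:
		pllparam |= REF_FREQ_38M4;
		break;
	case 26000000:
		pllparam |= REF_FREQ_26M;
		break;
	case 19200000:
		pllparam |= REF_FREQ_19M2;
		break;
	case 13000000:
		pllparam |= REF_FREQ_13M;
		break;
	default:
		return -EINVAL;
	}

	return regmap_write(tc->regmap, SYS_PLLPARAM, pllparam);
}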
/* Setup Main Link */
dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
if (tc->link.base.num_lanes == 2)
dp_phy_ctrl |= PHY_2LANE;
- tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+
+ ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
+ if (ret)
+ return ret;
/* PLL setup */
- tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
- tc_wait_pll_lock(tc);
+ ret = tc_pllupdate(tc, DP0_PLLCTRL);
+ if (ret)
+ return ret;
- tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
- tc_wait_pll_lock(tc);
+ ret = tc_pllupdate(tc, DP1_PLLCTRL);
+ if (ret)
+ return ret;
/* Reset/Enable Main Links */
dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST;
- tc_write(DP_PHY_CTRL, dp_phy_ctrl);
+ ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
+ if (ret)
+ return ret;
+
usleep_range(100, 200);
dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
- tc_write(DP_PHY_CTRL, dp_phy_ctrl);
-
- timeout = 1000;
- do {
- tc_read(DP_PHY_CTRL, &value);
- udelay(1);
- } while ((!(value & PHY_RDY)) && (--timeout));
+ ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
+ if (ret)
+ return ret;
- if (timeout == 0) {
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+ if (ret) {
dev_err(dev, "timeout waiting for phy become ready");
- return -ETIMEDOUT;
+ return ret;
}
/* Set misc: 8 bits per color */
ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8);
if (ret)
- goto err;
+ return ret;
/*
* ASSR mode
@@ -903,53 +991,71 @@ static int tc_main_link_enable(struct tc_data *tc)
/* Clock-Recovery */
/* Set DPCD 0x102 for Training Pattern 1 */
- tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE |
- DP_TRAINING_PATTERN_1);
+ ret = regmap_write(tc->regmap, DP0_SNKLTCTRL,
+ DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
- tc_write(DP0_LTLOOPCTRL,
- (15 << 28) | /* Defer Iteration Count */
- (15 << 24) | /* Loop Iteration Count */
- (0xd << 0)); /* Loop Timer Delay */
+ ret = regmap_write(tc->regmap, DP0_LTLOOPCTRL,
+ (15 << 28) | /* Defer Iteration Count */
+ (15 << 24) | /* Loop Iteration Count */
+ (0xd << 0)); /* Loop Timer Delay */
+ if (ret)
+ return ret;
- tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
- DP0_SRCCTRL_AUTOCORRECT | DP0_SRCCTRL_TP1);
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL,
+ tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+ DP0_SRCCTRL_AUTOCORRECT |
+ DP0_SRCCTRL_TP1);
+ if (ret)
+ return ret;
/* Enable DP0 to start Link Training */
- tc_write(DP0CTL,
- ((tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ? EF_EN : 0) |
- DP_EN);
+ ret = regmap_write(tc->regmap, DP0CTL,
+ ((tc->link.base.capabilities &
+ DP_LINK_CAP_ENHANCED_FRAMING) ? EF_EN : 0) |
+ DP_EN);
+ if (ret)
+ return ret;
/* wait */
+
ret = tc_wait_link_training(tc);
if (ret < 0)
- goto err;
+ return ret;
if (ret) {
dev_err(tc->dev, "Link training phase 1 failed: %s\n",
training_pattern1_errors[ret]);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
/* Channel Equalization */
/* Set DPCD 0x102 for Training Pattern 2 */
- tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE |
- DP_TRAINING_PATTERN_2);
+ ret = regmap_write(tc->regmap, DP0_SNKLTCTRL,
+ DP_LINK_SCRAMBLING_DISABLE |
+ DP_TRAINING_PATTERN_2);
+ if (ret)
+ return ret;
- tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
- DP0_SRCCTRL_AUTOCORRECT | DP0_SRCCTRL_TP2);
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL,
+ tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
+ DP0_SRCCTRL_AUTOCORRECT |
+ DP0_SRCCTRL_TP2);
+ if (ret)
+ return ret;
/* wait */
ret = tc_wait_link_training(tc);
if (ret < 0)
- goto err;
+ return ret;
if (ret) {
dev_err(tc->dev, "Link training phase 2 failed: %s\n",
training_pattern2_errors[ret]);
- ret = -ENODEV;
- goto err;
+ return -ENODEV;
}
/*
@@ -962,7 +1068,10 @@ static int tc_main_link_enable(struct tc_data *tc)
*/
/* Clear Training Pattern, set AutoCorrect Mode = 1 */
- tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT);
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL, tc_srcctrl(tc) |
+ DP0_SRCCTRL_AUTOCORRECT);
+ if (ret)
+ return ret;
/* Clear DPCD 0x102 */
/* Note: Can Not use DP0_SNKLTCTRL (0x06E4) short cut */
@@ -1006,7 +1115,7 @@ static int tc_main_link_enable(struct tc_data *tc)
dev_err(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[3]);
dev_err(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", tmp[4]);
dev_err(dev, "0x0207 ADJUST_REQUEST_LANE2_3: 0x%02x\n", tmp[5]);
- goto err;
+ return ret;
}
return 0;
@@ -1015,7 +1124,6 @@ err_dpcd_read:
return ret;
err_dpcd_write:
dev_err(tc->dev, "Failed to write DPCD: %d\n", ret);
-err:
return ret;
}
@@ -1025,12 +1133,11 @@ static int tc_main_link_disable(struct tc_data *tc)
dev_dbg(tc->dev, "link disable\n");
- tc_write(DP0_SRCCTRL, 0);
- tc_write(DP0CTL, 0);
+ ret = regmap_write(tc->regmap, DP0_SRCCTRL, 0);
+ if (ret)
+ return ret;
- return 0;
-err:
- return ret;
+ return regmap_write(tc->regmap, DP0CTL, 0);
}
static int tc_stream_enable(struct tc_data *tc)
@@ -1045,7 +1152,7 @@ static int tc_stream_enable(struct tc_data *tc)
ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
1000 * tc->mode.clock);
if (ret)
- goto err;
+ return ret;
}
ret = tc_set_video_mode(tc, &tc->mode);
@@ -1060,7 +1167,9 @@ static int tc_stream_enable(struct tc_data *tc)
value = VID_MN_GEN | DP_EN;
if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
value |= EF_EN;
- tc_write(DP0CTL, value);
+ ret = regmap_write(tc->regmap, DP0CTL, value);
+ if (ret)
+ return ret;
/*
* VID_EN assertion should be delayed by at least N * LSCLK
* cycles from the time VID_MN_GEN is enabled in order to
@@ -1070,36 +1179,35 @@ static int tc_stream_enable(struct tc_data *tc)
*/
usleep_range(500, 1000);
value |= VID_EN;
- tc_write(DP0CTL, value);
+ ret = regmap_write(tc->regmap, DP0CTL, value);
+ if (ret)
+ return ret;
/* Set input interface */
value = DP0_AUDSRC_NO_INPUT;
if (tc_test_pattern)
value |= DP0_VIDSRC_COLOR_BAR;
else
value |= DP0_VIDSRC_DPI_RX;
- tc_write(SYSCTRL, value);
+ ret = regmap_write(tc->regmap, SYSCTRL, value);
+ if (ret)
+ return ret;
return 0;
-err:
- return ret;
}
static int tc_stream_disable(struct tc_data *tc)
{
int ret;
- u32 val;
dev_dbg(tc->dev, "disable video stream\n");
- tc_read(DP0CTL, &val);
- val &= ~VID_EN;
- tc_write(DP0CTL, val);
+ ret = regmap_update_bits(tc->regmap, DP0CTL, VID_EN, 0);
+ if (ret)
+ return ret;
tc_pxl_pll_dis(tc);
return 0;
-err:
- return ret;
}
static void tc_bridge_pre_enable(struct drm_bridge *bridge)
@@ -1251,7 +1359,9 @@ static enum drm_connector_status tc_connector_detect(struct drm_connector *conne
return connector_status_unknown;
}
- tc_read(GPIOI, &val);
+ ret = regmap_read(tc->regmap, GPIOI, &val);
+ if (ret)
+ return connector_status_unknown;
conn = val & BIT(tc->hpd_pin);
@@ -1259,9 +1369,6 @@ static enum drm_connector_status tc_connector_detect(struct drm_connector *conne
return connector_status_connected;
else
return connector_status_disconnected;
-
-err:
- return connector_status_unknown;
}
static const struct drm_connector_funcs tc_connector_funcs = {
@@ -1497,6 +1604,22 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */
+ if (!tc->reset_gpio) {
+ /*
+ * If the reset pin isn't present, do a software reset. It isn't
+ * as thorough as the hardware reset, as we can't reset the I2C
+ * communication block for obvious reasons, but it gets the
+ * chip into a defined state.
+ */
+ regmap_update_bits(tc->regmap, SYSRSTENB,
+ ENBLCD0 | ENBBM | ENBDSIRX | ENBREG | ENBHDCP,
+ 0);
+ regmap_update_bits(tc->regmap, SYSRSTENB,
+ ENBLCD0 | ENBBM | ENBDSIRX | ENBREG | ENBHDCP,
+ ENBLCD0 | ENBBM | ENBDSIRX | ENBREG | ENBHDCP);
+ usleep_range(5000, 10000);
+ }
+
if (tc->hpd_pin >= 0) {
u32 lcnt_reg = tc->hpd_pin == 0 ? INT_GP0_LCNT : INT_GP1_LCNT;
u32 h_lc = INT_GPIO_H(tc->hpd_pin) | INT_GPIO_LC(tc->hpd_pin);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index b77a52d05061..0a580957c8cf 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1,9 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * datasheet: http://www.ti.com/lit/ds/symlink/sn65dsi86.pdf
*/
#include <linux/clk.h>
+#include <linux/debugfs.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
@@ -90,6 +92,7 @@ struct ti_sn_bridge {
struct drm_dp_aux aux;
struct drm_bridge bridge;
struct drm_connector connector;
+ struct dentry *debugfs;
struct device_node *host_node;
struct mipi_dsi_device *dsi;
struct clk *refclk;
@@ -155,6 +158,42 @@ static const struct dev_pm_ops ti_sn_bridge_pm_ops = {
SET_RUNTIME_PM_OPS(ti_sn_bridge_suspend, ti_sn_bridge_resume, NULL)
};
+static int status_show(struct seq_file *s, void *data)
+{
+ struct ti_sn_bridge *pdata = s->private;
+ unsigned int reg, val;
+
+ seq_puts(s, "STATUS REGISTERS:\n");
+
+ pm_runtime_get_sync(pdata->dev);
+
+ /* IRQ Status Registers, see Table 31 in datasheet */
+ for (reg = 0xf0; reg <= 0xf8; reg++) {
+ regmap_read(pdata->regmap, reg, &val);
+ seq_printf(s, "[0x%02x] = 0x%08x\n", reg, val);
+ }
+
+ pm_runtime_put(pdata->dev);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(status);
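DEFINE_SHOW_ATTRIBUTE() is the <linux/seq_file.h> helper that generates the status_fops used below; it expands to roughly:

static int status_open(struct inode *inode, struct file *file)
{
	return single_open(file, status_show, inode->i_private);
}

static const struct file_operations status_fops = {
	.owner		= THIS_MODULE,
	.open		= status_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};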
+
+static void ti_sn_debugfs_init(struct ti_sn_bridge *pdata)
+{
+ pdata->debugfs = debugfs_create_dir(dev_name(pdata->dev), NULL);
+
+ debugfs_create_file("status", 0600, pdata->debugfs, pdata,
+ &status_fops);
+}
+
+static void ti_sn_debugfs_remove(struct ti_sn_bridge *pdata)
+{
+ debugfs_remove_recursive(pdata->debugfs);
+ pdata->debugfs = NULL;
+}
+
/* Connector funcs */
static struct ti_sn_bridge *
connector_to_ti_sn_bridge(struct drm_connector *connector)
@@ -275,8 +314,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
/* TODO: setting to 4 lanes always for now */
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
/* check if continuous dsi clock is required or not */
pm_runtime_get_sync(pdata->dev);
@@ -731,6 +769,8 @@ static int ti_sn_bridge_probe(struct i2c_client *client,
drm_bridge_add(&pdata->bridge);
+ ti_sn_debugfs_init(pdata);
+
return 0;
}
@@ -741,6 +781,8 @@ static int ti_sn_bridge_remove(struct i2c_client *client)
if (!pdata)
return -EINVAL;
+ ti_sn_debugfs_remove(pdata);
+
of_node_put(pdata->host_node);
pm_runtime_disable(pdata->dev);
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index dbf35c7bc85e..61cc2354ef1b 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -134,8 +134,10 @@ static int tfp410_attach(struct drm_bridge *bridge)
drm_connector_helper_add(&dvi->connector,
&tfp410_con_helper_funcs);
- ret = drm_connector_init(bridge->dev, &dvi->connector,
- &tfp410_con_funcs, dvi->connector_type);
+ ret = drm_connector_init_with_ddc(bridge->dev, &dvi->connector,
+ &tfp410_con_funcs,
+ dvi->connector_type,
+ dvi->ddc);
if (ret) {
dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index be4ea370ba31..36a69aec8a4b 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -513,7 +513,7 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_PRIME,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 117b8ee98243..6e09f27fd9d6 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_agpsupport.c
* DRM support for AGP/GART backend
*
@@ -465,46 +465,3 @@ void drm_legacy_agp_clear(struct drm_device *dev)
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
-
-/**
- * Binds a collection of pages into AGP memory at the given offset, returning
- * the AGP memory structure containing them.
- *
- * No reference is held on the pages during this time -- it is up to the
- * caller to handle that.
- */
-struct agp_memory *
-drm_agp_bind_pages(struct drm_device *dev,
- struct page **pages,
- unsigned long num_pages,
- uint32_t gtt_offset,
- u32 type)
-{
- struct agp_memory *mem;
- int ret, i;
-
- DRM_DEBUG("\n");
-
- mem = agp_allocate_memory(dev->agp->bridge, num_pages,
- type);
- if (mem == NULL) {
- DRM_ERROR("Failed to allocate memory for %ld pages\n",
- num_pages);
- return NULL;
- }
-
- for (i = 0; i < num_pages; i++)
- mem->pages[i] = pages[i];
- mem->page_count = num_pages;
-
- mem->is_flushed = true;
- ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
- if (ret != 0) {
- DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
- agp_free_memory(mem);
- return NULL;
- }
-
- return mem;
-}
-EXPORT_SYMBOL(drm_agp_bind_pages);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index abe38bdf85ae..19ae119f1a5d 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -747,6 +747,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == config->hdcp_content_type_property) {
+ state->hdcp_content_type = val;
} else if (property == connector->colorspace_property) {
state->colorspace = val;
} else if (property == config->writeback_fb_id_property) {
@@ -831,6 +833,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
state->hdr_output_metadata->base.id : 0;
} else if (property == config->content_protection_property) {
*val = state->content_protection;
+ } else if (property == config->hdcp_content_type_property) {
+ *val = state->hdcp_content_type;
} else if (property == config->writeback_fb_id_property) {
/* Writeback framebuffer is one-shot, write and forget */
*val = 0;
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index e1dafb0cc5e2..d9a2e3695525 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -59,7 +59,6 @@ static void drm_client_close(struct drm_client_dev *client)
drm_file_free(client->file);
}
-EXPORT_SYMBOL(drm_client_close);
/**
* drm_client_init - Initialise a DRM client
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index b3f2cf7eae9c..354798bad576 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -92,6 +92,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
{ DRM_MODE_CONNECTOR_DSI, "DSI" },
{ DRM_MODE_CONNECTOR_DPI, "DPI" },
{ DRM_MODE_CONNECTOR_WRITEBACK, "Writeback" },
+ { DRM_MODE_CONNECTOR_SPI, "SPI" },
};
void drm_connector_ida_init(void)
@@ -140,8 +141,7 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
}
DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n",
- connector->name,
- mode->name,
+ connector->name, mode->name,
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
@@ -298,6 +298,41 @@ out_put:
EXPORT_SYMBOL(drm_connector_init);
/**
+ * drm_connector_init_with_ddc - Init a preallocated connector
+ * @dev: DRM device
+ * @connector: the connector to init
+ * @funcs: callbacks for this connector
+ * @connector_type: user visible type of the connector
+ * @ddc: pointer to the associated ddc adapter
+ *
+ * Initialises a preallocated connector. Connectors should be
+ * subclassed as part of driver connector objects.
+ *
+ * Ensures that the ddc field of the connector is correctly set.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_connector_init_with_ddc(struct drm_device *dev,
+ struct drm_connector *connector,
+ const struct drm_connector_funcs *funcs,
+ int connector_type,
+ struct i2c_adapter *ddc)
+{
+ int ret;
+
+ ret = drm_connector_init(dev, connector, funcs, connector_type);
+ if (ret)
+ return ret;
+
+ /* provide ddc symlink in sysfs */
+ connector->ddc = ddc;
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_connector_init_with_ddc);
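The ti-tfp410 hunk above shows real usage; generically, a driver that has already looked up its DDC adapter would call (names below are hypothetical):

	/* priv->ddc obtained earlier, e.g. via of_find_i2c_adapter_by_node() */
	ret = drm_connector_init_with_ddc(dev, &priv->connector,
					  &my_connector_funcs,
					  DRM_MODE_CONNECTOR_HDMIA,
					  priv->ddc);
	if (ret)
		return ret;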
+
+/**
* drm_connector_attach_edid_property - attach edid property.
* @connector: the connector
*
@@ -948,10 +983,70 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = {
* - If the state is DESIRED, kernel should attempt to re-authenticate the
* link whenever possible. This includes across disable/enable, dpms,
* hotplug, downstream device changes, link status failures, etc..
- * - Userspace is responsible for polling the property to determine when
- * the value transitions from ENABLED to DESIRED. This signifies the link
- * is no longer protected and userspace should take appropriate action
- * (whatever that might be).
+ * - The kernel sends a uevent with the connector id and property id through
+ * @drm_hdcp_update_content_protection, upon the following kernel-triggered
+ * scenarios:
+ * DESIRED -> ENABLED (authentication success)
+ * ENABLED -> DESIRED (termination of authentication)
+ * - Please note that no uevents are sent for userspace-triggered property
+ * state changes, which cannot fail, such as
+ * DESIRED/ENABLED -> UNDESIRED
+ * UNDESIRED -> DESIRED
+ * - Userspace is responsible for polling the property or listening to uevents
+ * to determine when the value transitions from ENABLED to DESIRED.
+ * This signifies the link is no longer protected and userspace should
+ * take appropriate action (whatever that might be).
+ *
+ * HDCP Content Type:
+ * This enum property is used by userspace to declare to the kernel the
+ * content type of the display stream. Here, display stream stands for any
+ * display content that userspace intends to display through HDCP
+ * encryption.
+ *
+ * The Content Type of a stream is decided by the owner of the stream, as
+ * "HDCP Type0" or "HDCP Type1".
+ *
+ * The value of the property can be one of the below:
+ * - "HDCP Type0": DRM_MODE_HDCP_CONTENT_TYPE0 = 0
+ * - "HDCP Type1": DRM_MODE_HDCP_CONTENT_TYPE1 = 1
+ *
+ * When the kernel starts the HDCP authentication (see "Content Protection"
+ * for details), it uses the content type in "HDCP Content Type"
+ * for performing the HDCP authentication with the display sink.
+ *
+ * Please note that among the HDCP spec versions, a link can be
+ * authenticated with HDCP 2.2 for both Content Type 0 and Content Type 1,
+ * whereas a link can be authenticated with HDCP 1.4 only for Content
+ * Type 0 (implicitly, as HDCP 1.4 has no notion of Content Type).
+ *
+ * The HDCP 2.2 authentication protocol itself takes the "Content Type" as
+ * a parameter, which is an input for the DP HDCP 2.2 encryption algorithm.
+ *
+ * In case of a Type 0 content protection request, the kernel driver can
+ * choose either of the HDCP spec versions 1.4 and 2.2. When HDCP 2.2 is
+ * used for "HDCP Type 0", an HDCP 2.2 capable repeater downstream can send
+ * that content to an HDCP 1.4 authenticated HDCP sink (Type 0 link).
+ * But if the content is classified as "HDCP Type 1", the above mentioned
+ * HDCP 2.2 repeater won't send the content to the HDCP sink, as it can't
+ * authenticate an HDCP 1.4 capable sink for "HDCP Type 1".
+ *
+ * Please note that userspace can be ignorant of the HDCP versions used by
+ * the kernel driver to achieve the "HDCP Content Type".
+ *
+ * Currently, classifying content as Type 1 ensures that the content will
+ * be displayed only through an HDCP 2.2 encrypted link.
+ *
+ * Note that the HDCP Content Type property is introduced with HDCP 2.2,
+ * and defaults to Type 0. It is only exposed by drivers supporting
+ * HDCP 2.2 (hence supporting Type 0 and Type 1). Depending on how future
+ * versions of the HDCP specs are defined, Content Type could be used for
+ * higher versions too.
+ *
+ * If the content type is changed while "Content Protection" is not
+ * UNDESIRED, then the kernel will disable HDCP and re-enable it with the
+ * new type in the same atomic commit. And when "Content Protection" is
+ * ENABLED, it means that the link is HDCP authenticated and encrypted for
+ * the transmission of the stream type mentioned in "HDCP Content Type".
*
* HDR_OUTPUT_METADATA:
* Connector property to enable userspace to send HDR Metadata to
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 7ca486d750e9..be1b7ba92ffe 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -66,9 +66,18 @@
* the reported CRCs of frames that should have the same contents.
*
* On the driver side the implementation effort is minimal, drivers only need to
- * implement &drm_crtc_funcs.set_crc_source. The debugfs files are automatically
- * set up if that vfunc is set. CRC samples need to be captured in the driver by
- * calling drm_crtc_add_crc_entry().
+ * implement &drm_crtc_funcs.set_crc_source and &drm_crtc_funcs.verify_crc_source.
+ * The debugfs files are automatically set up if those vfuncs are set. CRC samples
+ * need to be captured in the driver by calling drm_crtc_add_crc_entry().
+ * Depending on the driver and HW requirements, &drm_crtc_funcs.set_crc_source
+ * may result in a commit (even a full modeset).
+ *
+ * CRC results must be reliable across non-full-modeset atomic commits, so if a
+ * commit via DRM_IOCTL_MODE_ATOMIC would disable or otherwise interfere with
+ * CRC generation, then the driver must mark that commit as a full modeset
+ * (drm_atomic_crtc_needs_modeset() should return true). As a result, to ensure
+ * consistent results, generic userspace must re-setup CRC generation after a
+ * legacy SETCRTC or an atomic commit with DRM_MODE_ATOMIC_ALLOW_MODESET.
*/
static int crc_control_show(struct seq_file *m, void *data)
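A hedged sketch of the two hooks described in the documentation above, for a driver that produces a single CRC value per frame (names hypothetical):

static int my_crtc_verify_crc_source(struct drm_crtc *crtc,
				     const char *source, size_t *values_cnt)
{
	if (source && strcmp(source, "auto") != 0)
		return -EINVAL;

	*values_cnt = 1;
	return 0;
}

static int my_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
{
	/* program, or disable (source == NULL), CRC generation in hardware */
	return 0;
}

static void my_crtc_report_crc(struct drm_crtc *crtc, u32 crc)
{
	/* called from the vblank/CRC interrupt handler */
	drm_crtc_add_crc_entry(crtc, true,
			       drm_crtc_accurate_vblank_count(crtc), &crc);
}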
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 5ef0227eaa0e..e45b07890c5a 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_dma.c
* DMA IOCTL and function support
*
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 5be28e3295f3..0cfb386754c3 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
#include "drm_crtc_helper_internal.h"
@@ -82,8 +83,7 @@ static struct drm_dp_aux_dev *alloc_drm_dp_aux_dev(struct drm_dp_aux *aux)
kref_init(&aux_dev->refcount);
mutex_lock(&aux_idr_mutex);
- index = idr_alloc_cyclic(&aux_idr, aux_dev, 0, DRM_AUX_MINORS,
- GFP_KERNEL);
+ index = idr_alloc(&aux_idr, aux_dev, 0, DRM_AUX_MINORS, GFP_KERNEL);
mutex_unlock(&aux_idr_mutex);
if (index < 0) {
kfree(aux_dev);
@@ -163,7 +163,12 @@ static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
break;
}
- res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
+ if (aux_dev->aux->is_remote)
+ res = drm_dp_mst_dpcd_read(aux_dev->aux, pos, buf,
+ todo);
+ else
+ res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo);
+
if (res <= 0)
break;
@@ -210,7 +215,12 @@ static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
break;
}
- res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
+ if (aux_dev->aux->is_remote)
+ res = drm_dp_mst_dpcd_write(aux_dev->aux, pos, buf,
+ todo);
+ else
+ res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo);
+
if (res <= 0)
break;
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 0b994d083a89..ffc68d305afe 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -152,38 +152,15 @@ EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
- switch (link_rate) {
- default:
- WARN(1, "unknown DP link rate %d, using %x\n", link_rate,
- DP_LINK_BW_1_62);
- /* fall through */
- case 162000:
- return DP_LINK_BW_1_62;
- case 270000:
- return DP_LINK_BW_2_7;
- case 540000:
- return DP_LINK_BW_5_4;
- case 810000:
- return DP_LINK_BW_8_1;
- }
+ /* Spec says link_bw = link_rate / 0.27Gbps */
+ return link_rate / 27000;
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
- switch (link_bw) {
- default:
- WARN(1, "unknown DP link BW code %x, using 162000\n", link_bw);
- /* fall through */
- case DP_LINK_BW_1_62:
- return 162000;
- case DP_LINK_BW_2_7:
- return 270000;
- case DP_LINK_BW_5_4:
- return 540000;
- case DP_LINK_BW_8_1:
- return 810000;
- }
+ /* Spec says link_rate = link_bw * 0.27Gbps */
+ return link_bw * 27000;
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
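The linear conversion is valid because the DPCD bandwidth codes are defined as the link rate in units of 0.27 Gbps, i.e. 27000 in the kHz units used here:

/*
 * Worked examples of the mapping:
 *   DP_LINK_BW_1_62 = 0x06 = 162000 / 27000
 *   DP_LINK_BW_2_7  = 0x0a = 270000 / 27000
 *   DP_LINK_BW_5_4  = 0x14 = 540000 / 27000
 *   DP_LINK_BW_8_1  = 0x1e = 810000 / 27000
 */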
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0984b9a34d55..82add736e17d 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -36,6 +36,8 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include "drm_crtc_helper_internal.h"
+
/**
* DOC: dp mst helper
*
@@ -53,6 +55,9 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload);
+static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
@@ -1483,6 +1488,52 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
return send_link;
}
+/**
+ * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
+ * @aux: Fake sideband AUX CH
+ * @offset: address of the (first) register to read
+ * @buffer: buffer to store the register values
+ * @size: number of bytes in @buffer
+ *
+ * Performs the same functionality for remote devices via
+ * sideband messaging as drm_dp_dpcd_read() does for local
+ * devices via actual AUX CH.
+ *
+ * Return: Number of bytes read, or negative error code on failure.
+ */
+ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
+ aux);
+
+ return drm_dp_send_dpcd_read(port->mgr, port,
+ offset, size, buffer);
+}
+
+/**
+ * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
+ * @aux: Fake sideband AUX CH
+ * @offset: address of the (first) register to write
+ * @buffer: buffer containing the values to write
+ * @size: number of bytes in @buffer
+ *
+ * Performs the same functionality for remote devices via
+ * sideband messaging as drm_dp_dpcd_write() does for local
+ * devices via actual AUX CH.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
+ unsigned int offset, void *buffer, size_t size)
+{
+ struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
+ aux);
+
+ return drm_dp_send_dpcd_write(port->mgr, port,
+ offset, size, buffer);
+}
+
static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
int ret;
@@ -1526,6 +1577,46 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
strlcat(proppath, temp, proppath_size);
}
+/**
+ * drm_dp_mst_connector_late_register() - Late MST connector registration
+ * @connector: The MST connector
+ * @port: The MST port for this connector
+ *
+ * Helper to register the remote aux device for this MST port. Drivers should
+ * call this from their mst connector's late_register hook to enable MST aux
+ * devices.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_dp_mst_connector_late_register(struct drm_connector *connector,
+ struct drm_dp_mst_port *port)
+{
+ DRM_DEBUG_KMS("registering %s remote bus for %s\n",
+ port->aux.name, connector->kdev->kobj.name);
+
+ port->aux.dev = connector->kdev;
+ return drm_dp_aux_register_devnode(&port->aux);
+}
+EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
+
+/**
+ * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
+ * @connector: The MST connector
+ * @port: The MST port for this connector
+ *
+ * Helper to unregister the remote aux device for this MST port, registered by
+ * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
+ * connector's early_unregister hook.
+ */
+void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
+ struct drm_dp_mst_port *port)
+{
+ DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
+ port->aux.name, connector->kdev->kobj.name);
+ drm_dp_aux_unregister_devnode(&port->aux);
+}
+EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
+
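A hedged sketch of how a driver wires these helpers into its MST connector funcs; the amdgpu_dm change in this series does essentially this, but the names below are hypothetical:

static int my_mst_connector_late_register(struct drm_connector *connector)
{
	struct my_mst_connector *mconn = to_my_mst_connector(connector);

	return drm_dp_mst_connector_late_register(connector, mconn->port);
}

static void my_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct my_mst_connector *mconn = to_my_mst_connector(connector);

	drm_dp_mst_connector_early_unregister(connector, mconn->port);
}

static const struct drm_connector_funcs my_mst_connector_funcs = {
	/* ... */
	.late_register = my_mst_connector_late_register,
	.early_unregister = my_mst_connector_early_unregister,
};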
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
struct drm_device *dev,
struct drm_dp_link_addr_reply_port *port_msg)
@@ -1548,6 +1639,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->mgr = mstb->mgr;
port->aux.name = "DPMST";
port->aux.dev = dev->dev;
+ port->aux.is_remote = true;
/*
* Make sure the memory allocation for our parent branch stays
@@ -1816,7 +1908,6 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
return false;
}
-#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1829,7 +1920,6 @@ static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
return 0;
}
-#endif
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
bool up, u8 *msg, int len)
@@ -2441,26 +2531,58 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);
-#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
- int offset, int size)
+ int offset, int size, u8 *bytes)
{
int len;
+ int ret = 0;
struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
+ if (!mstb)
+ return -EINVAL;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
- if (!txmsg)
- return -ENOMEM;
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto fail_put;
+ }
- len = build_dpcd_read(txmsg, port->port_num, 0, 8);
+ len = build_dpcd_read(txmsg, port->port_num, offset, size);
txmsg->dst = port->parent;
drm_dp_queue_down_tx(mgr, txmsg);
- return 0;
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret < 0)
+ goto fail_free;
+
+ /* DPCD read should never be NACKed */
+ if (txmsg->reply.reply_type == 1) {
+ DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
+ mstb, port->port_num, offset, size);
+ ret = -EIO;
+ goto fail_free;
+ }
+
+ if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
+ ret = -EPROTO;
+ goto fail_free;
+ }
+
+ ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
+ size);
+ memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
+
+fail_free:
+ kfree(txmsg);
+fail_put:
+ drm_dp_mst_topology_put_mstb(mstb);
+
+ return ret;
}
-#endif
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
@@ -2489,7 +2611,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- ret = -EINVAL;
+ ret = -EIO;
else
ret = 0;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 9d00947ca447..e652305d8f98 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -978,14 +978,14 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_minors;
- dev->registered = true;
-
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
goto err_minors;
}
+ dev->registered = true;
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_register_all(dev);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 754af25fe255..ea34bc991858 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -147,8 +147,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
drm_syncobj_open(file);
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_init_file_private(&file->prime);
+ drm_prime_init_file_private(&file->prime);
if (dev->driver->open) {
ret = dev->driver->open(dev, file);
@@ -159,8 +158,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
return file;
out_prime_destroy:
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&file->prime);
+ drm_prime_destroy_file_private(&file->prime);
if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
drm_syncobj_release(file);
if (drm_core_check_feature(dev, DRIVER_GEM))
@@ -253,8 +251,7 @@ void drm_file_free(struct drm_file *file)
if (dev->driver->postclose)
dev->driver->postclose(dev, file);
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_prime_destroy_file_private(&file->prime);
+ drm_prime_destroy_file_private(&file->prime);
WARN_ON(!list_empty(&file->event_list));
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a8c4468f03d9..afc38cece3f5 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -39,6 +39,7 @@
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>
+#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -254,8 +255,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
else if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
- if (drm_core_check_feature(dev, DRIVER_PRIME))
- drm_gem_remove_prime_handles(obj, file_priv);
+ drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -1288,8 +1288,8 @@ retry:
if (contended != -1) {
struct drm_gem_object *obj = objs[contended];
- ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
- acquire_ctx);
+ ret = reservation_object_lock_slow_interruptible(obj->resv,
+ acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
return ret;
@@ -1300,16 +1300,16 @@ retry:
if (i == contended)
continue;
- ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
- acquire_ctx);
+ ret = reservation_object_lock_interruptible(objs[i]->resv,
+ acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++)
- ww_mutex_unlock(&objs[j]->resv->lock);
+ reservation_object_unlock(objs[j]->resv);
if (contended != -1 && contended >= i)
- ww_mutex_unlock(&objs[contended]->resv->lock);
+ reservation_object_unlock(objs[contended]->resv);
if (ret == -EDEADLK) {
contended = i;
@@ -1334,7 +1334,7 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
int i;
for (i = 0; i < count; i++)
- ww_mutex_unlock(&objs[i]->resv->lock);
+ reservation_object_unlock(objs[i]->resv);
ww_acquire_fini(acquire_ctx);
}
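For reference, the reservation_object locking wrappers used above are thin inlines over the embedded ww_mutex; roughly, from <linux/reservation.h> of this era:

static inline int
reservation_object_lock_interruptible(struct reservation_object *obj,
				      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

static inline void
reservation_object_unlock(struct reservation_object *obj)
{
	ww_mutex_unlock(&obj->lock);
}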
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 8fcbabf02dfd..f61304054786 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -271,11 +271,11 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
* @plane: Plane
* @state: Plane state the fence will be attached to
*
- * This function prepares a GEM backed framebuffer for scanout by checking if
- * the plane framebuffer has a DMA-BUF attached. If it does, it extracts the
- * exclusive fence and attaches it to the plane state for the atomic helper to
- * wait on. This function can be used as the &drm_plane_helper_funcs.prepare_fb
- * callback.
+ * This function extracts the exclusive fence from &drm_gem_object.resv and
+ * attaches it to plane state for the atomic helper to wait on. This is
+ * necessary to correctly implement implicit synchronization for any buffers
+ * shared as a struct &dma_buf. This function can be used as the
+ * &drm_plane_helper_funcs.prepare_fb callback.
*
* There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
* gem based framebuffer drivers which have their buffers always pinned in
@@ -287,17 +287,15 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
struct dma_fence *fence;
if (!state->fb)
return 0;
- dma_buf = drm_gem_fb_get_obj(state->fb, 0)->dma_buf;
- if (dma_buf) {
- fence = reservation_object_get_excl_rcu(dma_buf->resv);
- drm_atomic_set_fence_for_plane(state, fence);
- }
+ obj = drm_gem_fb_get_obj(state->fb, 0);
+ fence = reservation_object_get_excl_rcu(obj->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
@@ -309,10 +307,11 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
* @pipe: Simple display pipe
* @plane_state: Plane state
*
- * This function uses drm_gem_fb_prepare_fb() to check if the plane FB has a
- * &dma_buf attached, extracts the exclusive fence and attaches it to plane
- * state for the atomic helper to wait on. Drivers can use this as their
- * &drm_simple_display_pipe_funcs.prepare_fb callback.
+ * This function uses drm_gem_fb_prepare_fb() to extract the exclusive fence
+ * from &drm_gem_object.resv and attaches it to plane state for the atomic
+ * helper to wait on. This is necessary to correctly implement implicit
+ * synchronization for any buffers shared as a struct &dma_buf. Drivers can use
+ * this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
*
* See drm_atomic_set_fence_for_plane() for a discussion of implicit and
* explicit fencing in atomic modeset updates.
@@ -323,46 +322,3 @@ int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pi
return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_fb_simple_display_pipe_prepare_fb);
-
-/**
- * drm_gem_fbdev_fb_create - Create a GEM backed &drm_framebuffer for fbdev
- * emulation
- * @dev: DRM device
- * @sizes: fbdev size description
- * @pitch_align: Optional pitch alignment
- * @obj: GEM object backing the framebuffer
- * @funcs: Optional vtable to be used for the new framebuffer object when the
- * dirty callback is needed.
- *
- * This function creates a framebuffer from a &drm_fb_helper_surface_size
- * description for use in the &drm_fb_helper_funcs.fb_probe callback.
- *
- * Returns:
- * Pointer to a &drm_framebuffer on success or an error pointer on failure.
- */
-struct drm_framebuffer *
-drm_gem_fbdev_fb_create(struct drm_device *dev,
- struct drm_fb_helper_surface_size *sizes,
- unsigned int pitch_align, struct drm_gem_object *obj,
- const struct drm_framebuffer_funcs *funcs)
-{
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = sizes->surface_width *
- DIV_ROUND_UP(sizes->surface_bpp, 8);
- if (pitch_align)
- mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0],
- pitch_align);
- mode_cmd.pixel_format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
- sizes->surface_depth);
- if (obj->size < mode_cmd.pitches[0] * mode_cmd.height)
- return ERR_PTR(-EINVAL);
-
- if (!funcs)
- funcs = &drm_gem_fb_funcs;
-
- return drm_gem_fb_alloc(dev, &mode_cmd, &obj, 1, funcs);
-}
-EXPORT_SYMBOL(drm_gem_fbdev_fb_create);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 472ea5d81f82..2f64667ac805 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 4de782ca26b2..fd751078bae1 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -7,6 +7,8 @@
#include <drm/drm_vram_mm_helper.h>
#include <drm/ttm/ttm_page_alloc.h>
+static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
+
/**
* DOC: overview
*
@@ -24,7 +26,7 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
- drm_gem_object_release(&gbo->gem);
+ drm_gem_object_release(&gbo->bo.base);
}
static void drm_gem_vram_destroy(struct drm_gem_vram_object *gbo)
@@ -80,7 +82,10 @@ static int drm_gem_vram_init(struct drm_device *dev,
int ret;
size_t acc_size;
- ret = drm_gem_object_init(dev, &gbo->gem, size);
+ if (!gbo->bo.base.funcs)
+ gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
+
+ ret = drm_gem_object_init(dev, &gbo->bo.base, size);
if (ret)
return ret;
@@ -98,7 +103,7 @@ static int drm_gem_vram_init(struct drm_device *dev,
return 0;
err_drm_gem_object_release:
- drm_gem_object_release(&gbo->gem);
+ drm_gem_object_release(&gbo->bo.base);
return ret;
}
@@ -163,7 +168,7 @@ EXPORT_SYMBOL(drm_gem_vram_put);
*/
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
{
- return drm_vma_node_offset_addr(&gbo->bo.vma_node);
+ return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
}
EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
@@ -378,11 +383,11 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
if (IS_ERR(gbo))
return PTR_ERR(gbo);
- ret = drm_gem_handle_create(file, &gbo->gem, &handle);
+ ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
if (ret)
goto err_drm_gem_object_put_unlocked;
- drm_gem_object_put_unlocked(&gbo->gem);
+ drm_gem_object_put_unlocked(&gbo->bo.base);
args->pitch = pitch;
args->size = size;
@@ -391,7 +396,7 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
return 0;
err_drm_gem_object_put_unlocked:
- drm_gem_object_put_unlocked(&gbo->gem);
+ drm_gem_object_put_unlocked(&gbo->bo.base);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
@@ -441,7 +446,7 @@ int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo);
- return drm_vma_node_verify_access(&gbo->gem.vma_node,
+ return drm_vma_node_verify_access(&gbo->bo.base.vma_node,
filp->private_data);
}
EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access);
@@ -460,21 +465,24 @@ const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = {
EXPORT_SYMBOL(drm_gem_vram_mm_funcs);
/*
- * Helpers for struct drm_driver
+ * Helpers for struct drm_gem_object_funcs
*/
/**
- * drm_gem_vram_driver_gem_free_object_unlocked() - \
- Implements &struct drm_driver.gem_free_object_unlocked
- * @gem: GEM object. Refers to &struct drm_gem_vram_object.gem
+ * drm_gem_vram_object_free() - \
+ Implements &struct drm_gem_object_funcs.free
+ * @gem: GEM object. Refers to &struct drm_gem_vram_object.bo.base
*/
-void drm_gem_vram_driver_gem_free_object_unlocked(struct drm_gem_object *gem)
+static void drm_gem_vram_object_free(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_put(gbo);
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_free_object_unlocked);
+
+/*
+ * Helpers for dumb buffers
+ */
/**
* drm_gem_vram_driver_create_dumb() - \
@@ -536,19 +544,19 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
/*
- * PRIME helpers for struct drm_driver
+ * PRIME helpers
*/
/**
- * drm_gem_vram_driver_gem_prime_pin() - \
- Implements &struct drm_driver.gem_prime_pin
+ * drm_gem_vram_object_pin() - \
+ Implements &struct drm_gem_object_funcs.pin
* @gem: The GEM object to pin
*
* Returns:
* 0 on success, or
* a negative errno code otherwise.
*/
-int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *gem)
+static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
@@ -562,31 +570,29 @@ int drm_gem_vram_driver_gem_prime_pin(struct drm_gem_object *gem)
*/
return drm_gem_vram_pin(gbo, 0);
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_pin);
/**
- * drm_gem_vram_driver_gem_prime_unpin() - \
- Implements &struct drm_driver.gem_prime_unpin
+ * drm_gem_vram_object_unpin() - \
+ Implements &struct drm_gem_object_funcs.unpin
* @gem: The GEM object to unpin
*/
-void drm_gem_vram_driver_gem_prime_unpin(struct drm_gem_object *gem)
+static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_unpin(gbo);
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_unpin);
/**
- * drm_gem_vram_driver_gem_prime_vmap() - \
- Implements &struct drm_driver.gem_prime_vmap
+ * drm_gem_vram_object_vmap() - \
+ Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
*
* Returns:
* The buffer's virtual address on success, or
* NULL otherwise.
*/
-void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *gem)
+static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
int ret;
@@ -602,40 +608,30 @@ void *drm_gem_vram_driver_gem_prime_vmap(struct drm_gem_object *gem)
}
return base;
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vmap);
/**
- * drm_gem_vram_driver_gem_prime_vunmap() - \
- Implements &struct drm_driver.gem_prime_vunmap
+ * drm_gem_vram_object_vunmap() - \
+ Implements &struct drm_gem_object_funcs.vunmap
* @gem: The GEM object to unmap
* @vaddr: The mapping's base address
*/
-void drm_gem_vram_driver_gem_prime_vunmap(struct drm_gem_object *gem,
- void *vaddr)
+static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
+ void *vaddr)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
drm_gem_vram_kunmap(gbo);
drm_gem_vram_unpin(gbo);
}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_vunmap);
-/**
- * drm_gem_vram_driver_gem_prime_mmap() - \
- Implements &struct drm_driver.gem_prime_mmap
- * @gem: The GEM object to map
- * @vma: The VMA describing the mapping
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
+/*
+ * GEM object funcs
*/
-int drm_gem_vram_driver_gem_prime_mmap(struct drm_gem_object *gem,
- struct vm_area_struct *vma)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
- gbo->gem.vma_node.vm_node.start = gbo->bo.vma_node.vm_node.start;
- return drm_gem_prime_mmap(gem, vma);
-}
-EXPORT_SYMBOL(drm_gem_vram_driver_gem_prime_mmap);
+static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
+ .free = drm_gem_vram_object_free,
+ .pin = drm_gem_vram_object_pin,
+ .unpin = drm_gem_vram_object_unpin,
+ .vmap = drm_gem_vram_object_vmap,
+ .vunmap = drm_gem_vram_object_vunmap
+};
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index cd837bd409f7..9191633a3c43 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -271,6 +271,13 @@ exit:
*
* SRM should be presented in the name of "display_hdcp_srm.bin".
*
+ * The format of the SRM table that userspace needs to write into the binary
+ * file is defined in:
+ * 1. Renewability chapter on 55th page of HDCP 1.4 specification
+ * https://www.digital-cp.com/sites/default/files/specifications/HDCP%20Specification%20Rev1_4_Secure.pdf
+ * 2. Renewability chapter on 63rd page of HDCP 2.2 specification
+ * https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf
+ *
* Returns:
* TRUE if any of the KSVs is revoked, else FALSE.
*/
@@ -344,23 +351,45 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = {
};
DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
+static struct drm_prop_enum_list drm_hdcp_content_type_enum_list[] = {
+ { DRM_MODE_HDCP_CONTENT_TYPE0, "HDCP Type0" },
+ { DRM_MODE_HDCP_CONTENT_TYPE1, "HDCP Type1" },
+};
+DRM_ENUM_NAME_FN(drm_get_hdcp_content_type_name,
+ drm_hdcp_content_type_enum_list)
+
/**
* drm_connector_attach_content_protection_property - attach content protection
* property
*
* @connector: connector to attach CP property on.
+ * @hdcp_content_type: whether the HDCP Content Type property is needed for
+ * this connector
*
* This is used to add support for content protection on select connectors.
* Content Protection is intentionally vague to allow for different underlying
* technologies, however it is most commonly implemented by HDCP.
*
+ * When hdcp_content_type is true, an enum property called "HDCP Content Type"
+ * is created (if it does not exist already) and attached to the connector.
+ *
+ * This property is used for sending the protected content's stream type
+ * from userspace to the kernel on selected connectors. The provider of the
+ * protected content decides the type of the content and declares it to the
+ * kernel.
+ *
+ * The content type is used during the HDCP 2.2 authentication and is stored
+ * in &drm_connector_state.hdcp_content_type.
+ *
* The content protection will be set to &drm_connector_state.content_protection
*
+ * Kernel-triggered content protection state changes such as DESIRED->ENABLED
+ * and ENABLED->DESIRED use drm_hdcp_update_content_protection() to update
+ * the content protection state of a connector.
+ *
* Returns:
* Zero on success, negative errno on failure.
*/
int drm_connector_attach_content_protection_property(
- struct drm_connector *connector)
+ struct drm_connector *connector, bool hdcp_content_type)
{
struct drm_device *dev = connector->dev;
struct drm_property *prop =
@@ -377,6 +406,52 @@ int drm_connector_attach_content_protection_property(
DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
dev->mode_config.content_protection_property = prop;
+ if (!hdcp_content_type)
+ return 0;
+
+ prop = dev->mode_config.hdcp_content_type_property;
+ if (!prop)
+ prop = drm_property_create_enum(dev, 0, "HDCP Content Type",
+ drm_hdcp_content_type_enum_list,
+ ARRAY_SIZE(
+ drm_hdcp_content_type_enum_list));
+ if (!prop)
+ return -ENOMEM;
+
+ drm_object_attach_property(&connector->base, prop,
+ DRM_MODE_HDCP_CONTENT_TYPE0);
+ dev->mode_config.hdcp_content_type_property = prop;
+
return 0;
}
EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
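To illustrate the userspace side, a hedged libdrm sketch; the property ids would be discovered by name via drmModeObjectGetProperties() beforehand, and the enum values shown match the documentation above:

	drmModeAtomicReq *req = drmModeAtomicAlloc();

	/* "Content Protection": 0 = UNDESIRED, 1 = DESIRED, 2 = ENABLED */
	drmModeAtomicAddProperty(req, connector_id, content_protection_prop, 1);
	/* "HDCP Content Type": 0 = HDCP Type0, 1 = HDCP Type1 */
	drmModeAtomicAddProperty(req, connector_id, hdcp_content_type_prop, 1);

	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
	drmModeAtomicFree(req);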
+
+/**
+ * drm_hdcp_update_content_protection - Updates the content protection state
+ * of a connector
+ *
+ * @connector: drm_connector on which content protection state needs an update
+ * @val: New state of the content protection property
+ *
+ * This function can be used by display drivers to report kernel-triggered
+ * content protection state changes of a drm_connector, such as
+ * DESIRED->ENABLED and ENABLED->DESIRED. No uevent is sent for
+ * DESIRED->UNDESIRED or ENABLED->UNDESIRED, as userspace triggers such state
+ * changes and the kernel performs them without fail. This function writes the
+ * new state of the property into the connector's state and generates a uevent
+ * to notify userspace.
+ */
+void drm_hdcp_update_content_protection(struct drm_connector *connector,
+ u64 val)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_connector_state *state = connector->state;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ if (state->content_protection == val)
+ return;
+
+ state->content_protection = val;
+ drm_sysfs_connector_status_event(connector,
+ dev->mode_config.content_protection_property);
+}
+EXPORT_SYMBOL(drm_hdcp_update_content_protection);
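A hedged sketch of the intended call site in a driver, holding the connection_mutex that the WARN_ON above checks for (names and surrounding context hypothetical):

	/* after hardware reports successful HDCP authentication */
	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	drm_hdcp_update_content_protection(connector,
					   DRM_MODE_CONTENT_PROTECTION_ENABLED);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);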
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index a16b6dc2fa47..22c7fd7196c8 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -108,7 +108,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
.desc = compat_ptr(v32.desc),
};
err = drm_ioctl_kernel(file, drm_version, &v,
- DRM_UNLOCKED|DRM_RENDER_ALLOW);
+ DRM_RENDER_ALLOW);
if (err)
return err;
@@ -142,7 +142,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
.unique = compat_ptr(uq32.unique),
};
- err = drm_ioctl_kernel(file, drm_getunique, &uq, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_getunique, &uq, 0);
if (err)
return err;
@@ -181,7 +181,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
return -EFAULT;
map.offset = m32.offset;
- err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0);
if (err)
return err;
@@ -267,7 +267,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
client.idx = c32.idx;
- err = drm_ioctl_kernel(file, drm_getclient, &client, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_getclient, &client, 0);
if (err)
return err;
@@ -297,7 +297,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
drm_stats32_t __user *argp = (void __user *)arg;
int err;
- err = drm_ioctl_kernel(file, drm_noop, NULL, DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_noop, NULL, 0);
if (err)
return err;
@@ -895,8 +895,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
sizeof(req64.modifier)))
return -EFAULT;
- err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64,
- DRM_UNLOCKED);
+ err = drm_ioctl_kernel(file, drm_mode_addfb2, &req64, 0);
if (err)
return err;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index bd810454d239..f675a3bb2c88 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -570,24 +570,23 @@ EXPORT_SYMBOL(drm_ioctl_permit);
/* Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_MASTER),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
@@ -595,8 +594,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -642,74 +641,74 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_UNLOCKED),
-
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_noop, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_noop, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_DESTROY, drm_syncobj_destroy_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, drm_syncobj_handle_to_fd_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
- DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_LIST_LESSEES, drm_mode_list_lessees_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GET_LEASE, drm_mode_get_lease_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
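With DRM_UNLOCKED now implied for everything except DRIVER_LEGACY drivers, a
driver-private ioctl table entry carries only the permission flags; a hedged
sketch with hypothetical foo_* names:

	/* No DRM_UNLOCKED needed: the locked path is legacy-only. */
	DRM_IOCTL_DEF_DRV(FOO_GEM_CREATE, foo_gem_create_ioctl,
			  DRM_AUTH | DRM_RENDER_ALLOW),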
@@ -777,7 +776,7 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
return retcode;
/* Enforce sane locking for modern driver ioctls. */
- if (!drm_core_check_feature(dev, DRIVER_LEGACY) ||
+ if (likely(!drm_core_check_feature(dev, DRIVER_LEGACY)) ||
(flags & DRM_UNLOCKED))
retcode = func(dev, kdata, file_priv);
else {
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
index 4d3a11cfd979..8f54e6a78b6f 100644
--- a/drivers/gpu/drm/drm_legacy_misc.c
+++ b/drivers/gpu/drm/drm_legacy_misc.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_legacy_misc.c
* Misc legacy support functions.
*
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 68b18b0e290c..2e8ce99d0baa 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_lock.c
* IOCTLs for locking
*
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index b634e1670190..0bec6dbb0142 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_memory.c
* Memory management wrappers for DRM
*
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index ca9da654fc6f..1961f713aaab 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -13,17 +13,18 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_vblank.h>
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
+#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -98,17 +99,17 @@ static const u8 mipi_dbi_dcs_read_commands[] = {
0, /* sentinel */
};
-static bool mipi_dbi_command_is_read(struct mipi_dbi *mipi, u8 cmd)
+static bool mipi_dbi_command_is_read(struct mipi_dbi *dbi, u8 cmd)
{
unsigned int i;
- if (!mipi->read_commands)
+ if (!dbi->read_commands)
return false;
for (i = 0; i < 0xff; i++) {
- if (!mipi->read_commands[i])
+ if (!dbi->read_commands[i])
return false;
- if (cmd == mipi->read_commands[i])
+ if (cmd == dbi->read_commands[i])
return true;
}
@@ -117,7 +118,7 @@ static bool mipi_dbi_command_is_read(struct mipi_dbi *mipi, u8 cmd)
/**
* mipi_dbi_command_read - MIPI DCS read command
- * @mipi: MIPI structure
+ * @dbi: MIPI DBI structure
* @cmd: Command
* @val: Value read
*
@@ -126,21 +127,21 @@ static bool mipi_dbi_command_is_read(struct mipi_dbi *mipi, u8 cmd)
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val)
+int mipi_dbi_command_read(struct mipi_dbi *dbi, u8 cmd, u8 *val)
{
- if (!mipi->read_commands)
+ if (!dbi->read_commands)
return -EACCES;
- if (!mipi_dbi_command_is_read(mipi, cmd))
+ if (!mipi_dbi_command_is_read(dbi, cmd))
return -EINVAL;
- return mipi_dbi_command_buf(mipi, cmd, val, 1);
+ return mipi_dbi_command_buf(dbi, cmd, val, 1);
}
EXPORT_SYMBOL(mipi_dbi_command_read);
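For example, reading the Power Mode register, the same pattern
mipi_dbi_display_is_on() uses further down (sketch):

	u8 val;
	int ret;

	ret = mipi_dbi_command_read(dbi, MIPI_DCS_GET_POWER_MODE, &val);
	if (ret)
		return ret; /* interface is write-only or command not readable */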
/**
* mipi_dbi_command_buf - MIPI DCS command with parameter(s) in an array
- * @mipi: MIPI structure
+ * @dbi: MIPI DBI structure
* @cmd: Command
* @data: Parameter buffer
* @len: Buffer length
@@ -148,7 +149,7 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
{
u8 *cmdbuf;
int ret;
@@ -158,9 +159,9 @@ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
if (!cmdbuf)
return -ENOMEM;
- mutex_lock(&mipi->cmdlock);
- ret = mipi->command(mipi, cmdbuf, data, len);
- mutex_unlock(&mipi->cmdlock);
+ mutex_lock(&dbi->cmdlock);
+ ret = dbi->command(dbi, cmdbuf, data, len);
+ mutex_unlock(&dbi->cmdlock);
kfree(cmdbuf);
@@ -169,7 +170,7 @@ int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
EXPORT_SYMBOL(mipi_dbi_command_buf);
/* This should only be used by mipi_dbi_command() */
-int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
{
u8 *buf;
int ret;
@@ -178,7 +179,7 @@ int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t le
if (!buf)
return -ENOMEM;
- ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
+ ret = mipi_dbi_command_buf(dbi, cmd, buf, len);
kfree(buf);
@@ -199,8 +200,9 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
+ struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
+ struct dma_buf_attachment *import_attach = gem->import_attach;
struct drm_format_name_buf format_name;
void *src = cma_obj->vaddr;
int ret = 0;
@@ -238,16 +240,18 @@ EXPORT_SYMBOL(mipi_dbi_buf_copy);
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+ struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
- bool swap = mipi->swap_bytes;
+ struct mipi_dbi *dbi = &dbidev->dbi;
+ bool swap = dbi->swap_bytes;
int idx, ret = 0;
bool full;
void *tr;
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
if (!drm_dev_enter(fb->dev, &idx))
@@ -257,24 +261,24 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- if (!mipi->dc || !full || swap ||
+ if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
- tr = mipi->tx_buf;
- ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, rect, swap);
+ tr = dbidev->tx_buf;
+ ret = mipi_dbi_buf_copy(dbidev->tx_buf, fb, rect, swap);
if (ret)
goto err_msg;
} else {
tr = cma_obj->vaddr;
}
- mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
(rect->x1 >> 8) & 0xff, rect->x1 & 0xff,
((rect->x2 - 1) >> 8) & 0xff, (rect->x2 - 1) & 0xff);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
(rect->y1 >> 8) & 0xff, rect->y1 & 0xff,
((rect->y2 - 1) >> 8) & 0xff, (rect->y2 - 1) & 0xff);
- ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
+ ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, tr,
width * height * 2);
err_msg:
if (ret)
@@ -312,7 +316,7 @@ EXPORT_SYMBOL(mipi_dbi_pipe_update);
/**
* mipi_dbi_enable_flush - MIPI DBI enable helper
- * @mipi: MIPI DBI structure
+ * @dbidev: MIPI DBI device structure
* @crtc_state: CRTC state
* @plane_state: Plane state
*
@@ -324,7 +328,7 @@ EXPORT_SYMBOL(mipi_dbi_pipe_update);
* framebuffer flushing, can't use this function since they both use the same
* flushing code.
*/
-void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
+void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
@@ -337,36 +341,37 @@ void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
};
int idx;
- if (!drm_dev_enter(&mipi->drm, &idx))
+ if (!drm_dev_enter(&dbidev->drm, &idx))
return;
- mipi->enabled = true;
+ dbidev->enabled = true;
mipi_dbi_fb_dirty(fb, &rect);
- backlight_enable(mipi->backlight);
+ backlight_enable(dbidev->backlight);
drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_enable_flush);
-static void mipi_dbi_blank(struct mipi_dbi *mipi)
+static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
{
- struct drm_device *drm = &mipi->drm;
+ struct drm_device *drm = &dbidev->drm;
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
+ struct mipi_dbi *dbi = &dbidev->dbi;
size_t len = width * height * 2;
int idx;
if (!drm_dev_enter(drm, &idx))
return;
- memset(mipi->tx_buf, 0, len);
+ memset(dbidev->tx_buf, 0, len);
- mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
(width >> 8) & 0xFF, (width - 1) & 0xFF);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
(height >> 8) & 0xFF, (height - 1) & 0xFF);
- mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
- (u8 *)mipi->tx_buf, len);
+ mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
+ (u8 *)dbidev->tx_buf, len);
drm_dev_exit(idx);
}
@@ -381,25 +386,79 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
*/
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
DRM_DEBUG_KMS("\n");
- mipi->enabled = false;
+ dbidev->enabled = false;
- if (mipi->backlight)
- backlight_disable(mipi->backlight);
+ if (dbidev->backlight)
+ backlight_disable(dbidev->backlight);
else
- mipi_dbi_blank(mipi);
+ mipi_dbi_blank(dbidev);
- if (mipi->regulator)
- regulator_disable(mipi->regulator);
+ if (dbidev->regulator)
+ regulator_disable(dbidev->regulator);
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
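Taken together, a driver typically wires these helpers into its
&drm_simple_display_pipe_funcs; a sketch with a hypothetical enable callback
that ends in mipi_dbi_enable_flush():

	static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
		.enable = foo_pipe_enable,	/* panel init, then mipi_dbi_enable_flush() */
		.disable = mipi_dbi_pipe_disable,
		.update = mipi_dbi_pipe_update,
	};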
+static int mipi_dbi_connector_get_modes(struct drm_connector *connector)
+{
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(connector->dev);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &dbidev->mode);
+ if (!mode) {
+ DRM_ERROR("Failed to duplicate mode\n");
+ return 0;
+ }
+
+ if (mode->name[0] == '\0')
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ if (mode->width_mm) {
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ }
+
+ return 1;
+}
+
+static const struct drm_connector_helper_funcs mipi_dbi_connector_hfuncs = {
+ .get_modes = mipi_dbi_connector_get_modes,
+};
+
+static const struct drm_connector_funcs mipi_dbi_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int mipi_dbi_rotate_mode(struct drm_display_mode *mode,
+ unsigned int rotation)
+{
+ if (rotation == 0 || rotation == 180) {
+ return 0;
+ } else if (rotation == 90 || rotation == 270) {
+ swap(mode->hdisplay, mode->vdisplay);
+ swap(mode->hsync_start, mode->vsync_start);
+ swap(mode->hsync_end, mode->vsync_end);
+ swap(mode->htotal, mode->vtotal);
+ swap(mode->width_mm, mode->height_mm);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
static const struct drm_mode_config_funcs mipi_dbi_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
@@ -412,60 +471,111 @@ static const uint32_t mipi_dbi_formats[] = {
};
/**
- * mipi_dbi_init - MIPI DBI initialization
- * @mipi: &mipi_dbi structure to initialize
+ * mipi_dbi_dev_init_with_formats - MIPI DBI device initialization with custom formats
+ * @dbidev: MIPI DBI device structure to initialize
* @funcs: Display pipe functions
+ * @formats: Array of supported formats (DRM_FORMAT\_\*).
+ * @format_count: Number of elements in @formats
* @mode: Display mode
* @rotation: Initial rotation in degrees Counter Clock Wise
+ * @tx_buf_size: Allocate a transmit buffer of this size.
*
* This function sets up a &drm_simple_display_pipe with a &drm_connector that
* has one fixed &drm_display_mode which is rotated according to @rotation.
* This mode is used to set the mode config min/max width/height properties.
- * Additionally &mipi_dbi.tx_buf is allocated.
*
- * Supported formats: Native RGB565 and emulated XRGB8888.
+ * Use mipi_dbi_dev_init() if you don't need custom formats.
+ *
+ * Note:
+ * Some of the helper functions expect RGB565 to be the default format and the
+ * transmit buffer sized to fit that.
*
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_init(struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *funcs,
- const struct drm_display_mode *mode, unsigned int rotation)
+int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ const struct drm_display_mode *mode,
+ unsigned int rotation, size_t tx_buf_size)
{
- size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
- struct drm_device *drm = &mipi->drm;
+ static const uint64_t modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+ };
+ struct drm_device *drm = &dbidev->drm;
int ret;
- if (!mipi->command)
+ if (!dbidev->dbi.command)
return -EINVAL;
- mutex_init(&mipi->cmdlock);
-
- mipi->tx_buf = devm_kmalloc(drm->dev, bufsize, GFP_KERNEL);
- if (!mipi->tx_buf)
+ dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL);
+ if (!dbidev->tx_buf)
return -ENOMEM;
- /* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
- ret = tinydrm_display_pipe_init(drm, &mipi->pipe, funcs,
- DRM_MODE_CONNECTOR_VIRTUAL,
- mipi_dbi_formats,
- ARRAY_SIZE(mipi_dbi_formats), mode,
- rotation);
+ drm_mode_copy(&dbidev->mode, mode);
+ ret = mipi_dbi_rotate_mode(&dbidev->mode, rotation);
+ if (ret) {
+ DRM_ERROR("Illegal rotation value %u\n", rotation);
+ return -EINVAL;
+ }
+
+ drm_connector_helper_add(&dbidev->connector, &mipi_dbi_connector_hfuncs);
+ ret = drm_connector_init(drm, &dbidev->connector, &mipi_dbi_connector_funcs,
+ DRM_MODE_CONNECTOR_SPI);
+ if (ret)
+ return ret;
+
+ ret = drm_simple_display_pipe_init(drm, &dbidev->pipe, funcs, formats, format_count,
+ modifiers, &dbidev->connector);
if (ret)
return ret;
- drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
+ drm_plane_enable_fb_damage_clips(&dbidev->pipe.plane);
drm->mode_config.funcs = &mipi_dbi_mode_config_funcs;
- drm->mode_config.preferred_depth = 16;
- mipi->rotation = rotation;
+ drm->mode_config.min_width = dbidev->mode.hdisplay;
+ drm->mode_config.max_width = dbidev->mode.hdisplay;
+ drm->mode_config.min_height = dbidev->mode.vdisplay;
+ drm->mode_config.max_height = dbidev->mode.vdisplay;
+ dbidev->rotation = rotation;
- DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- drm->mode_config.preferred_depth, rotation);
+ DRM_DEBUG_KMS("rotation = %u\n", rotation);
return 0;
}
-EXPORT_SYMBOL(mipi_dbi_init);
+EXPORT_SYMBOL(mipi_dbi_dev_init_with_formats);
+
+/**
+ * mipi_dbi_dev_init - MIPI DBI device initialization
+ * @dbidev: MIPI DBI device structure to initialize
+ * @funcs: Display pipe functions
+ * @mode: Display mode
+ * @rotation: Initial rotation in degrees Counter Clock Wise
+ *
+ * This function sets up a &drm_simple_display_pipe with a &drm_connector that
+ * has one fixed &drm_display_mode which is rotated according to @rotation.
+ * This mode is used to set the mode config min/max width/height properties.
+ * Additionally &mipi_dbi.tx_buf is allocated.
+ *
+ * Supported formats: Native RGB565 and emulated XRGB8888.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ const struct drm_display_mode *mode, unsigned int rotation)
+{
+ size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
+
+ dbidev->drm.mode_config.preferred_depth = 16;
+
+ return mipi_dbi_dev_init_with_formats(dbidev, funcs, mipi_dbi_formats,
+ ARRAY_SIZE(mipi_dbi_formats), mode,
+ rotation, bufsize);
+}
+EXPORT_SYMBOL(mipi_dbi_dev_init);
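A hedged probe excerpt tying mipi_dbi_spi_init() and mipi_dbi_dev_init()
together (hypothetical foo_* names; dbidev's embedded drm_device is assumed
to be initialized already):

	ret = mipi_dbi_spi_init(spi, &dbidev->dbi, dc);
	if (ret)
		return ret;

	ret = mipi_dbi_dev_init(dbidev, &foo_pipe_funcs, &foo_mode, rotation);
	if (ret)
		return ret;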
/**
* mipi_dbi_release - DRM driver release helper
@@ -477,37 +587,37 @@ EXPORT_SYMBOL(mipi_dbi_init);
*/
void mipi_dbi_release(struct drm_device *drm)
{
- struct mipi_dbi *dbi = drm_to_mipi_dbi(drm);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(drm);
DRM_DEBUG_DRIVER("\n");
drm_mode_config_cleanup(drm);
drm_dev_fini(drm);
- kfree(dbi);
+ kfree(dbidev);
}
EXPORT_SYMBOL(mipi_dbi_release);
/**
* mipi_dbi_hw_reset - Hardware reset of controller
- * @mipi: MIPI DBI structure
+ * @dbi: MIPI DBI structure
*
* Reset controller if the &mipi_dbi->reset gpio is set.
*/
-void mipi_dbi_hw_reset(struct mipi_dbi *mipi)
+void mipi_dbi_hw_reset(struct mipi_dbi *dbi)
{
- if (!mipi->reset)
+ if (!dbi->reset)
return;
- gpiod_set_value_cansleep(mipi->reset, 0);
+ gpiod_set_value_cansleep(dbi->reset, 0);
usleep_range(20, 1000);
- gpiod_set_value_cansleep(mipi->reset, 1);
+ gpiod_set_value_cansleep(dbi->reset, 1);
msleep(120);
}
EXPORT_SYMBOL(mipi_dbi_hw_reset);
/**
* mipi_dbi_display_is_on - Check if display is on
- * @mipi: MIPI DBI structure
+ * @dbi: MIPI DBI structure
*
* This function checks the Power Mode register (if readable) to see if
* display output is turned on. This can be used to see if the bootloader
@@ -517,11 +627,11 @@ EXPORT_SYMBOL(mipi_dbi_hw_reset);
* Returns:
* true if the display can be verified to be on, false otherwise.
*/
-bool mipi_dbi_display_is_on(struct mipi_dbi *mipi)
+bool mipi_dbi_display_is_on(struct mipi_dbi *dbi)
{
u8 val;
- if (mipi_dbi_command_read(mipi, MIPI_DCS_GET_POWER_MODE, &val))
+ if (mipi_dbi_command_read(dbi, MIPI_DCS_GET_POWER_MODE, &val))
return false;
val &= ~DCS_POWER_MODE_RESERVED_MASK;
@@ -537,28 +647,29 @@ bool mipi_dbi_display_is_on(struct mipi_dbi *mipi)
}
EXPORT_SYMBOL(mipi_dbi_display_is_on);
-static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
+static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi_dev *dbidev, bool cond)
{
- struct device *dev = mipi->drm.dev;
+ struct device *dev = dbidev->drm.dev;
+ struct mipi_dbi *dbi = &dbidev->dbi;
int ret;
- if (mipi->regulator) {
- ret = regulator_enable(mipi->regulator);
+ if (dbidev->regulator) {
+ ret = regulator_enable(dbidev->regulator);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to enable regulator (%d)\n", ret);
return ret;
}
}
- if (cond && mipi_dbi_display_is_on(mipi))
+ if (cond && mipi_dbi_display_is_on(dbi))
return 1;
- mipi_dbi_hw_reset(mipi);
- ret = mipi_dbi_command(mipi, MIPI_DCS_SOFT_RESET);
+ mipi_dbi_hw_reset(dbi);
+ ret = mipi_dbi_command(dbi, MIPI_DCS_SOFT_RESET);
if (ret) {
DRM_DEV_ERROR(dev, "Failed to send reset command (%d)\n", ret);
- if (mipi->regulator)
- regulator_disable(mipi->regulator);
+ if (dbidev->regulator)
+ regulator_disable(dbidev->regulator);
return ret;
}
@@ -567,7 +678,7 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
* per MIPI DSC spec should wait 5ms after soft reset. If we didn't,
* we assume worst case and wait 120ms.
*/
- if (mipi->reset)
+ if (dbi->reset)
usleep_range(5000, 20000);
else
msleep(120);
@@ -577,7 +688,7 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
/**
* mipi_dbi_poweron_reset - MIPI DBI poweron and reset
- * @mipi: MIPI DBI structure
+ * @dbidev: MIPI DBI device structure
*
* This function enables the regulator if used and does a hardware and software
* reset.
@@ -585,15 +696,15 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
* Returns:
* Zero on success, or a negative error code.
*/
-int mipi_dbi_poweron_reset(struct mipi_dbi *mipi)
+int mipi_dbi_poweron_reset(struct mipi_dbi_dev *dbidev)
{
- return mipi_dbi_poweron_reset_conditional(mipi, false);
+ return mipi_dbi_poweron_reset_conditional(dbidev, false);
}
EXPORT_SYMBOL(mipi_dbi_poweron_reset);
/**
* mipi_dbi_poweron_conditional_reset - MIPI DBI poweron and conditional reset
- * @mipi: MIPI DBI structure
+ * @dbidev: MIPI DBI device structure
*
* This function enables the regulator if used and if the display is off, it
* does a hardware and software reset. If mipi_dbi_display_is_on() determines
@@ -603,9 +714,9 @@ EXPORT_SYMBOL(mipi_dbi_poweron_reset);
* Zero if the controller was reset, 1 if the display was already on, or a
* negative error code.
*/
-int mipi_dbi_poweron_conditional_reset(struct mipi_dbi *mipi)
+int mipi_dbi_poweron_conditional_reset(struct mipi_dbi_dev *dbidev)
{
- return mipi_dbi_poweron_reset_conditional(mipi, true);
+ return mipi_dbi_poweron_reset_conditional(dbidev, true);
}
EXPORT_SYMBOL(mipi_dbi_poweron_conditional_reset);
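In an enable callback the conditional variant lets a driver skip its init
sequence when the bootloader left the panel running; a sketch (hypothetical
labels, following the pattern the tinydrm panel drivers use):

	ret = mipi_dbi_poweron_conditional_reset(dbidev);
	if (ret < 0)
		return;			/* regulator or reset failed */
	if (ret == 1)
		goto out_enable;	/* already on, skip init commands */

	/* ... controller init command sequence ... */
out_enable:
	mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);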
@@ -629,6 +740,15 @@ u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len)
}
EXPORT_SYMBOL(mipi_dbi_spi_cmd_max_speed);
+static bool mipi_dbi_machine_little_endian(void)
+{
+#if defined(__LITTLE_ENDIAN)
+ return true;
+#else
+ return false;
+#endif
+}
+
/*
* MIPI DBI Type C Option 1
*
@@ -647,15 +767,15 @@ EXPORT_SYMBOL(mipi_dbi_spi_cmd_max_speed);
* 76543210
*/
-static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
+static int mipi_dbi_spi1e_transfer(struct mipi_dbi *dbi, int dc,
const void *buf, size_t len,
unsigned int bpw)
{
- bool swap_bytes = (bpw == 16 && tinydrm_machine_little_endian());
- size_t chunk, max_chunk = mipi->tx_buf9_len;
- struct spi_device *spi = mipi->spi;
+ bool swap_bytes = (bpw == 16 && mipi_dbi_machine_little_endian());
+ size_t chunk, max_chunk = dbi->tx_buf9_len;
+ struct spi_device *spi = dbi->spi;
struct spi_transfer tr = {
- .tx_buf = mipi->tx_buf9,
+ .tx_buf = dbi->tx_buf9,
.bits_per_word = 8,
};
struct spi_message m;
@@ -675,13 +795,11 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
return -EINVAL;
/* Command: pad no-op's (zeroes) at beginning of block */
- dst = mipi->tx_buf9;
+ dst = dbi->tx_buf9;
memset(dst, 0, 9);
dst[8] = *src;
tr.len = 9;
- tinydrm_dbg_spi_message(spi, &m);
-
return spi_sync(spi, &m);
}
@@ -697,7 +815,7 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
chunk = min(len, max_chunk);
len -= chunk;
- dst = mipi->tx_buf9;
+ dst = dbi->tx_buf9;
if (chunk < 8) {
u8 val, carry = 0;
@@ -759,7 +877,6 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
tr.len = chunk + added;
- tinydrm_dbg_spi_message(spi, &m);
ret = spi_sync(spi, &m);
if (ret)
return ret;
@@ -768,11 +885,11 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *mipi, int dc,
return 0;
}
-static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
+static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
const void *buf, size_t len,
unsigned int bpw)
{
- struct spi_device *spi = mipi->spi;
+ struct spi_device *spi = dbi->spi;
struct spi_transfer tr = {
.bits_per_word = 9,
};
@@ -783,12 +900,12 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
u16 *dst16;
int ret;
- if (!tinydrm_spi_bpw_supported(spi, 9))
- return mipi_dbi_spi1e_transfer(mipi, dc, buf, len, bpw);
+ if (!spi_is_bpw_supported(spi, 9))
+ return mipi_dbi_spi1e_transfer(dbi, dc, buf, len, bpw);
tr.speed_hz = mipi_dbi_spi_cmd_max_speed(spi, len);
- max_chunk = mipi->tx_buf9_len;
- dst16 = mipi->tx_buf9;
+ max_chunk = dbi->tx_buf9_len;
+ dst16 = dbi->tx_buf9;
if (drm_debug & DRM_UT_DRIVER)
pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n",
@@ -803,7 +920,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
size_t chunk = min(len, max_chunk);
unsigned int i;
- if (bpw == 16 && tinydrm_machine_little_endian()) {
+ if (bpw == 16 && mipi_dbi_machine_little_endian()) {
for (i = 0; i < (chunk * 2); i += 2) {
dst16[i] = *src16 >> 8;
dst16[i + 1] = *src16++ & 0xFF;
@@ -823,7 +940,6 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
tr.len = chunk;
len -= chunk;
- tinydrm_dbg_spi_message(spi, &m);
ret = spi_sync(spi, &m);
if (ret)
return ret;
@@ -832,30 +948,30 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
return 0;
}
-static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
+static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd,
u8 *parameters, size_t num)
{
unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
int ret;
- if (mipi_dbi_command_is_read(mipi, *cmd))
+ if (mipi_dbi_command_is_read(dbi, *cmd))
return -ENOTSUPP;
MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
- ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
+ ret = mipi_dbi_spi1_transfer(dbi, 0, cmd, 1, 8);
if (ret || !num)
return ret;
- return mipi_dbi_spi1_transfer(mipi, 1, parameters, num, bpw);
+ return mipi_dbi_spi1_transfer(dbi, 1, parameters, num, bpw);
}
/* MIPI DBI Type C Option 3 */
-static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
+static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd,
u8 *data, size_t len)
{
- struct spi_device *spi = mipi->spi;
+ struct spi_device *spi = dbi->spi;
u32 speed_hz = min_t(u32, MIPI_DBI_MAX_SPI_READ_SPEED,
spi->max_speed_hz / 2);
struct spi_transfer tr[2] = {
@@ -892,15 +1008,13 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
return -ENOMEM;
tr[1].rx_buf = buf;
- gpiod_set_value_cansleep(mipi->dc, 0);
+ gpiod_set_value_cansleep(dbi->dc, 0);
spi_message_init_with_transfers(&m, tr, ARRAY_SIZE(tr));
ret = spi_sync(spi, &m);
if (ret)
goto err_free;
- tinydrm_dbg_spi_message(spi, &m);
-
if (tr[1].len == len) {
memcpy(data, buf, len);
} else {
@@ -918,42 +1032,42 @@ err_free:
return ret;
}
-static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
+static int mipi_dbi_typec3_command(struct mipi_dbi *dbi, u8 *cmd,
u8 *par, size_t num)
{
- struct spi_device *spi = mipi->spi;
+ struct spi_device *spi = dbi->spi;
unsigned int bpw = 8;
u32 speed_hz;
int ret;
- if (mipi_dbi_command_is_read(mipi, *cmd))
- return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
+ if (mipi_dbi_command_is_read(dbi, *cmd))
+ return mipi_dbi_typec3_command_read(dbi, cmd, par, num);
MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
- gpiod_set_value_cansleep(mipi->dc, 0);
+ gpiod_set_value_cansleep(dbi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
if (ret || !num)
return ret;
- if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+ if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !dbi->swap_bytes)
bpw = 16;
- gpiod_set_value_cansleep(mipi->dc, 1);
+ gpiod_set_value_cansleep(dbi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
- return tinydrm_spi_transfer(spi, speed_hz, NULL, bpw, par, num);
+ return mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
}
/**
- * mipi_dbi_spi_init - Initialize MIPI DBI SPI interfaced controller
+ * mipi_dbi_spi_init - Initialize MIPI DBI SPI interface
* @spi: SPI device
- * @mipi: &mipi_dbi structure to initialize
+ * @dbi: MIPI DBI structure to initialize
* @dc: D/C gpio (optional)
*
- * This function sets &mipi_dbi->command, enables &mipi->read_commands for the
- * usual read commands. It should be followed by a call to mipi_dbi_init() or
+ * This function sets &mipi_dbi->command and enables &mipi_dbi->read_commands for
+ * the usual read commands. It should be followed by a call to mipi_dbi_dev_init() or
* a driver-specific init.
*
* If @dc is set, a Type C Option 3 interface is assumed, if not
@@ -968,18 +1082,12 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
+int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *dbi,
struct gpio_desc *dc)
{
- size_t tx_size = tinydrm_spi_max_transfer_size(spi, 0);
struct device *dev = &spi->dev;
int ret;
- if (tx_size < 16) {
- DRM_ERROR("SPI transmit buffer too small: %zu\n", tx_size);
- return -EINVAL;
- }
-
/*
* Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in
@@ -998,29 +1106,75 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
}
}
- mipi->spi = spi;
- mipi->read_commands = mipi_dbi_dcs_read_commands;
+ dbi->spi = spi;
+ dbi->read_commands = mipi_dbi_dcs_read_commands;
if (dc) {
- mipi->command = mipi_dbi_typec3_command;
- mipi->dc = dc;
- if (tinydrm_machine_little_endian() &&
- !tinydrm_spi_bpw_supported(spi, 16))
- mipi->swap_bytes = true;
+ dbi->command = mipi_dbi_typec3_command;
+ dbi->dc = dc;
+ if (mipi_dbi_machine_little_endian() && !spi_is_bpw_supported(spi, 16))
+ dbi->swap_bytes = true;
} else {
- mipi->command = mipi_dbi_typec1_command;
- mipi->tx_buf9_len = tx_size;
- mipi->tx_buf9 = devm_kmalloc(dev, tx_size, GFP_KERNEL);
- if (!mipi->tx_buf9)
+ dbi->command = mipi_dbi_typec1_command;
+ dbi->tx_buf9_len = SZ_16K;
+ dbi->tx_buf9 = devm_kmalloc(dev, dbi->tx_buf9_len, GFP_KERNEL);
+ if (!dbi->tx_buf9)
return -ENOMEM;
}
+ mutex_init(&dbi->cmdlock);
+
DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
return 0;
}
EXPORT_SYMBOL(mipi_dbi_spi_init);
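The D/C gpio is usually optional in the device tree, so a probe can pick the
interface type at runtime; a sketch:

	/* A NULL dc selects Type C Option 1 (9-bit), a valid gpio Option 3. */
	dc = devm_gpiod_get_optional(&spi->dev, "dc", GPIOD_OUT_LOW);
	if (IS_ERR(dc))
		return PTR_ERR(dc);

	ret = mipi_dbi_spi_init(spi, dbi, dc);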
+/**
+ * mipi_dbi_spi_transfer - SPI transfer helper
+ * @spi: SPI device
+ * @speed_hz: Override speed (optional)
+ * @bpw: Bits per word
+ * @buf: Buffer to transfer
+ * @len: Buffer length
+ *
+ * This SPI transfer helper breaks up the transfer of @buf into chunks which
+ * the SPI controller driver can handle.
+ *
+ * Returns:
+ * Zero on success, negative error code on failure.
+ */
+int mipi_dbi_spi_transfer(struct spi_device *spi, u32 speed_hz,
+ u8 bpw, const void *buf, size_t len)
+{
+ size_t max_chunk = spi_max_transfer_size(spi);
+ struct spi_transfer tr = {
+ .bits_per_word = bpw,
+ .speed_hz = speed_hz,
+ };
+ struct spi_message m;
+ size_t chunk;
+ int ret;
+
+ spi_message_init_with_transfers(&m, &tr, 1);
+
+ while (len) {
+ chunk = min(len, max_chunk);
+
+ tr.tx_buf = buf;
+ tr.len = chunk;
+ buf += chunk;
+ len -= chunk;
+
+ ret = spi_sync(spi, &m);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dbi_spi_transfer);
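Callers pair it with mipi_dbi_spi_cmd_max_speed(), as the Type C Option 3
code above does; a sketch:

	speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
	ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, par, num);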
+
#endif /* CONFIG_SPI */
#ifdef CONFIG_DEBUG_FS
@@ -1030,13 +1184,13 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
size_t count, loff_t *ppos)
{
struct seq_file *m = file->private_data;
- struct mipi_dbi *mipi = m->private;
+ struct mipi_dbi_dev *dbidev = m->private;
u8 val, cmd = 0, parameters[64];
char *buf, *pos, *token;
unsigned int i;
int ret, idx;
- if (!drm_dev_enter(&mipi->drm, &idx))
+ if (!drm_dev_enter(&dbidev->drm, &idx))
return -ENODEV;
buf = memdup_user_nul(ubuf, count);
@@ -1075,7 +1229,7 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
}
}
- ret = mipi_dbi_command_buf(mipi, cmd, parameters, i);
+ ret = mipi_dbi_command_buf(&dbidev->dbi, cmd, parameters, i);
err_free:
kfree(buf);
@@ -1087,16 +1241,17 @@ err_exit:
static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
{
- struct mipi_dbi *mipi = m->private;
+ struct mipi_dbi_dev *dbidev = m->private;
+ struct mipi_dbi *dbi = &dbidev->dbi;
u8 cmd, val[4];
int ret, idx;
size_t len;
- if (!drm_dev_enter(&mipi->drm, &idx))
+ if (!drm_dev_enter(&dbidev->drm, &idx))
return -ENODEV;
for (cmd = 0; cmd < 255; cmd++) {
- if (!mipi_dbi_command_is_read(mipi, cmd))
+ if (!mipi_dbi_command_is_read(dbi, cmd))
continue;
switch (cmd) {
@@ -1116,7 +1271,7 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
}
seq_printf(m, "%02x: ", cmd);
- ret = mipi_dbi_command_buf(mipi, cmd, val, len);
+ ret = mipi_dbi_command_buf(dbi, cmd, val, len);
if (ret) {
seq_puts(m, "XX\n");
continue;
@@ -1158,12 +1313,12 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
*/
int mipi_dbi_debugfs_init(struct drm_minor *minor)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(minor->dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev);
umode_t mode = S_IFREG | S_IWUSR;
- if (mipi->read_commands)
+ if (dbidev->dbi.read_commands)
mode |= S_IRUGO;
- debugfs_create_file("command", mode, minor->debugfs_root, mipi,
+ debugfs_create_file("command", mode, minor->debugfs_root, dbidev,
&mipi_dbi_debugfs_command_fops);
return 0;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 9a59865ce574..4581c5387372 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -472,7 +472,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
u64 remainder_mask;
bool once;
- DRM_MM_BUG_ON(range_start >= range_end);
+ DRM_MM_BUG_ON(range_start > range_end);
if (unlikely(size == 0 || range_end - range_start < size))
return -ENOSPC;
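The relaxed assertion makes an empty range legal input that simply fails the
allocation; a hedged sketch (assuming a 4K page-sized request):

	/* range_start == range_end used to trip the BUG_ON; now the insert
	 * just returns -ENOSPC for the degenerate empty range. */
	err = drm_mm_insert_node_in_range(mm, node, SZ_4K, 0, 0,
					  start, start, DRM_MM_INSERT_BEST);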
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 1c6e51135962..c355ba8e6d5d 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -42,6 +42,8 @@ int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
{
int ret;
+ WARN_ON(dev->registered && !obj_free_cb);
+
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? obj : NULL,
1, 0, GFP_KERNEL);
@@ -102,6 +104,8 @@ void drm_mode_object_register(struct drm_device *dev,
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object)
{
+ WARN_ON(dev->registered && !object->free_cb);
+
mutex_lock(&dev->mode_config.idr_mutex);
if (object->id) {
idr_remove(&dev->mode_config.object_idr, object->id);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 80fcd5dc1558..226a1d0720cf 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1912,8 +1912,11 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
case HDMI_PICTURE_ASPECT_256_135:
out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
break;
- case HDMI_PICTURE_ASPECT_RESERVED:
default:
+ WARN(1, "Invalid aspect ratio (0%x) on mode\n",
+ in->picture_aspect_ratio);
+ /* fall through */
+ case HDMI_PICTURE_ASPECT_NONE:
out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
break;
}
@@ -1972,20 +1975,22 @@ int drm_mode_convert_umode(struct drm_device *dev,
switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
case DRM_MODE_FLAG_PIC_AR_4_3:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_4_3;
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
break;
case DRM_MODE_FLAG_PIC_AR_16_9:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9;
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
break;
case DRM_MODE_FLAG_PIC_AR_64_27:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_64_27;
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27;
break;
case DRM_MODE_FLAG_PIC_AR_256_135:
- out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_256_135;
+ out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135;
break;
- default:
+ case DRM_MODE_FLAG_PIC_AR_NONE:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
break;
+ default:
+ return -EINVAL;
}
out->status = drm_mode_validate_driver(dev, out);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index d0c01318076b..0a2316e0e812 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -30,6 +30,7 @@
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
+#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
@@ -38,47 +39,52 @@
#include "drm_internal.h"
-/*
- * DMA-BUF/GEM Object references and lifetime overview:
- *
- * On the export the dma_buf holds a reference to the exporting GEM
- * object. It takes this reference in handle_to_fd_ioctl, when it
- * first calls .prime_export and stores the exporting GEM object in
- * the dma_buf priv. This reference needs to be released when the
- * final reference to the &dma_buf itself is dropped and its
- * &dma_buf_ops.release function is called. For GEM-based drivers,
- * the dma_buf should be exported using drm_gem_dmabuf_export() and
- * then released by drm_gem_dmabuf_release().
- *
- * On the import the importing GEM object holds a reference to the
- * dma_buf (which in turn holds a ref to the exporting GEM object).
- * It takes that reference in the fd_to_handle ioctl.
- * It calls dma_buf_get, creates an attachment to it and stores the
- * attachment in the GEM object. When this attachment is destroyed
- * when the imported object is destroyed, we remove the attachment
- * and drop the reference to the dma_buf.
- *
- * When all the references to the &dma_buf are dropped, i.e. when
- * userspace has closed both handles to the imported GEM object (through the
- * FD_TO_HANDLE IOCTL) and closed the file descriptor of the exported
- * (through the HANDLE_TO_FD IOCTL) dma_buf, and all kernel-internal references
- * are also gone, then the dma_buf gets destroyed. This can also happen as a
- * part of the clean up procedure in the drm_release() function if userspace
- * fails to properly clean up. Note that both the kernel and userspace (by
- * keeeping the PRIME file descriptors open) can hold references onto a
- * &dma_buf.
- *
- * Thus the chain of references always flows in one direction
- * (avoiding loops): importing_gem -> dmabuf -> exporting_gem
- *
- * Self-importing: if userspace is using PRIME as a replacement for flink
- * then it will get a fd->handle request for a GEM object that it created.
- * Drivers should detect this situation and return back the gem object
- * from the dma-buf private. Prime will do this automatically for drivers that
- * use the drm_gem_prime_{import,export} helpers.
- *
- * GEM struct &dma_buf_ops symbols are now exported. They can be resued by
- * drivers which implement GEM interface.
+/**
+ * DOC: overview and lifetime rules
+ *
+ * Similar to GEM global names, PRIME file descriptors are also used to share
+ * buffer objects across processes. They offer additional security: as file
+ * descriptors must be explicitly sent over UNIX domain sockets to be shared
+ * between applications, they can't be guessed like the globally unique GEM
+ * names.
+ *
+ * Drivers that support the PRIME API implement the
+ * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle operations.
+ * GEM based drivers must use drm_gem_prime_handle_to_fd() and
+ * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
+ * actual driver interface is provided through the &drm_gem_object_funcs.export
+ * and &drm_driver.gem_prime_import hooks.
+ *
+ * &dma_buf_ops implementations for GEM drivers are all individually exported
+ * for drivers which need to override or reimplement some of them.
+ *
+ * Reference Counting for GEM Drivers
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * On the export the &dma_buf holds a reference to the exported buffer object,
+ * usually a &drm_gem_object. It takes this reference in the PRIME_HANDLE_TO_FD
+ * IOCTL, when it first calls &drm_gem_object_funcs.export
+ * and stores the exporting GEM object in the &dma_buf.priv field. This
+ * reference needs to be released when the final reference to the &dma_buf
+ * itself is dropped and its &dma_buf_ops.release function is called. For
+ * GEM-based drivers, the &dma_buf should be exported using
+ * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
+ *
+ * Thus the chain of references always flows in one direction, avoiding loops:
+ * importing GEM object -> dma-buf -> exported GEM bo. A further complication
+ * is the pair of lookup caches for import and export. These are required to
+ * guarantee that any given object always has exactly one unique userspace
+ * handle. This allows userspace to detect duplicated imports, since some GEM
+ * drivers do fail command submissions if a given buffer object is listed more
+ * than once. These import and export caches in &drm_prime_file_private only
+ * retain a weak reference, which is cleaned up when the corresponding object is
+ * released.
+ *
+ * Self-importing: If userspace is using PRIME as a replacement for flink then
+ * it will get a fd->handle request for a GEM object that it created. Drivers
+ * should detect this situation and return back the underlying object from the
+ * dma-buf private. For GEM based drivers this is handled in
+ * drm_gem_prime_import() already.
*/
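A sketch of the standard wiring in a GEM driver's &drm_driver (hypothetical
foo_driver; the per-object export hook lives in &drm_gem_object_funcs):

	static struct drm_driver foo_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_MODESET,
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		.gem_prime_import	= drm_gem_prime_import,
		/* ... */
	};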
struct drm_prime_member {
@@ -181,42 +187,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
return -ENOENT;
}
-/**
- * drm_gem_map_attach - dma_buf attach implementation for GEM
- * @dma_buf: buffer to attach device to
- * @attach: buffer attachment data
- *
- * Calls &drm_driver.gem_prime_pin for device specific handling. This can be
- * used as the &dma_buf_ops.attach callback.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int drm_gem_map_attach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
-{
- struct drm_gem_object *obj = dma_buf->priv;
-
- return drm_gem_pin(obj);
-}
-EXPORT_SYMBOL(drm_gem_map_attach);
-
-/**
- * drm_gem_map_detach - dma_buf detach implementation for GEM
- * @dma_buf: buffer to detach from
- * @attach: attachment to be detached
- *
- * Cleans up &dma_buf_attachment. This can be used as the &dma_buf_ops.detach
- * callback.
- */
-void drm_gem_map_detach(struct dma_buf *dma_buf,
- struct dma_buf_attachment *attach)
-{
- struct drm_gem_object *obj = dma_buf->priv;
-
- drm_gem_unpin(obj);
-}
-EXPORT_SYMBOL(drm_gem_map_detach);
-
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf)
{
@@ -242,67 +212,21 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
}
}
-/**
- * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
- * @attach: attachment whose scatterlist is to be returned
- * @dir: direction of DMA transfer
- *
- * Calls &drm_driver.gem_prime_get_sg_table and then maps the scatterlist. This
- * can be used as the &dma_buf_ops.map_dma_buf callback.
- *
- * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
- * on error. May return -EINTR if it is interrupted by a signal.
- */
-
-struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
- enum dma_data_direction dir)
+void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct sg_table *sgt;
-
- if (WARN_ON(dir == DMA_NONE))
- return ERR_PTR(-EINVAL);
-
- if (obj->funcs)
- sgt = obj->funcs->get_sg_table(obj);
- else
- sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
-
- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC)) {
- sg_free_table(sgt);
- kfree(sgt);
- sgt = ERR_PTR(-ENOMEM);
- }
-
- return sgt;
+ mutex_init(&prime_fpriv->lock);
+ prime_fpriv->dmabufs = RB_ROOT;
+ prime_fpriv->handles = RB_ROOT;
}
-EXPORT_SYMBOL(drm_gem_map_dma_buf);
-/**
- * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
- * @attach: attachment to unmap buffer from
- * @sgt: scatterlist info of the buffer to unmap
- * @dir: direction of DMA transfer
- *
- * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
- */
-void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
- struct sg_table *sgt,
- enum dma_data_direction dir)
+void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
- if (!sgt)
- return;
-
- dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC);
- sg_free_table(sgt);
- kfree(sgt);
+ /* by now drm_gem_release should've made sure the list is empty */
+ WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
-EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
/**
- * drm_gem_dmabuf_export - dma_buf export implementation for GEM
+ * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
* @dev: parent device for the exported dmabuf
* @exp_info: the export information used by dma_buf_export()
*
@@ -330,11 +254,11 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_dmabuf_export);
/**
- * drm_gem_dmabuf_release - dma_buf release implementation for GEM
+ * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
* @dma_buf: buffer to be released
*
* Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
- * must use this in their dma_buf ops structure as the release callback.
+ * must use this in their &dma_buf_ops structure as the release callback.
* drm_gem_dmabuf_release() should be used in conjunction with
* drm_gem_dmabuf_export().
*/
@@ -351,128 +275,100 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
EXPORT_SYMBOL(drm_gem_dmabuf_release);
/**
- * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
- * @dma_buf: buffer to be mapped
+ * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
+ * @dev: dev to export the buffer from
+ * @file_priv: drm file-private structure
+ * @prime_fd: fd id of the dma-buf which should be imported
+ * @handle: pointer to storage for the handle of the imported buffer object
*
- * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
- * callback.
+ * This is the PRIME import function which GEM drivers must use to ensure
+ * correct lifetime management of the underlying GEM object. The actual
+ * importing of the GEM object from the dma-buf is done through the
+ * &drm_driver.gem_prime_import driver callback.
*
- * Returns the kernel virtual address.
+ * Returns 0 on success or a negative error code on failure.
*/
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd,
+ uint32_t *handle)
{
- struct drm_gem_object *obj = dma_buf->priv;
- void *vaddr;
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ int ret;
- vaddr = drm_gem_vmap(obj);
- if (IS_ERR(vaddr))
- vaddr = NULL;
+ dma_buf = dma_buf_get(prime_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
- return vaddr;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
+ mutex_lock(&file_priv->prime.lock);
-/**
- * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @vaddr: the virtual address of the buffer
- *
- * Releases a kernel virtual mapping. This can be used as the
- * &dma_buf_ops.vunmap callback.
- */
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
-{
- struct drm_gem_object *obj = dma_buf->priv;
+ ret = drm_prime_lookup_buf_handle(&file_priv->prime,
+ dma_buf, handle);
+ if (ret == 0)
+ goto out_put;
- drm_gem_vunmap(obj, vaddr);
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
+ /* never seen this one, need to import */
+ mutex_lock(&dev->object_name_lock);
+ if (dev->driver->gem_prime_import)
+ obj = dev->driver->gem_prime_import(dev, dma_buf);
+ else
+ obj = drm_gem_prime_import(dev, dma_buf);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto out_unlock;
+ }
-/**
- * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
- * @dma_buf: buffer to be mapped
- * @vma: virtual address range
- *
- * Provides memory mapping for the buffer. This can be used as the
- * &dma_buf_ops.mmap callback.
- *
- * Returns 0 on success or a negative error code on failure.
- */
-int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
-{
- struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
+ if (obj->dma_buf) {
+ WARN_ON(obj->dma_buf != dma_buf);
+ } else {
+ obj->dma_buf = dma_buf;
+ get_dma_buf(dma_buf);
+ }
- if (!dev->driver->gem_prime_mmap)
- return -ENOSYS;
+ /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, handle);
+ drm_gem_object_put_unlocked(obj);
+ if (ret)
+ goto out_put;
- return dev->driver->gem_prime_mmap(obj, vma);
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ dma_buf, *handle);
+ mutex_unlock(&file_priv->prime.lock);
+ if (ret)
+ goto fail;
-static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
- .cache_sgt_mapping = true,
- .attach = drm_gem_map_attach,
- .detach = drm_gem_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
- .release = drm_gem_dmabuf_release,
- .mmap = drm_gem_dmabuf_mmap,
- .vmap = drm_gem_dmabuf_vmap,
- .vunmap = drm_gem_dmabuf_vunmap,
-};
+ dma_buf_put(dma_buf);
-/**
- * DOC: PRIME Helpers
- *
- * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
- * simpler APIs by using the helper functions @drm_gem_prime_export and
- * @drm_gem_prime_import. These functions implement dma-buf support in terms of
- * six lower-level driver callbacks:
- *
- * Export callbacks:
- *
- * * @gem_prime_pin (optional): prepare a GEM object for exporting
- * * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
- * * @gem_prime_vmap: vmap a buffer exported by your driver
- * * @gem_prime_vunmap: vunmap a buffer exported by your driver
- * * @gem_prime_mmap (optional): mmap a buffer exported by your driver
- *
- * Import callback:
- *
- * * @gem_prime_import_sg_table (import): produce a GEM object from another
- * driver's scatter/gather table
- */
+ return 0;
-/**
- * drm_gem_prime_export - helper library implementation of the export callback
- * @dev: drm_device to export from
- * @obj: GEM object to export
- * @flags: flags like DRM_CLOEXEC and DRM_RDWR
- *
- * This is the implementation of the gem_prime_export functions for GEM drivers
- * using the PRIME helpers.
- */
-struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags)
+fail:
+	/* If the driver attached, we rely on the free-object path to
+	 * detach, which is fine here.
+	 */
+ drm_gem_handle_delete(file_priv, *handle);
+ dma_buf_put(dma_buf);
+ return ret;
+
+out_unlock:
+ mutex_unlock(&dev->object_name_lock);
+out_put:
+ mutex_unlock(&file_priv->prime.lock);
+ dma_buf_put(dma_buf);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
+
+int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct dma_buf_export_info exp_info = {
- .exp_name = KBUILD_MODNAME, /* white lie for debug */
- .owner = dev->driver->fops->owner,
- .ops = &drm_gem_prime_dmabuf_ops,
- .size = obj->size,
- .flags = flags,
- .priv = obj,
- .resv = obj->resv,
- };
+ struct drm_prime_handle *args = data;
- if (dev->driver->gem_prime_res_obj)
- exp_info.resv = dev->driver->gem_prime_res_obj(obj);
+ if (!dev->driver->prime_fd_to_handle)
+ return -ENOSYS;
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return dev->driver->prime_fd_to_handle(dev, file_priv,
+ args->fd, &args->handle);
}
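
As a hedged sketch of how a driver opts into these entry points (the foo_*
names are hypothetical; the helper assignments mirror the driver conversions
later in this series):

	/* Minimal PRIME wiring for a GEM driver using the helper defaults.
	 * All foo_* identifiers are assumptions, not part of this patch.
	 */
	static struct drm_driver foo_driver = {
		.driver_features = DRIVER_GEM | DRIVER_RENDER,
		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
		.gem_prime_mmap = drm_gem_prime_mmap,
	};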
-EXPORT_SYMBOL(drm_gem_prime_export);
static struct dma_buf *export_and_register_object(struct drm_device *dev,
struct drm_gem_object *obj,
@@ -489,9 +385,9 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
if (obj->funcs && obj->funcs->export)
dmabuf = obj->funcs->export(obj, flags);
else if (dev->driver->gem_prime_export)
- dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ dmabuf = dev->driver->gem_prime_export(obj, flags);
else
- dmabuf = drm_gem_prime_export(dev, obj, flags);
+ dmabuf = drm_gem_prime_export(obj, flags);
if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
@@ -521,7 +417,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from the GEM object to a dma-buf is done through the
- * gem_prime_export driver callback.
+ * &drm_driver.gem_prime_export driver callback.
*/
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
@@ -610,6 +506,195 @@ out_unlock:
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
+int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_prime_handle *args = data;
+
+ if (!dev->driver->prime_handle_to_fd)
+ return -ENOSYS;
+
+ /* check flags are valid */
+ if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
+ return -EINVAL;
+
+ return dev->driver->prime_handle_to_fd(dev, file_priv,
+ args->handle, args->flags, &args->fd);
+}
+
+/**
+ * DOC: PRIME Helpers
+ *
+ * Drivers can implement &drm_gem_object_funcs.export and
+ * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
+ * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
+ * implement dma-buf support in terms of some lower-level helpers, which are
+ * again exported for drivers to use individually:
+ *
+ * Exporting buffers
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * Optional pinning of buffers is handled at dma-buf attach and detach time in
+ * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
+ * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which rely on
+ * &drm_gem_object_funcs.get_sg_table.
+ *
+ * For kernel-internal access there's drm_gem_dmabuf_vmap() and
+ * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
+ * drm_gem_dmabuf_mmap().
+ *
+ * Note that these export helpers can only be used if the underlying backing
+ * storage is fully coherent and either permanently pinned or safe to pin
+ * indefinitely.
+ *
+ * FIXME: The underlying helper functions are named rather inconsistently.
+ *
+ * Importing buffers
+ * ~~~~~~~~~~~~~~~~~
+ *
+ * Importing dma-bufs using drm_gem_prime_import() relies on
+ * &drm_driver.gem_prime_import_sg_table.
+ *
+ * Note that, as with the export helpers, this permanently pins the
+ * underlying backing storage. That is fine for scanout, but not the best
+ * option for sharing lots of buffers for rendering.
+ */
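
To make the export helpers above concrete, a minimal sketch of the per-object
hooks they rely on (all foo_* callbacks are assumptions):

	static const struct drm_gem_object_funcs foo_gem_funcs = {
		.free = foo_gem_free_object,
		/* back drm_gem_map_attach() / drm_gem_map_detach() */
		.pin = foo_gem_pin,
		.unpin = foo_gem_unpin,
		/* backs drm_gem_map_dma_buf() */
		.get_sg_table = foo_gem_get_sg_table,
		/* back drm_gem_dmabuf_vmap() / drm_gem_dmabuf_vunmap() */
		.vmap = foo_gem_vmap,
		.vunmap = foo_gem_vunmap,
		.export = drm_gem_prime_export,
	};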
+
+/**
+ * drm_gem_map_attach - dma_buf attach implementation for GEM
+ * @dma_buf: buffer to attach device to
+ * @attach: buffer attachment data
+ *
+ * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
+ * used as the &dma_buf_ops.attach callback. Must be used together with
+ * drm_gem_map_detach().
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int drm_gem_map_attach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ return drm_gem_pin(obj);
+}
+EXPORT_SYMBOL(drm_gem_map_attach);
+
+/**
+ * drm_gem_map_detach - dma_buf detach implementation for GEM
+ * @dma_buf: buffer to detach from
+ * @attach: attachment to be detached
+ *
+ * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
+ * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
+ * &dma_buf_ops.detach callback.
+ */
+void drm_gem_map_detach(struct dma_buf *dma_buf,
+ struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ drm_gem_unpin(obj);
+}
+EXPORT_SYMBOL(drm_gem_map_detach);
+
+/**
+ * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
+ * @attach: attachment whose scatterlist is to be returned
+ * @dir: direction of DMA transfer
+ *
+ * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
+ * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
+ * with drm_gem_unmap_dma_buf().
+ *
+ * Returns: the sg_table containing the scatterlist to be returned, or an
+ * ERR_PTR on error. May return ERR_PTR(-EINTR) if interrupted by a signal.
+ */
+struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct sg_table *sgt;
+
+ if (WARN_ON(dir == DMA_NONE))
+ return ERR_PTR(-EINVAL);
+
+ if (obj->funcs)
+ sgt = obj->funcs->get_sg_table(obj);
+ else
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ sgt = ERR_PTR(-ENOMEM);
+ }
+
+ return sgt;
+}
+EXPORT_SYMBOL(drm_gem_map_dma_buf);
+
+/**
+ * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
+ * @attach: attachment to unmap buffer from
+ * @sgt: scatterlist info of the buffer to unmap
+ * @dir: direction of DMA transfer
+ *
+ * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
+ */
+void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ if (!sgt)
+ return;
+
+ dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
+
+/**
+ * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ *
+ * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
+ * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
+ *
+ * Returns the kernel virtual address or NULL on failure.
+ */
+void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ void *vaddr;
+
+ vaddr = drm_gem_vmap(obj);
+ if (IS_ERR(vaddr))
+ vaddr = NULL;
+
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
+
+/**
+ * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
+ * @dma_buf: buffer to be unmapped
+ * @vaddr: the virtual address of the buffer
+ *
+ * Releases a kernel virtual mapping. This can be used as the
+ * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
+ * device specific handling.
+ */
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ drm_gem_vunmap(obj, vaddr);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
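
For completeness, a hedged fragment of how an importer reaches these
vmap/vunmap ops through the dma-buf API (buf is an assumed struct dma_buf
pointer):

	void *vaddr = dma_buf_vmap(buf);

	if (vaddr) {
		/* CPU access to the shared buffer goes here. */
		dma_buf_vunmap(buf, vaddr);
	}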
+
/**
* drm_gem_prime_mmap - PRIME mmap function for GEM drivers
* @obj: GEM object
@@ -657,14 +742,117 @@ out:
EXPORT_SYMBOL(drm_gem_prime_mmap);
/**
+ * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
+ * @dma_buf: buffer to be mapped
+ * @vma: virtual address range
+ *
+ * Provides memory mapping for the buffer. This can be used as the
+ * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
+ * which should be set to drm_gem_prime_mmap().
+ *
+ * FIXME: There's really no point to this wrapper; drivers which need anything
+ * other than drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->dev;
+
+ if (!dev->driver->gem_prime_mmap)
+ return -ENOSYS;
+
+ return dev->driver->gem_prime_mmap(obj, vma);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+ .cache_sgt_mapping = true,
+ .attach = drm_gem_map_attach,
+ .detach = drm_gem_map_detach,
+ .map_dma_buf = drm_gem_map_dma_buf,
+ .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .release = drm_gem_dmabuf_release,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = drm_gem_dmabuf_vmap,
+ .vunmap = drm_gem_dmabuf_vunmap,
+};
+
+/**
+ * drm_prime_pages_to_sg - converts a page array into an sg list
+ * @pages: pointer to the array of page pointers to convert
+ * @nr_pages: length of the page vector
+ *
+ * This helper creates an sg table object from a set of pages. The driver
+ * is responsible for mapping the pages into the importer's address space
+ * for use with dma_buf itself.
+ *
+ * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
+ */
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+{
+ struct sg_table *sg = NULL;
+ int ret;
+
+ sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+ nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ return sg;
+out:
+ kfree(sg);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_prime_pages_to_sg);
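
A hypothetical &drm_gem_object_funcs.get_sg_table built on this helper
(to_foo_gem() and bo->pages are assumptions):

	static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
	{
		struct foo_gem_object *bo = to_foo_gem(obj);

		/* One entry per page; the importer maps the table itself. */
		return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
	}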
+
+/**
+ * drm_gem_prime_export - helper library implementation of the export callback
+ * @obj: GEM object to export
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
+ *
+ * This is the implementation of the &drm_gem_object_funcs.export function
+ * for GEM drivers using the PRIME helpers. It is used as the default in
+ * drm_gem_prime_handle_to_fd().
+ */
+struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
+ int flags)
+{
+ struct drm_device *dev = obj->dev;
+ struct dma_buf_export_info exp_info = {
+ .exp_name = KBUILD_MODNAME, /* white lie for debug */
+ .owner = dev->driver->fops->owner,
+ .ops = &drm_gem_prime_dmabuf_ops,
+ .size = obj->size,
+ .flags = flags,
+ .priv = obj,
+ .resv = obj->resv,
+ };
+
+ return drm_gem_dmabuf_export(dev, &exp_info);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
+/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
* @attach_dev: struct device to dma_buf attach
*
- * This is the core of drm_gem_prime_import. It's designed to be called by
- * drivers who want to use a different device structure than dev->dev for
- * attaching via dma_buf.
+ * This is the core of drm_gem_prime_import(). It's designed to be called by
+ * drivers who want to use a different device structure than &drm_device.dev for
+ * attaching via dma_buf. This function calls
+ * &drm_driver.gem_prime_import_sg_table internally.
+ *
+ * Drivers must arrange to call drm_prime_gem_destroy() from their
+ * &drm_gem_object_funcs.free hook when using this function.
*/
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
@@ -709,6 +897,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
}
obj->import_attach = attach;
+ obj->resv = dma_buf->resv;
return obj;
@@ -728,7 +917,12 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
* @dma_buf: dma-buf object to import
*
 * This is the implementation of the gem_prime_import function for GEM drivers
- * using the PRIME helpers.
+ * using the PRIME helpers. Drivers can use this as their
+ * &drm_driver.gem_prime_import implementation. It is used as the default
+ * implementation in drm_gem_prime_fd_to_handle().
+ *
+ * Drivers must arrange to call drm_prime_gem_destroy() from their
+ * &drm_gem_object_funcs.free hook when using this function.
*/
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
@@ -738,154 +932,6 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_prime_import);
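
The &drm_driver.gem_prime_import_sg_table callback this relies on could look
roughly like the following sketch (foo_gem_create_private() is an assumption):

	static struct drm_gem_object *
	foo_gem_prime_import_sg_table(struct drm_device *dev,
				      struct dma_buf_attachment *attach,
				      struct sg_table *sgt)
	{
		struct foo_gem_object *bo;

		bo = foo_gem_create_private(dev, attach->dmabuf->size);
		if (IS_ERR(bo))
			return ERR_CAST(bo);

		/* Keep the pinned table for drm_prime_gem_destroy(). */
		bo->sgt = sgt;
		return &bo->base;
	}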
/**
- * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
- * @dev: dev to export the buffer from
- * @file_priv: drm file-private structure
- * @prime_fd: fd id of the dma-buf which should be imported
- * @handle: pointer to storage for the handle of the imported buffer object
- *
- * This is the PRIME import function which must be used mandatorily by GEM
- * drivers to ensure correct lifetime management of the underlying GEM object.
- * The actual importing of GEM object from the dma-buf is done through the
- * gem_import_export driver callback.
- */
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd,
- uint32_t *handle)
-{
- struct dma_buf *dma_buf;
- struct drm_gem_object *obj;
- int ret;
-
- dma_buf = dma_buf_get(prime_fd);
- if (IS_ERR(dma_buf))
- return PTR_ERR(dma_buf);
-
- mutex_lock(&file_priv->prime.lock);
-
- ret = drm_prime_lookup_buf_handle(&file_priv->prime,
- dma_buf, handle);
- if (ret == 0)
- goto out_put;
-
- /* never seen this one, need to import */
- mutex_lock(&dev->object_name_lock);
- if (dev->driver->gem_prime_import)
- obj = dev->driver->gem_prime_import(dev, dma_buf);
- else
- obj = drm_gem_prime_import(dev, dma_buf);
- if (IS_ERR(obj)) {
- ret = PTR_ERR(obj);
- goto out_unlock;
- }
-
- if (obj->dma_buf) {
- WARN_ON(obj->dma_buf != dma_buf);
- } else {
- obj->dma_buf = dma_buf;
- get_dma_buf(dma_buf);
- }
-
- /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
- ret = drm_gem_handle_create_tail(file_priv, obj, handle);
- drm_gem_object_put_unlocked(obj);
- if (ret)
- goto out_put;
-
- ret = drm_prime_add_buf_handle(&file_priv->prime,
- dma_buf, *handle);
- mutex_unlock(&file_priv->prime.lock);
- if (ret)
- goto fail;
-
- dma_buf_put(dma_buf);
-
- return 0;
-
-fail:
- /* hmm, if driver attached, we are relying on the free-object path
- * to detach.. which seems ok..
- */
- drm_gem_handle_delete(file_priv, *handle);
- dma_buf_put(dma_buf);
- return ret;
-
-out_unlock:
- mutex_unlock(&dev->object_name_lock);
-out_put:
- mutex_unlock(&file_priv->prime.lock);
- dma_buf_put(dma_buf);
- return ret;
-}
-EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
-
-int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_prime_handle *args = data;
-
- if (!drm_core_check_feature(dev, DRIVER_PRIME))
- return -EOPNOTSUPP;
-
- if (!dev->driver->prime_handle_to_fd)
- return -ENOSYS;
-
- /* check flags are valid */
- if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
- return -EINVAL;
-
- return dev->driver->prime_handle_to_fd(dev, file_priv,
- args->handle, args->flags, &args->fd);
-}
-
-int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_prime_handle *args = data;
-
- if (!drm_core_check_feature(dev, DRIVER_PRIME))
- return -EOPNOTSUPP;
-
- if (!dev->driver->prime_fd_to_handle)
- return -ENOSYS;
-
- return dev->driver->prime_fd_to_handle(dev, file_priv,
- args->fd, &args->handle);
-}
-
-/**
- * drm_prime_pages_to_sg - converts a page array into an sg list
- * @pages: pointer to the array of page pointers to convert
- * @nr_pages: length of the page vector
- *
- * This helper creates an sg table object from a set of pages
- * the driver is responsible for mapping the pages into the
- * importers address space for use with dma_buf itself.
- */
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
-{
- struct sg_table *sg = NULL;
- int ret;
-
- sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!sg) {
- ret = -ENOMEM;
- goto out;
- }
-
- ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT, GFP_KERNEL);
- if (ret)
- goto out;
-
- return sg;
-out:
- kfree(sg);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(drm_prime_pages_to_sg);
-
-/**
* drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
* @sgt: scatter-gather table to convert
* @pages: optional array of page pointers to store the page array in
@@ -894,6 +940,9 @@ EXPORT_SYMBOL(drm_prime_pages_to_sg);
*
* Exports an sg table into an array of pages and addresses. This is currently
* required by the TTM driver in order to do correct fault handling.
+ *
+ * Drivers can use this in their &drm_driver.gem_prime_import_sg_table
+ * implementation.
*/
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
dma_addr_t *addrs, int max_entries)
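
A hedged usage fragment inside such an import path (bo, sgt and npages are
assumptions):

	int npages = bo->base.size >> PAGE_SHIFT;
	int ret;

	/* Passing NULL for addrs skips the dma address array. */
	ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages, NULL, npages);
	if (ret)
		return ERR_PTR(ret);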
@@ -934,7 +983,7 @@ EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
* @sg: the sg-table which was pinned at import time
*
 * This is the cleanup function which GEM drivers need to call when they use
- * @drm_gem_prime_import to import dma-bufs.
+ * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
*/
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
@@ -949,16 +998,3 @@ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
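
A matching &drm_gem_object_funcs.free hook, sketched under the same
assumptions as above:

	static void foo_gem_free_object(struct drm_gem_object *obj)
	{
		struct foo_gem_object *bo = to_foo_gem(obj);

		if (obj->import_attach)
			drm_prime_gem_destroy(obj, bo->sgt);

		drm_gem_object_release(obj);
		kfree(bo);
	}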
-
-void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
-{
- mutex_init(&prime_fpriv->lock);
- prime_fpriv->dmabufs = RB_ROOT;
- prime_fpriv->handles = RB_ROOT;
-}
-
-void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
-{
- /* by now drm_gem_release should've made sure the list is empty */
- WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
-}
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 2d7790f14b0c..d5c386154246 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_scatter.c
* IOCTLs to manage scatter/gather memory
*
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index a199c8d56b95..1438dcb3ebb1 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -53,6 +53,7 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
@@ -1297,14 +1298,14 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
struct dma_fence *iter, *last_signaled = NULL;
dma_fence_chain_for_each(iter, fence) {
- if (!iter)
- break;
- dma_fence_put(last_signaled);
- last_signaled = dma_fence_get(iter);
- if (!to_dma_fence_chain(last_signaled)->prev_seqno)
+ if (iter->context != fence->context) {
+ dma_fence_put(iter);
			/* It is most likely that the timeline has
			 * unordered points. */
break;
+ }
+ dma_fence_put(last_signaled);
+ last_signaled = dma_fence_get(iter);
}
point = dma_fence_is_signaled(last_signaled) ?
last_signaled->seqno :
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index ad10810bc972..dd2bc85f43cc 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gfp.h>
+#include <linux/i2c.h>
#include <linux/kdev_t.h>
#include <linux/slab.h>
@@ -26,6 +27,7 @@
#include <drm/drm_sysfs.h>
#include "drm_internal.h"
+#include "drm_crtc_internal.h"
#define to_drm_minor(d) dev_get_drvdata(d)
#define to_drm_connector(d) dev_get_drvdata(d)
@@ -294,6 +296,9 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
/* Let userspace know we have a new connector */
drm_sysfs_hotplug_event(dev);
+ if (connector->ddc)
+ return sysfs_create_link(&connector->kdev->kobj,
+ &connector->ddc->dev.kobj, "ddc");
return 0;
}
@@ -301,6 +306,10 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
{
if (!connector->kdev)
return;
+
+ if (connector->ddc)
+ sysfs_remove_link(&connector->kdev->kobj, "ddc");
+
DRM_DEBUG("removing \"%s\" from sysfs\n",
connector->name);
@@ -325,6 +334,9 @@ void drm_sysfs_lease_event(struct drm_device *dev)
* Send a uevent for the DRM device specified by @dev. Currently we only
* set HOTPLUG=1 in the uevent environment, but this could be expanded to
* deal with other types of events.
+ *
+ * Any new uapi should use drm_sysfs_connector_status_event()
+ * for uevents on connector status change.
*/
void drm_sysfs_hotplug_event(struct drm_device *dev)
{
@@ -337,6 +349,37 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_sysfs_hotplug_event);
+/**
+ * drm_sysfs_connector_status_event - generate a DRM uevent for connector
+ * property status change
+ * @connector: connector on which property status changed
+ * @property: connector property whose status changed.
+ *
+ * Send a uevent on the DRM device of @connector. Currently we set
+ * HOTPLUG=1 and the connector id along with the attached property id
+ * related to the status change.
+ */
+void drm_sysfs_connector_status_event(struct drm_connector *connector,
+ struct drm_property *property)
+{
+ struct drm_device *dev = connector->dev;
+ char hotplug_str[] = "HOTPLUG=1", conn_id[21], prop_id[21];
+ char *envp[4] = { hotplug_str, conn_id, prop_id, NULL };
+
+ WARN_ON(!drm_mode_obj_find_prop_id(&connector->base,
+ property->base.id));
+
+ snprintf(conn_id, ARRAY_SIZE(conn_id),
+ "CONNECTOR=%u", connector->base.id);
+ snprintf(prop_id, ARRAY_SIZE(prop_id),
+ "PROPERTY=%u", property->base.id);
+
+ DRM_DEBUG("generating connector status event\n");
+
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+EXPORT_SYMBOL(drm_sysfs_connector_status_event);
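
As an assumed usage sketch, a driver that just changed a connector property
from a worker could notify userspace like this (using the content protection
property purely as an example):

	static void foo_update_hdcp_value(struct drm_connector *connector,
					  u64 val)
	{
		struct drm_property *prop =
			connector->content_protection_property;

		drm_object_property_set_value(&connector->base, prop, val);
		drm_sysfs_connector_status_event(connector, prop);
	}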
+
static void drm_sysfs_release(struct device *dev)
{
kfree(dev);
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 603ab105125d..fd1fbc77871f 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -31,7 +31,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
-#include <drm/drm_os_linux.h>
#include <drm/drm_vblank.h>
#include "drm_internal.h"
@@ -1670,12 +1669,28 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
}
if (req_seq != seq) {
+ int wait;
+
DRM_DEBUG("waiting on vblank count %llu, crtc %u\n",
req_seq, pipe);
- DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
- vblank_passed(drm_vblank_count(dev, pipe),
- req_seq) ||
- !READ_ONCE(vblank->enabled));
+ wait = wait_event_interruptible_timeout(vblank->queue,
+ vblank_passed(drm_vblank_count(dev, pipe), req_seq) ||
+ !READ_ONCE(vblank->enabled),
+ msecs_to_jiffies(3000));
+
+ switch (wait) {
+ case 0:
+ /* timeout */
+ ret = -EBUSY;
+ break;
+ case -ERESTARTSYS:
+ /* interrupted by signal */
+ ret = -EINTR;
+ break;
+ default:
+ ret = 0;
+ break;
+ }
}
if (ret != -EINTR) {
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 05f7c5833946..52e87e4869a5 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file drm_vm.c
* Memory mapping for DRM
*
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 7eb7cf9c3fa8..08e033c1758d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -430,17 +430,17 @@ static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
- ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
- ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_NEW, gem_new, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_INFO, gem_info, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
+ ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -462,17 +462,13 @@ static const struct file_operations fops = {
};
static struct drm_driver etnaviv_drm_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_PRIME |
- DRIVER_RENDER,
+ .driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = etnaviv_open,
.postclose = etnaviv_postclose,
.gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = etnaviv_gem_prime_pin,
.gem_prime_unpin = etnaviv_gem_prime_unpin,
.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index e8778ebb72e6..17ca602db60a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -565,8 +565,7 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
}
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
- struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
- struct drm_gem_object **obj)
+ const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
struct etnaviv_gem_object *etnaviv_obj;
unsigned sz = sizeof(*etnaviv_obj);
@@ -594,8 +593,6 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
etnaviv_obj->flags = flags;
etnaviv_obj->ops = ops;
- if (robj)
- etnaviv_obj->base.resv = robj;
mutex_init(&etnaviv_obj->lock);
INIT_LIST_HEAD(&etnaviv_obj->vram_list);
@@ -614,7 +611,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
size = PAGE_ALIGN(size);
- ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
+ ret = etnaviv_gem_new_impl(dev, size, flags,
&etnaviv_gem_shmem_ops, &obj);
if (ret)
goto fail;
@@ -646,13 +643,12 @@ fail:
}
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
- struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
- struct etnaviv_gem_object **res)
+ const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
struct drm_gem_object *obj;
int ret;
- ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
+ ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
if (ret)
return ret;
@@ -734,7 +730,7 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
struct etnaviv_gem_object *etnaviv_obj;
int ret;
- ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
+ ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
&etnaviv_gem_userptr_ops, &etnaviv_obj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 753c458497d0..fcd5d71b502f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -112,8 +112,7 @@ void etnaviv_submit_put(struct etnaviv_gem_submit * submit);
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
struct timespec *timeout);
int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
- struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
- struct etnaviv_gem_object **res);
+ const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res);
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 00e8b6a817e3..a05292e8ed6f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -109,7 +109,6 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
int ret, npages;
ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC,
- attach->dmabuf->resv,
&etnaviv_gem_prime_ops, &etnaviv_obj);
if (ret < 0)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 58baf49d9926..cc53dcad25e4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -75,29 +75,29 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP, exynos_drm_gem_map_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET, exynos_drm_gem_get_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION, vidi_connection_ioctl,
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_GET_VER, exynos_g2d_get_ver_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_SET_CMDLIST, exynos_g2d_set_cmdlist_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_RESOURCES,
exynos_drm_ipp_get_res_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_CAPS, exynos_drm_ipp_get_caps_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_LIMITS,
exynos_drm_ipp_get_limits_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(EXYNOS_IPP_COMMIT, exynos_drm_ipp_commit_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
};
static const struct file_operations exynos_drm_driver_fops = {
@@ -112,7 +112,7 @@ static const struct file_operations exynos_drm_driver_fops = {
};
static struct drm_driver exynos_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
+ .driver_features = DRIVER_MODESET | DRIVER_GEM
| DRIVER_ATOMIC | DRIVER_RENDER,
.open = exynos_drm_open,
.lastclose = drm_fb_helper_lastclose,
@@ -122,7 +122,6 @@ static struct drm_driver exynos_drm_driver = {
.dumb_create = exynos_drm_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_import = exynos_drm_gem_prime_import,
.gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index f4635bea0265..b9ca81a6f80f 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -8,12 +8,13 @@
#include <linux/clk.h>
#include <linux/regmap.h>
-#include <drm/drmP.h>
+#include <video/videomode.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_probe_helper.h>
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index e81daaaa5965..f15d2e7967a3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -18,13 +18,15 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
@@ -133,8 +135,7 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
static struct drm_driver fsl_dcu_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET
- | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
.irq_handler = fsl_dcu_drm_irq,
@@ -144,8 +145,6 @@ static struct drm_driver fsl_dcu_drm_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index 2467c8934405..d763f53f480c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -5,7 +5,6 @@
* Freescale DCU drm device driver
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 6f2f65030dd1..86fac677fe69 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -7,10 +7,10 @@
#include <linux/regmap.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index c49e9e3740f8..279d83eaffc0 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -8,7 +8,6 @@
#include <linux/backlight.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 08657a3627f3..cc4c41748cfb 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -11,10 +11,16 @@
* Jianhua Li <lijianhua@huawei.com>
*/
+#include <linux/delay.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index ce89e56937b0..2ae538835781 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -13,9 +13,16 @@
#include <linux/console.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vram_mm_helper.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
@@ -51,10 +58,9 @@ static struct drm_driver hibmc_driver = {
.desc = "hibmc drm driver",
.major = 1,
.minor = 0,
- .gem_free_object_unlocked =
- drm_gem_vram_driver_gem_free_object_unlocked,
.dumb_create = hibmc_dumb_create,
.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.irq_handler = hibmc_drm_interrupt,
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 69348bf54a84..e58ecd7edcf8 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -14,12 +14,11 @@
#ifndef HIBMC_DRM_DRV_H
#define HIBMC_DRM_DRV_H
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_vram_mm_helper.h>
+#include <drm/drm_framebuffer.h>
+
+struct drm_device;
+struct drm_gem_object;
struct hibmc_framebuffer {
struct drm_framebuffer fb;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index af1ea4cceffa..b4c1cea051e8 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -13,6 +13,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_probe_helper.h>
#include "hibmc_drm_drv.h"
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 634a3bf018b2..6d98fdc06f6c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_print.h>
#include "hibmc_drm_drv.h"
#include "hibmc_drm_regs.h"
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 5d4a03cd7d50..9f6e473e6295 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -11,7 +11,13 @@
* Jianhua Li <lijianhua@huawei.com>
*/
+#include <linux/pci.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vram_mm_helper.h>
#include "hibmc_drm_drv.h"
@@ -60,7 +66,7 @@ int hibmc_gem_create(struct drm_device *dev, u32 size, bool iskernel,
DRM_ERROR("failed to allocate GEM object: %d\n", ret);
return ret;
}
- *obj = &gbo->gem;
+ *obj = &gbo->bo.base;
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index ad7042ae2241..0df1afdf319d 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -13,19 +13,23 @@
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <video/display_timing.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
+#include <video/display_timing.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "kirin_drm_drv.h"
#include "kirin_ade_reg.h"
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 4a7fe10a37cb..204c94c01e3d 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -13,16 +13,19 @@
#include <linux/of_platform.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "kirin_drm_drv.h"
@@ -113,8 +116,7 @@ static int kirin_gem_cma_dumb_create(struct drm_file *file,
}
static struct drm_driver kirin_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &kirin_drm_fops,
.gem_free_object_unlocked = drm_gem_cma_free_object,
@@ -123,8 +125,6 @@ static struct drm_driver kirin_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index b6e091935977..986b04599906 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -27,7 +27,6 @@
#ifndef __DRM_I2C_CH7006_PRIV_H__
#define __DRM_I2C_CH7006_PRIV_H__
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 878ba8d06ce2..8bcf0d199145 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -26,8 +26,9 @@
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/i2c/sil164.h>
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 3b378936f575..2a77823b8e9a 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -30,13 +30,20 @@
*
*/
-#include <drm/drmP.h>
+#include <linux/delay.h>
+#include <linux/mman.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_print.h>
#include <drm/i810_drm.h>
+
#include "i810_drv.h"
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index c69d5c487f51..5dd26a06ee0e 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -30,13 +30,15 @@
* Gareth Hughes <gareth@valinux.com>
*/
+#include "i810_drv.h"
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_pciids.h>
#include <drm/i810_drm.h>
-#include "i810_drv.h"
-#include <drm/drm_pciids.h>
static struct pci_device_id pciidlist[] = {
i810_PCI_IDS
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index c73d2f2da57b..9df3981ffc66 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -32,7 +32,9 @@
#ifndef _I810_DRV_H_
#define _I810_DRV_H_
+#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
+#include <drm/i810_drm.h>
/* General customization:
*/
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 8d922bb4d953..87a38c6aaa41 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -7,6 +7,7 @@ config DRM_I915_WERROR
# We use the dependency on !COMPILE_TEST to not be enabled in
# allmodconfig or allyesconfig configurations
depends on !COMPILE_TEST
+ select HEADER_TEST
default n
help
Add -Werror to the build flags for (and only for) i915.ko.
@@ -94,6 +95,20 @@ config DRM_I915_TRACE_GEM
If in doubt, say "N".
+config DRM_I915_TRACE_GTT
+ bool "Insert extra ftrace output from the GTT internals"
+ depends on DRM_I915_DEBUG_GEM
+ select TRACING
+ default n
+ help
+ Enable additional and verbose debugging output that will spam
+ ordinary tests, but may be vital for post-mortem debugging when
+	  used with /proc/sys/kernel/ftrace_dump_on_oops.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
config DRM_I915_SW_FENCE_DEBUG_OBJECTS
bool "Enable additional driver debugging for fence objects"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 8cace65f50ce..331b19cc8247 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,9 +32,9 @@ subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
# Extra header tests
-include $(src)/Makefile.header-test
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
-subdir-ccflags-y += -I$(src)
+subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
@@ -73,14 +73,23 @@ gt-y += \
gt/intel_context.o \
gt/intel_engine_cs.o \
gt/intel_engine_pm.o \
+ gt/intel_gt.o \
gt/intel_gt_pm.o \
gt/intel_hangcheck.o \
gt/intel_lrc.o \
+ gt/intel_renderstate.o \
gt/intel_reset.o \
gt/intel_ringbuffer.o \
gt/intel_mocs.o \
gt/intel_sseu.o \
+ gt/intel_timeline.o \
gt/intel_workarounds.o
+# autogenerated null render state
+gt-y += \
+ gt/gen6_renderstate.o \
+ gt/gen7_renderstate.o \
+ gt/gen8_renderstate.o \
+ gt/gen9_renderstate.o
gt-$(CONFIG_DRM_I915_SELFTEST) += \
gt/mock_engine.o
i915-y += $(gt-y)
@@ -120,33 +129,26 @@ i915-y += \
i915_gem_fence_reg.o \
i915_gem_gtt.o \
i915_gem.o \
- i915_gem_render_state.o \
i915_globals.o \
i915_query.o \
i915_request.o \
i915_scheduler.o \
- i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
intel_wopcm.o
# general-purpose microcontroller (GuC) support
-i915-y += intel_uc.o \
- intel_uc_fw.o \
- intel_guc.o \
- intel_guc_ads.o \
- intel_guc_ct.o \
- intel_guc_fw.o \
- intel_guc_log.o \
- intel_guc_submission.o \
- intel_huc.o \
- intel_huc_fw.o
-
-# autogenerated null render state
-i915-y += intel_renderstate_gen6.o \
- intel_renderstate_gen7.o \
- intel_renderstate_gen8.o \
- intel_renderstate_gen9.o
+obj-y += gt/uc/
+i915-y += gt/uc/intel_uc.o \
+ gt/uc/intel_uc_fw.o \
+ gt/uc/intel_guc.o \
+ gt/uc/intel_guc_ads.o \
+ gt/uc/intel_guc_ct.o \
+ gt/uc/intel_guc_fw.o \
+ gt/uc/intel_guc_log.o \
+ gt/uc/intel_guc_submission.o \
+ gt/uc/intel_huc.o \
+ gt/uc/intel_huc_fw.o
# modesetting core code
obj-y += display/
@@ -173,7 +175,8 @@ i915-y += \
display/intel_overlay.o \
display/intel_psr.o \
display/intel_quirks.o \
- display/intel_sprite.o
+ display/intel_sprite.o \
+ display/intel_tc.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
@@ -210,6 +213,25 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
+# perf code
+obj-y += oa/
+i915-y += \
+ oa/i915_oa_hsw.o \
+ oa/i915_oa_bdw.o \
+ oa/i915_oa_chv.o \
+ oa/i915_oa_sklgt2.o \
+ oa/i915_oa_sklgt3.o \
+ oa/i915_oa_sklgt4.o \
+ oa/i915_oa_bxt.o \
+ oa/i915_oa_kblgt2.o \
+ oa/i915_oa_kblgt3.o \
+ oa/i915_oa_glk.o \
+ oa/i915_oa_cflgt2.o \
+ oa/i915_oa_cflgt3.o \
+ oa/i915_oa_cnl.o \
+ oa/i915_oa_icl.o
+i915-y += i915_perf.o
+
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
@@ -224,23 +246,6 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
# virtual gpu code
i915-y += i915_vgpu.o
-# perf code
-i915-y += i915_perf.o \
- i915_oa_hsw.o \
- i915_oa_bdw.o \
- i915_oa_chv.o \
- i915_oa_sklgt2.o \
- i915_oa_sklgt3.o \
- i915_oa_sklgt4.o \
- i915_oa_bxt.o \
- i915_oa_kblgt2.o \
- i915_oa_kblgt3.o \
- i915_oa_glk.o \
- i915_oa_cflgt2.o \
- i915_oa_cflgt3.o \
- i915_oa_cnl.o \
- i915_oa_icl.o
-
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
include $(src)/gvt/Makefile
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
deleted file mode 100644
index 7cde0ec34615..000000000000
--- a/drivers/gpu/drm/i915/Makefile.header-test
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header-test-$(CONFIG_DRM_I915_WERROR) := \
- i915_active_types.h \
- i915_debugfs.h \
- i915_drv.h \
- i915_irq.h \
- i915_params.h \
- i915_priolist_types.h \
- i915_reg.h \
- i915_scheduler_types.h \
- i915_timeline_types.h \
- i915_utils.h \
- intel_csr.h \
- intel_drv.h \
- intel_pm.h \
- intel_runtime_pm.h \
- intel_sideband.h \
- intel_uncore.h \
- intel_wakeref.h
diff --git a/drivers/gpu/drm/i915/display/Makefile b/drivers/gpu/drm/i915/display/Makefile
index 1c75b5c9790c..173c305d7866 100644
--- a/drivers/gpu/drm/i915/display/Makefile
+++ b/drivers/gpu/drm/i915/display/Makefile
@@ -1,2 +1,6 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
# Extra header tests
-include $(src)/Makefile.header-test
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
+header-test- := intel_vbt_defs.h
diff --git a/drivers/gpu/drm/i915/display/Makefile.header-test b/drivers/gpu/drm/i915/display/Makefile.header-test
deleted file mode 100644
index fc7d4e5bd2c6..000000000000
--- a/drivers/gpu/drm/i915/display/Makefile.header-test
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header_test := $(notdir $(filter-out %/intel_vbt_defs.h,$(wildcard $(src)/*.h)))
-
-quiet_cmd_header_test = HDRTEST $@
- cmd_header_test = echo "\#include \"$(<F)\"" > $@
-
-header_test_%.c: %.h
- $(call cmd,header_test)
-
-extra-$(CONFIG_DRM_I915_WERROR) += \
- $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
-
-clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 74448e6bf749..a42348be0438 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -202,63 +202,62 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
+ enum phy phy;
u32 tmp;
int lane;
- for_each_dsi_port(port, intel_dsi->ports) {
-
+ for_each_dsi_phy(phy, intel_dsi->phys) {
/*
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
}
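
Every step in this sequence has the same read-modify-write shape: read a PHY register through its lane-0 (LN0) instance, adjust the fields, and write the result back through the group-broadcast (GRP) instance so all lanes pick it up. DW4 is the exception, since a GRP write would clobber the per-lane loadgen select bits. A hypothetical helper capturing the common case (not part of the patch):

        static void icl_phy_rmw(struct drm_i915_private *dev_priv,
                                i915_reg_t rd_reg, i915_reg_t wr_reg,
                                u32 clear, u32 set)
        {
                u32 tmp = I915_READ(rd_reg);    /* read the LN0 instance */

                tmp &= ~clear;
                tmp |= set;
                I915_WRITE(wr_reg, tmp);        /* broadcast via the GRP instance */
        }

With such a helper, the DW5 programming above would collapse to one call per register pair, e.g. icl_phy_rmw(dev_priv, ICL_PORT_TX_DW5_LN0(phy), ICL_PORT_TX_DW5_GRP(phy), SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK, SCALING_MODE_SEL(0x2) | TAP2_DISABLE | TAP3_DISABLE | RTERM_SELECT(0x6)).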
@@ -364,10 +363,10 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
+ enum phy phy;
- for_each_dsi_port(port, intel_dsi->ports)
- intel_combo_phy_power_up_lanes(dev_priv, port, true,
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ intel_combo_phy_power_up_lanes(dev_priv, phy, true,
intel_dsi->lane_count, false);
}
@@ -375,34 +374,47 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
+ enum phy phy;
u32 tmp;
int lane;
/* Step 4b(i) set loadgen select for transmit and aux lanes */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+
+ /* For EHL set latency optimization for PCS_DW1 lanes */
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+ tmp &= ~LATENCY_OPTIM_MASK;
+ tmp |= LATENCY_OPTIM_VAL(0);
+ I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+
+ tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ tmp &= ~LATENCY_OPTIM_MASK;
+ tmp |= LATENCY_OPTIM_VAL(0x1);
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ }
}
}
@@ -412,16 +424,16 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
- enum port port;
+ enum phy phy;
/* clear common keeper enable bit */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
- tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
}
/*
@@ -429,33 +441,33 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* Note: loadgen select program is done
* as part of lane phy sequence configuration
*/
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_CL_DW5(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_CL_DW5(phy));
tmp |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
+ I915_WRITE(ICL_PORT_CL_DW5(phy), tmp);
}
/* Clear training enable to change swing values */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
}
/* Program swing and de-emphasis */
dsi_program_swing_and_deemphasis(encoder);
/* Set training enable to trigger update */
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
}
}
@@ -484,6 +496,7 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
enum port port;
+ enum phy phy;
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -531,6 +544,14 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
}
}
+
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ tmp = I915_READ(ICL_DPHY_CHKN(phy));
+ tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
+ I915_WRITE(ICL_DPHY_CHKN(phy), tmp);
+ }
+ }
}
static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
@@ -538,15 +559,14 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
- enum port port;
+ enum phy phy;
mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- }
+ tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -555,15 +575,14 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 tmp;
- enum port port;
+ enum phy phy;
mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_dsi_port(port, intel_dsi->ports) {
- tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- }
+ tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys)
+ tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -573,24 +592,24 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum port port;
+ enum phy phy;
u32 val;
mutex_lock(&dev_priv->dpll_lock);
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- for_each_dsi_port(port, intel_dsi->ports) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
}
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- for_each_dsi_port(port, intel_dsi->ports) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+ for_each_dsi_phy(phy, intel_dsi->phys) {
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
+ POSTING_READ(ICL_DPCLKA_CFGCR0);
mutex_unlock(&dev_priv->dpll_lock);
}
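
The two-write sequence here is deliberate: the first write programs the PLL select for each DSI PHY while its DDI clock is still gated, and only the second write clears the CLK_OFF bits, so the clock never runs against a stale PLL selection. The POSTING_READ flushes the final write before the lock is dropped. Condensed to a single PHY (names as in the hunk above):

        val = I915_READ(ICL_DPCLKA_CFGCR0);
        val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
        val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
        I915_WRITE(ICL_DPCLKA_CFGCR0, val);     /* select PLL, clock still gated */

        val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
        I915_WRITE(ICL_DPCLKA_CFGCR0, val);     /* now ungate the DDI clock */
        POSTING_READ(ICL_DPCLKA_CFGCR0);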
@@ -744,7 +763,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
enum transcoder dsi_trans;
/* horizontal timings */
u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
- u16 hfront_porch, hback_porch;
+ u16 hback_porch;
/* vertical timings */
u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
@@ -753,8 +772,6 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
hsync_start = adjusted_mode->crtc_hsync_start;
hsync_end = adjusted_mode->crtc_hsync_end;
hsync_size = hsync_end - hsync_start;
- hfront_porch = (adjusted_mode->crtc_hsync_start -
- adjusted_mode->crtc_hdisplay);
hback_porch = (adjusted_mode->crtc_htotal -
adjusted_mode->crtc_hsync_end);
vactive = adjusted_mode->crtc_vdisplay;
@@ -1487,6 +1504,26 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
intel_dsi_log_params(intel_dsi);
}
+static void icl_dsi_add_properties(struct intel_connector *connector)
+{
+ u32 allowed_scalers;
+
+ allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) |
+ BIT(DRM_MODE_SCALE_FULLSCREEN) |
+ BIT(DRM_MODE_SCALE_CENTER);
+
+ drm_connector_attach_scaling_mode_property(&connector->base,
+ allowed_scalers);
+
+ connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+
+ connector->base.display_info.panel_orientation =
+ intel_dsi_get_panel_orientation(connector);
+ drm_connector_init_panel_orientation_property(&connector->base,
+ connector->panel.fixed_mode->hdisplay,
+ connector->panel.fixed_mode->vdisplay);
+}
+
void icl_dsi_init(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
@@ -1580,6 +1617,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
}
icl_dphy_param_init(intel_dsi);
+
+ icl_dsi_add_properties(intel_connector);
return;
err:
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 30bd4e76fff9..ab411d5e093c 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -176,33 +176,49 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->data_rate[plane->id] =
intel_plane_data_rate(new_crtc_state, new_plane_state);
- return intel_plane_atomic_calc_changes(old_crtc_state,
- &new_crtc_state->base,
- old_plane_state,
- &new_plane_state->base);
+ return intel_plane_atomic_calc_changes(old_crtc_state, new_crtc_state,
+ old_plane_state, new_plane_state);
}
-static int intel_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *new_plane_state)
+static struct intel_crtc *
+get_crtc_from_states(const struct intel_plane_state *old_plane_state,
+ const struct intel_plane_state *new_plane_state)
{
- struct drm_atomic_state *state = new_plane_state->state;
- const struct drm_plane_state *old_plane_state =
- drm_atomic_get_old_plane_state(state, plane);
- struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
- const struct drm_crtc_state *old_crtc_state;
- struct drm_crtc_state *new_crtc_state;
-
- new_plane_state->visible = false;
+ if (new_plane_state->base.crtc)
+ return to_intel_crtc(new_plane_state->base.crtc);
+
+ if (old_plane_state->base.crtc)
+ return to_intel_crtc(old_plane_state->base.crtc);
+
+ return NULL;
+}
+
+static int intel_plane_atomic_check(struct drm_plane *_plane,
+ struct drm_plane_state *_new_plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(_new_plane_state->state);
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct intel_crtc *crtc =
+ get_crtc_from_states(old_plane_state, new_plane_state);
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
+
+ new_plane_state->base.visible = false;
if (!crtc)
return 0;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
- to_intel_crtc_state(new_crtc_state),
- to_intel_plane_state(old_plane_state),
- to_intel_plane_state(new_plane_state));
+ return intel_plane_atomic_check_with_state(old_crtc_state,
+ new_crtc_state,
+ old_plane_state,
+ new_plane_state);
}
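
The rewritten check works in intel_* types throughout by downcasting once at the top of the function. The to_intel_*() conversions are the usual container_of pattern: the DRM base object is embedded in the driver object as .base, so the cast is pure pointer arithmetic. The i915 definitions are effectively:

        static inline struct intel_plane *to_intel_plane(struct drm_plane *plane)
        {
                /* drm_plane is embedded as 'base' inside struct intel_plane */
                return container_of(plane, struct intel_plane, base);
        }

get_crtc_from_states() preserves the old "new crtc, else old crtc" fallback that the deleted ?: expression encoded, just in properly typed form.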
static struct intel_plane *
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 1437a8797e10..cb7ef4f9eafd 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-struct drm_crtc_state;
struct drm_plane;
struct drm_property;
struct intel_atomic_state;
@@ -43,8 +42,8 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct drm_crtc_state *crtc_state,
+ struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
- struct drm_plane_state *plane_state);
+ struct intel_plane_state *plane_state);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 840daff12246..c8fd35a7ca42 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -72,6 +72,13 @@ struct dp_aud_n_m {
u16 n;
};
+struct hdmi_aud_ncts {
+ int sample_rate;
+ int clock;
+ int n;
+ int cts;
+};
+
/* Values according to DP 1.4 Table 2-104 */
static const struct dp_aud_n_m dp_aud_n_m[] = {
{ 32000, LC_162M, 1024, 10125 },
@@ -148,12 +155,7 @@ static const struct {
#define TMDS_594M 594000
#define TMDS_593M 593407
-static const struct {
- int sample_rate;
- int clock;
- int n;
- int cts;
-} hdmi_aud_ncts[] = {
+static const struct hdmi_aud_ncts hdmi_aud_ncts_24bpp[] = {
{ 32000, TMDS_296M, 5824, 421875 },
{ 32000, TMDS_297M, 3072, 222750 },
{ 32000, TMDS_593M, 5824, 843750 },
@@ -184,6 +186,49 @@ static const struct {
{ 192000, TMDS_594M, 24576, 594000 },
};
+/* Appendix C - N & CTS values for deep color from HDMI 2.0 spec */
+/* HDMI N/CTS table for 10-bit deep color (30 bpp) */
+#define TMDS_371M 371250
+#define TMDS_370M 370878
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_30bpp[] = {
+ { 32000, TMDS_370M, 5824, 527344 },
+ { 32000, TMDS_371M, 6144, 556875 },
+ { 44100, TMDS_370M, 8918, 585938 },
+ { 44100, TMDS_371M, 4704, 309375 },
+ { 88200, TMDS_370M, 17836, 585938 },
+ { 88200, TMDS_371M, 9408, 309375 },
+ { 176400, TMDS_370M, 35672, 585938 },
+ { 176400, TMDS_371M, 18816, 309375 },
+ { 48000, TMDS_370M, 11648, 703125 },
+ { 48000, TMDS_371M, 5120, 309375 },
+ { 96000, TMDS_370M, 23296, 703125 },
+ { 96000, TMDS_371M, 10240, 309375 },
+ { 192000, TMDS_370M, 46592, 703125 },
+ { 192000, TMDS_371M, 20480, 309375 },
+};
+
+/* HDMI N/CTS table for 12-bit deep color (36 bpp) */
+#define TMDS_445_5M 445500
+#define TMDS_445M 445054
+
+static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
+ { 32000, TMDS_445M, 5824, 632813 },
+ { 32000, TMDS_445_5M, 4096, 445500 },
+ { 44100, TMDS_445M, 8918, 703125 },
+ { 44100, TMDS_445_5M, 4704, 371250 },
+ { 88200, TMDS_445M, 17836, 703125 },
+ { 88200, TMDS_445_5M, 9408, 371250 },
+ { 176400, TMDS_445M, 35672, 703125 },
+ { 176400, TMDS_445_5M, 18816, 371250 },
+ { 48000, TMDS_445M, 5824, 421875 },
+ { 48000, TMDS_445_5M, 5120, 371250 },
+ { 96000, TMDS_445M, 11648, 421875 },
+ { 96000, TMDS_445_5M, 10240, 371250 },
+ { 192000, TMDS_445M, 23296, 421875 },
+ { 192000, TMDS_445_5M, 20480, 371250 },
+};
+
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
@@ -212,14 +257,24 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
static int audio_config_hdmi_get_n(const struct intel_crtc_state *crtc_state,
int rate)
{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->base.adjusted_mode;
- int i;
+ const struct hdmi_aud_ncts *hdmi_ncts_table;
+ int i, size;
+
+ if (crtc_state->pipe_bpp == 36) {
+ hdmi_ncts_table = hdmi_aud_ncts_36bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_36bpp);
+ } else if (crtc_state->pipe_bpp == 30) {
+ hdmi_ncts_table = hdmi_aud_ncts_30bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_30bpp);
+ } else {
+ hdmi_ncts_table = hdmi_aud_ncts_24bpp;
+ size = ARRAY_SIZE(hdmi_aud_ncts_24bpp);
+ }
- for (i = 0; i < ARRAY_SIZE(hdmi_aud_ncts); i++) {
- if (rate == hdmi_aud_ncts[i].sample_rate &&
- adjusted_mode->crtc_clock == hdmi_aud_ncts[i].clock) {
- return hdmi_aud_ncts[i].n;
+ for (i = 0; i < size; i++) {
+ if (rate == hdmi_ncts_table[i].sample_rate &&
+ crtc_state->port_clock == hdmi_ncts_table[i].clock) {
+ return hdmi_ncts_table[i].n;
}
}
return 0;
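
All three tables encode the HDMI audio clock regeneration relation 128 * fs = fTMDS * N / CTS, with the deep-color TMDS rates scaled up from the 297 MHz base (371.25 MHz is the 1.25x rate for 30 bpp, 445.5 MHz the 1.5x rate for 36 bpp). That is also why the lookup now keys on port_clock, the actual TMDS character rate, rather than crtc_clock. An illustrative plausibility check over a table entry (not in the patch; the relation is exact for the integer clocks and holds within roughly 100 ppm for the rounded 1/1.001 rates):

        #include <linux/math64.h>

        static bool hdmi_aud_ncts_plausible(const struct hdmi_aud_ncts *e)
        {
                /* e->clock is in kHz */
                u64 regen = div_u64((u64)e->clock * 1000 * e->n, e->cts);
                u64 want = 128ull * e->sample_rate;

                /* allow 0.1% slack for the 1/1.001 TMDS clocks */
                return regen >= want - want / 1000 &&
                       regen <= want + want / 1000;
        }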
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 3ef4e9f573cf..b416b394b641 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -28,6 +28,7 @@
#include <drm/drm_dp_helper.h>
#include <drm/i915_drm.h>
+#include "display/intel_display.h"
#include "display/intel_gmbus.h"
#include "i915_drv.h"
@@ -1354,12 +1355,27 @@ static const u8 mcc_ddc_pin_map[] = {
[MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
};
+static const u8 tgp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
+ [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+ [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+ [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+ [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+ [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP,
+ [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
+};
+
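
map_ddc_pin() treats these arrays as direct VBT-pin to GMBUS-pin translations, so extending DDC support to a new PCH is purely a data change: pick the table matching the detected PCH, then do a bounds-checked lookup. The lookup itself (not touched by this hunk) is roughly:

        if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
                return ddc_pin_map[vbt_pin];

        DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
                      vbt_pin);
        return 0;

A zero entry, which is what any index left out of the designated initializer gets, therefore doubles as the "invalid pin" marker.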
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
const u8 *ddc_pin_map;
int n_entries;
- if (HAS_PCH_MCC(dev_priv)) {
+ if (HAS_PCH_TGP(dev_priv)) {
+ ddc_pin_map = tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(tgp_ddc_pin_map);
+ } else if (HAS_PCH_MCC(dev_priv)) {
ddc_pin_map = mcc_ddc_pin_map;
n_entries = ARRAY_SIZE(mcc_ddc_pin_map);
} else if (HAS_PCH_ICP(dev_priv)) {
@@ -1668,6 +1684,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (!child->device_type)
continue;
+ DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
+ child->device_type);
+
/*
* Copy as much as we know (sizeof) and is available
* (child_dev_size) of the child device. Accessing the data must
@@ -1730,12 +1749,13 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
+ enum phy phy = intel_port_to_phy(dev_priv, port);
/*
* VBT has the TypeC mode (native,TBT/USB) and we don't want
* to detect it.
*/
- if (intel_port_is_tc(dev_priv, port))
+ if (intel_phy_is_tc(dev_priv, phy))
continue;
info->supports_dvi = (port != PORT_A && port != PORT_E);
@@ -1888,10 +1908,10 @@ out:
}
/**
- * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
* @dev_priv: i915 device instance
*/
-void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+void intel_bios_driver_remove(struct drm_i915_private *dev_priv)
{
kfree(dev_priv->vbt.child_dev);
dev_priv->vbt.child_dev = NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 4e42cfaf61a7..4969189e620f 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -42,6 +42,7 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_DISPLAY_DDI,
INTEL_BACKLIGHT_DSI_DCS,
INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
+ INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
struct edp_power_seq {
@@ -227,7 +228,7 @@ struct mipi_pps_data {
} __packed;
void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_cleanup(struct drm_i915_private *dev_priv);
+void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 7b908e10d32e..ee52c5b4643b 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -65,7 +65,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
struct intel_qgv_point *sp,
int point)
{
- u32 val = 0, val2;
+ u32 val = 0, val2 = 0;
int ret;
ret = sandybridge_pcode_read(dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0d19bbd08122..93b0d190c184 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -545,10 +545,10 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
* issuing a modeset without actually changing any display after
- * a system suspend. So grab the PIPE-A domain, which covers
+ * a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
vlv_iosf_sb_get(dev_priv,
BIT(VLV_IOSF_SB_CCK) |
@@ -606,7 +606,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
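
The wakeref returned by intel_display_power_get() must be handed back to the matching intel_display_power_put(); moving from PIPE_A to DISPLAY_CORE changes which power well is held across the CDCLK reprogramming, not the pairing discipline. The skeleton of the critical section:

        intel_wakeref_t wakeref;

        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
        /* ... talk to CCK/Punit and reprogram CDCLK ... */
        intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);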
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -631,10 +631,10 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
/* There are cases where we can end up here with power domains
* off and a CDCLK frequency other than the minimum, like when
* issuing a modeset without actually changing any display after
- * a system suspend. So grab the PIPE-A domain, which covers
+ * a system suspend. So grab the display core domain, which covers
* the HW blocks needed for the following programming.
*/
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
vlv_punit_get(dev_priv);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -653,7 +653,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
}
static int bdw_calc_cdclk(int min_cdclk)
@@ -1756,9 +1756,10 @@ sanitize:
static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
{
- int ranges_24[] = { 312000, 552000, 648000 };
- int ranges_19_38[] = { 307200, 556800, 652800 };
- int *ranges;
+ static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 };
+ static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 };
+ const int *ranges;
+ int len, i;
switch (ref) {
default:
@@ -1766,19 +1767,22 @@ static int icl_calc_cdclk(int min_cdclk, unsigned int ref)
/* fall through */
case 24000:
ranges = ranges_24;
+ len = ARRAY_SIZE(ranges_24);
break;
case 19200:
case 38400:
ranges = ranges_19_38;
+ len = ARRAY_SIZE(ranges_19_38);
break;
}
- if (min_cdclk > ranges[1])
- return ranges[2];
- else if (min_cdclk > ranges[0])
- return ranges[1];
- else
- return ranges[0];
+ for (i = 0; i < len; i++) {
+ if (min_cdclk <= ranges[i])
+ return ranges[i];
+ }
+
+ WARN_ON(min_cdclk > ranges[len - 1]);
+ return ranges[len - 1];
}
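
Because both tables are sorted ascending and the loop returns the first entry that satisfies min_cdclk, the new low-frequency steps (172.8/180 MHz and the shared 192 MHz point) need no extra branching. With a 24 MHz reference, for instance (illustrative assertions, not in the patch):

        WARN_ON(icl_calc_cdclk(190000, 24000) != 192000); /* new 192 MHz step */
        WARN_ON(icl_calc_cdclk(300000, 24000) != 312000); /* unchanged behaviour */
        WARN_ON(icl_calc_cdclk(100000, 19200) != 172800); /* new low step */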
static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
@@ -1792,16 +1796,24 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
default:
MISSING_CASE(cdclk);
/* fall through */
+ case 172800:
case 307200:
case 556800:
case 652800:
WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
dev_priv->cdclk.hw.ref != 38400);
break;
+ case 180000:
case 312000:
case 552000:
case 648000:
WARN_ON(dev_priv->cdclk.hw.ref != 24000);
+ break;
+ case 192000:
+ WARN_ON(dev_priv->cdclk.hw.ref != 19200 &&
+ dev_priv->cdclk.hw.ref != 38400 &&
+ dev_priv->cdclk.hw.ref != 24000);
+ break;
}
ratio = cdclk / (dev_priv->cdclk.hw.ref / 2);
@@ -1854,14 +1866,23 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
}
-static u8 icl_calc_voltage_level(int cdclk)
+static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
{
- if (cdclk > 556800)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ if (cdclk > 312000)
+ return 2;
+ else if (cdclk > 180000)
+ return 1;
+ else
+ return 0;
+ } else {
+ if (cdclk > 556800)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+ }
}
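
The return value is the coarse voltage bucket reported to pcode, and EHL's lower maximum CDCLK pulls the bucket boundaries down accordingly. Spelled out from the branches above:

        /*
         * EHL: cdclk <= 180000 -> 0, <= 312000 -> 1, else -> 2
         * ICL: cdclk <= 312000 -> 0, <= 556800 -> 1, else -> 2
         *
         * e.g. 192000 kHz is level 1 on EHL but still level 0 on ICL.
         */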
static void icl_get_cdclk(struct drm_i915_private *dev_priv,
@@ -1912,7 +1933,7 @@ out:
* at least what the CDCLK frequency requires.
*/
cdclk_state->voltage_level =
- icl_calc_voltage_level(cdclk_state->cdclk);
+ icl_calc_voltage_level(dev_priv, cdclk_state->cdclk);
}
static void icl_init_cdclk(struct drm_i915_private *dev_priv)
@@ -1947,7 +1968,8 @@ sanitize:
sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv,
sanitized_state.cdclk);
sanitized_state.voltage_level =
- icl_calc_voltage_level(sanitized_state.cdclk);
+ icl_calc_voltage_level(dev_priv,
+ sanitized_state.cdclk);
icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
}
@@ -1958,7 +1980,8 @@ static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = cdclk_state.bypass;
cdclk_state.vco = 0;
- cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv,
+ cdclk_state.cdclk);
icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
@@ -2560,7 +2583,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.logical.vco = vco;
state->cdclk.logical.cdclk = cdclk;
state->cdclk.logical.voltage_level =
- max(icl_calc_voltage_level(cdclk),
+ max(icl_calc_voltage_level(dev_priv, cdclk),
cnl_compute_min_voltage_level(state));
if (!state->active_crtcs) {
@@ -2570,7 +2593,7 @@ static int icl_modeset_calc_cdclk(struct intel_atomic_state *state)
state->cdclk.actual.vco = vco;
state->cdclk.actual.cdclk = cdclk;
state->cdclk.actual.voltage_level =
- icl_calc_voltage_level(cdclk);
+ icl_calc_voltage_level(dev_priv, cdclk);
} else {
state->cdclk.actual = state->cdclk.logical;
}
@@ -2605,7 +2628,12 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ if (dev_priv->cdclk.hw.ref == 24000)
+ dev_priv->max_cdclk_freq = 552000;
+ else
+ dev_priv->max_cdclk_freq = 556800;
+ } else if (INTEL_GEN(dev_priv) >= 11) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 648000;
else
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 841708da5a56..ac8218a040ab 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -6,13 +6,13 @@
#include "intel_combo_phy.h"
#include "intel_drv.h"
-#define for_each_combo_port(__dev_priv, __port) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if(intel_port_is_combophy(__dev_priv, __port))
+#define for_each_combo_phy(__dev_priv, __phy) \
+ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
+ for_each_if(intel_phy_is_combo(__dev_priv, __phy))
-#define for_each_combo_port_reverse(__dev_priv, __port) \
- for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
- for_each_if(intel_port_is_combophy(__dev_priv, __port))
+#define for_each_combo_phy_reverse(__dev_priv, __phy) \
+ for ((__phy) = I915_MAX_PHYS; (__phy)-- > PHY_A;) \
+ for_each_if(intel_phy_is_combo(__dev_priv, __phy))
enum {
PROCMON_0_85V_DOT_0,
@@ -38,18 +38,17 @@ static const struct cnl_procmon {
};
/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, that's why we call the ICL macros even though the function has CNL
- * on its name.
+ * CNL has just one set of registers, while gen11 has a set for each combo PHY.
+ * The CNL registers are equivalent to the gen11 PHY A registers, which is
+ * why we call the ICL macros even though the function has CNL in its name.
*/
static const struct cnl_procmon *
-cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
{
const struct cnl_procmon *procmon;
u32 val;
- val = I915_READ(ICL_PORT_COMP_DW3(port));
+ val = I915_READ(ICL_PORT_COMP_DW3(phy));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -75,32 +74,32 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
}
static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
const struct cnl_procmon *procmon;
u32 val;
- procmon = cnl_get_procmon_ref_values(dev_priv, port);
+ procmon = cnl_get_procmon_ref_values(dev_priv, phy);
- val = I915_READ(ICL_PORT_COMP_DW1(port));
+ val = I915_READ(ICL_PORT_COMP_DW1(phy));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
- I915_WRITE(ICL_PORT_COMP_DW1(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW1(phy), val);
- I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
- I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
+ I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9);
+ I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10);
}
static bool check_phy_reg(struct drm_i915_private *dev_priv,
- enum port port, i915_reg_t reg, u32 mask,
+ enum phy phy, i915_reg_t reg, u32 mask,
u32 expected_val)
{
u32 val = I915_READ(reg);
if ((val & mask) != expected_val) {
- DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
+ DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: "
"current %08x mask %08x expected %08x\n",
- port_name(port),
+ phy_name(phy),
reg.reg, val, mask, expected_val);
return false;
}
@@ -109,18 +108,18 @@ static bool check_phy_reg(struct drm_i915_private *dev_priv,
}
static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
const struct cnl_procmon *procmon;
bool ret;
- procmon = cnl_get_procmon_ref_values(dev_priv, port);
+ procmon = cnl_get_procmon_ref_values(dev_priv, phy);
- ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+ ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy),
(0xff << 16) | 0xff, procmon->dw1);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW9(phy),
-1U, procmon->dw9);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW10(phy),
-1U, procmon->dw10);
return ret;
@@ -134,15 +133,15 @@ static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
{
- enum port port = PORT_A;
+ enum phy phy = PHY_A;
bool ret;
if (!cnl_combo_phy_enabled(dev_priv))
return false;
- ret = cnl_verify_procmon_ref_values(dev_priv, port);
+ ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+ ret &= check_phy_reg(dev_priv, phy, CNL_PORT_CL1CM_DW5,
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
@@ -157,7 +156,7 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_MISC_2, val);
/* Dummy PORT_A to get the correct CNL register from the ICL macro */
- cnl_set_procmon_ref_values(dev_priv, PORT_A);
+ cnl_set_procmon_ref_values(dev_priv, PHY_A);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
@@ -181,35 +180,39 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
}
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
- return !(I915_READ(ICL_PHY_MISC(port)) &
- ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
- (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+ /* The PHY C added by EHL has no PHY_MISC register */
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+ else
+ return !(I915_READ(ICL_PHY_MISC(phy)) &
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+ (I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
}
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
bool ret;
- if (!icl_combo_phy_enabled(dev_priv, port))
+ if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
- ret = cnl_verify_procmon_ref_values(dev_priv, port);
+ ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- if (port == PORT_A)
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW8(port),
+ if (phy == PHY_A)
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
- ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
return ret;
}
void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
- enum port port, bool is_dsi,
+ enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal)
{
u8 lane_mask;
@@ -254,66 +257,120 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = I915_READ(ICL_PORT_CL_DW10(port));
+ val = I915_READ(ICL_PORT_CL_DW10(phy));
val &= ~PWR_DOWN_LN_MASK;
val |= lane_mask << PWR_DOWN_LN_SHIFT;
- I915_WRITE(ICL_PORT_CL_DW10(port), val);
+ I915_WRITE(ICL_PORT_CL_DW10(phy), val);
+}
+
+static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val)
+{
+ bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL;
+ bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL;
+ bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+
+ /*
+ * VBT's 'dvo port' field for child devices references the DDI, not
+ * the PHY. So if combo PHY A is wired up to drive an external
+ * display, we should see a child device present on PORT_D and
+ * nothing on PORT_A and no DSI.
+ */
+ if (ddi_d_present && !ddi_a_present && !dsi_present)
+ return val | ICL_PHY_MISC_MUX_DDID;
+
+ /*
+ * If we encounter a VBT that claims to have an external display on
+ * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
+ * in the log and let the internal display win.
+ */
+ if (ddi_d_present)
+ DRM_ERROR("VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
+
+ return val & ~ICL_PHY_MISC_MUX_DDID;
}
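
The mux decision reduces to a truth table over the three VBT-derived booleans; only the unambiguous case, an external display on DDI-D with nothing internal, selects the DDID routing, and a conflicting VBT resolves in favour of the internal display with an error logged. Summarized (x = don't care):

        /*
         *  ddi_a  dsi  ddi_d  ->  mux
         *    0     0     1    ->  DDID (external display)
         *    x     x     0    ->  DDIA (internal, or nothing wired)
         *    1     x     1    ->  DDIA + DRM_ERROR (conflicting VBT)
         *    x     1     1    ->  DDIA + DRM_ERROR (conflicting VBT)
         */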
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
{
- enum port port;
+ enum phy phy;
- for_each_combo_port(dev_priv, port) {
+ for_each_combo_phy(dev_priv, phy) {
u32 val;
- if (icl_combo_phy_verify_state(dev_priv, port)) {
- DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
- port_name(port));
+ if (icl_combo_phy_verify_state(dev_priv, phy)) {
+ DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n",
+ phy_name(phy));
continue;
}
- val = I915_READ(ICL_PHY_MISC(port));
+ /*
+ * Although EHL adds a combo PHY C, there's no PHY_MISC
+ * register for it and no need to program the
+ * DE_IO_COMP_PWR_DOWN setting on PHY C.
+ */
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ goto skip_phy_misc;
+
+ /*
+ * EHL's combo PHY A can be hooked up to either an external
+ * display (via DDI-D) or an internal display (via DDI-A or
+ * the DSI DPHY). This is a motherboard design decision that
+ * can't be changed on the fly, so initialize the PHY's mux
+ * based on whether our VBT indicates the presence of any
+ * "internal" child devices.
+ */
+ val = I915_READ(ICL_PHY_MISC(phy));
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A)
+ val = ehl_combo_phy_a_mux(dev_priv, val);
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
+ I915_WRITE(ICL_PHY_MISC(phy), val);
- cnl_set_procmon_ref_values(dev_priv, port);
+skip_phy_misc:
+ cnl_set_procmon_ref_values(dev_priv, phy);
- if (port == PORT_A) {
- val = I915_READ(ICL_PORT_COMP_DW8(port));
+ if (phy == PHY_A) {
+ val = I915_READ(ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
- I915_WRITE(ICL_PORT_COMP_DW8(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW8(phy), val);
}
- val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val = I915_READ(ICL_PORT_COMP_DW0(phy));
val |= COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
- val = I915_READ(ICL_PORT_CL_DW5(port));
+ val = I915_READ(ICL_PORT_CL_DW5(phy));
val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ I915_WRITE(ICL_PORT_CL_DW5(phy), val);
}
}
static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
{
- enum port port;
+ enum phy phy;
- for_each_combo_port_reverse(dev_priv, port) {
+ for_each_combo_phy_reverse(dev_priv, phy) {
u32 val;
- if (port == PORT_A &&
- !icl_combo_phy_verify_state(dev_priv, port))
- DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
- port_name(port));
+ if (phy == PHY_A &&
+ !icl_combo_phy_verify_state(dev_priv, phy))
+ DRM_WARN("Combo PHY %c HW state changed unexpectedly\n",
+ phy_name(phy));
+
+ /*
+ * Although EHL adds a combo PHY C, there's no PHY_MISC
+ * register for it and no need to program the
+ * DE_IO_COMP_PWR_DOWN setting on PHY C.
+ */
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
+ goto skip_phy_misc;
- val = I915_READ(ICL_PHY_MISC(port));
+ val = I915_READ(ICL_PHY_MISC(phy));
val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
+ I915_WRITE(ICL_PHY_MISC(phy), val);
- val = I915_READ(ICL_PORT_COMP_DW0(port));
+skip_phy_misc:
+ val = I915_READ(ICL_PORT_COMP_DW0(phy));
val &= ~COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+ I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.h b/drivers/gpu/drm/i915/display/intel_combo_phy.h
index e6e195a83b19..660886f86c59 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.h
@@ -7,14 +7,14 @@
#define __INTEL_COMBO_PHY_H__
#include <linux/types.h>
-#include <drm/i915_drm.h>
struct drm_i915_private;
+enum phy;
void intel_combo_phy_init(struct drm_i915_private *dev_priv);
void intel_combo_phy_uninit(struct drm_i915_private *dev_priv);
void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
- enum port port, bool is_dsi,
+ enum phy phy, bool is_dsi,
int lane_count, bool lane_reversal);
#endif /* __INTEL_COMBO_PHY_H__ */
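
Dropping <drm/i915_drm.h> in favour of a bare "enum phy;" keeps this header standalone for the header test while shrinking its include graph. Forward-declaring an enum is a GNU C extension (standard C only completes an enum at its defining declaration), but it suffices here because the header uses the type solely in prototypes; the full definition is in scope wherever the functions are defined or called. The general shape:

        enum phy;                       /* incomplete type: GNU C extension */

        void use_phy(struct drm_i915_private *dev_priv, enum phy phy);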
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 41310f8e5a2a..d0163d86c42a 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -118,7 +118,7 @@ int intel_connector_register(struct drm_connector *connector)
if (ret)
goto err;
- if (i915_inject_load_failure()) {
+ if (i915_inject_probe_failure()) {
ret = -EFAULT;
goto err_backlight;
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 7925a176f900..cf3c3fd7089f 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -45,6 +45,7 @@
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_psr.h"
+#include "intel_tc.h"
#include "intel_vdsc.h"
struct ddi_buf_trans {
@@ -846,8 +847,8 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
static const struct cnl_ddi_buf_trans *
-icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
- int type, int rate, int *n_entries)
+icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
{
if (type == INTEL_OUTPUT_HDMI) {
*n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
@@ -867,12 +868,13 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
{
int n_entries, level, default_entry;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
if (INTEL_GEN(dev_priv) >= 11) {
- if (intel_port_is_combophy(dev_priv, port))
- icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -1486,9 +1488,10 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
int link_clock;
- if (intel_port_is_combophy(dev_priv, port)) {
+ if (intel_phy_is_combo(dev_priv, phy)) {
link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
@@ -1770,7 +1773,10 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
- temp |= TRANS_DDI_SELECT_PORT(port);
+ if (INTEL_GEN(dev_priv) >= 12)
+ temp |= TGL_TRANS_DDI_SELECT_PORT(port);
+ else
+ temp |= TRANS_DDI_SELECT_PORT(port);
switch (crtc_state->pipe_bpp) {
case 18:
@@ -1850,8 +1856,13 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
u32 val = I915_READ(reg);
- val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
- val |= TRANS_DDI_PORT_NONE;
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+ } else {
+ val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
+ }
I915_WRITE(reg, val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
@@ -2003,10 +2014,19 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
enum transcoder cpu_transcoder = (enum transcoder)p;
+ unsigned int port_mask, ddi_select;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ port_mask = TGL_TRANS_DDI_PORT_MASK;
+ ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
+ } else {
+ port_mask = TRANS_DDI_PORT_MASK;
+ ddi_select = TRANS_DDI_SELECT_PORT(port);
+ }
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
+ if ((tmp & port_mask) != ddi_select)
continue;
if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
@@ -2085,6 +2105,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
/*
* TODO: Add support for MST encoders. Atm, the following should never
@@ -2102,7 +2123,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* ports.
*/
if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_port_is_tc(dev_priv, encoder->port))
+ intel_phy_is_tc(dev_priv, phy))
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
@@ -2122,9 +2143,14 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
enum port port = encoder->port;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (cpu_transcoder != TRANSCODER_EDP)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_PORT(port));
+ if (cpu_transcoder != TRANSCODER_EDP) {
+ if (INTEL_GEN(dev_priv) >= 12)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_PORT(port));
+ else
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
+ }
}
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
@@ -2132,9 +2158,14 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (cpu_transcoder != TRANSCODER_EDP)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
+ if (cpu_transcoder != TRANSCODER_EDP) {
+ if (INTEL_GEN(dev_priv) >= 12)
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_DISABLED);
+ else
+ I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
+ }
}
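
Gen12 widens the transcoder port-select fields (TGL_TRANS_DDI_SELECT_PORT, TGL_TRANS_CLK_SEL_PORT) to cover its larger set of DDIs, so every site that builds or masks the field now needs a generation check. If more call sites accumulate, the branching could be folded into a hypothetical helper such as:

        static u32 trans_ddi_select_port(struct drm_i915_private *dev_priv,
                                         enum port port)
        {
                /* TGL moved/widened the DDI select field */
                if (INTEL_GEN(dev_priv) >= 12)
                        return TGL_TRANS_DDI_SELECT_PORT(port);

                return TRANS_DDI_SELECT_PORT(port);
        }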
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
@@ -2227,11 +2258,12 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
int n_entries;
if (INTEL_GEN(dev_priv) >= 11) {
- if (intel_port_is_combophy(dev_priv, port))
- icl_get_combo_buf_trans(dev_priv, port, encoder->type,
+ if (intel_phy_is_combo(dev_priv, phy))
+ icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@ -2413,15 +2445,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
}
static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type,
+ u32 level, enum phy phy, int type,
int rate)
{
const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val;
int ln;
- ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
- rate, &n_entries);
+ ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
if (!ddi_translations)
return;
@@ -2431,41 +2463,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
}
/* Set PORT_TX_DW5 */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
- val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW7_LN0(phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
- I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val);
}
static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2473,7 +2505,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int width = 0;
int rate = 0;
u32 val;
@@ -2494,12 +2526,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
if (type == INTEL_OUTPUT_HDMI)
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val);
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -2509,33 +2541,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = I915_READ(ICL_PORT_CL_DW5(port));
+ val = I915_READ(ICL_PORT_CL_DW5(phy));
val |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ I915_WRITE(ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
- icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
+ icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
/* 6. Set training enable to trigger update */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
val |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2663,9 +2695,9 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (intel_port_is_combophy(dev_priv, port))
+ if (intel_phy_is_combo(dev_priv, phy))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
@@ -2728,12 +2760,13 @@ u32 ddi_signal_levels(struct intel_dp *intel_dp)
static inline
u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
- enum port port)
+ enum phy phy)
{
- if (intel_port_is_combophy(dev_priv, port)) {
- return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- } else if (intel_port_is_tc(dev_priv, port)) {
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ } else if (intel_phy_is_tc(dev_priv, phy)) {
+ enum tc_port tc_port = intel_port_to_tc(dev_priv,
+ (enum port)phy);
return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port);
}
@@ -2746,23 +2779,33 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
mutex_lock(&dev_priv->dpll_lock);
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
- if (intel_port_is_combophy(dev_priv, port)) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ /*
+ * Even though this register references DDIs, note that we
+ * want to pass the PHY rather than the port (DDI). For
+ * ICL, port=phy in all cases so it doesn't matter, but for
+ * EHL the bspec notes the following:
+ *
+ * "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA
+ * Clock Select chooses the PLL for both DDIA and DDID and
+ * drives port A in all cases."
+ */
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ POSTING_READ(ICL_DPCLKA_CFGCR0);
}
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -2770,14 +2813,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
mutex_lock(&dev_priv->dpll_lock);
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
mutex_unlock(&dev_priv->dpll_lock);
}
@@ -2835,11 +2878,13 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
ddi_clk_needed = false;
}
- val = I915_READ(DPCLKA_CFGCR0_ICL);
+ val = I915_READ(ICL_DPCLKA_CFGCR0);
for_each_port_masked(port, port_mask) {
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
bool ddi_clk_ungated = !(val &
icl_dpclka_cfgcr0_clk_off(dev_priv,
- port));
+ phy));
if (ddi_clk_needed == ddi_clk_ungated)
continue;
@@ -2851,10 +2896,10 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
if (WARN_ON(ddi_clk_needed))
continue;
- DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
- port_name(port));
- val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ phy_name(phy));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
+ I915_WRITE(ICL_DPCLKA_CFGCR0, val);
}
}
@@ -2863,6 +2908,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
@@ -2872,7 +2918,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_port_is_combophy(dev_priv, port))
+ if (!intel_phy_is_combo(dev_priv, phy))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2912,9 +2958,10 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
if (INTEL_GEN(dev_priv) >= 11) {
- if (!intel_port_is_combophy(dev_priv, port))
+ if (!intel_phy_is_combo(dev_priv, phy))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
@@ -2995,25 +3042,22 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 ln0, ln1, lane_info;
+ u32 ln0, ln1, lane_mask;
- if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+ if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
return;
ln0 = I915_READ(MG_DP_MODE(0, port));
ln1 = I915_READ(MG_DP_MODE(1, port));
- switch (intel_dig_port->tc_type) {
- case TC_PORT_TYPEC:
+ switch (intel_dig_port->tc_mode) {
+ case TC_PORT_DP_ALT:
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+ lane_mask = intel_tc_port_get_lane_mask(intel_dig_port);
- switch (lane_info) {
+ switch (lane_mask) {
case 0x1:
case 0x4:
break;
@@ -3038,7 +3082,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
MG_DP_MODE_CFG_DP_X2_MODE;
break;
default:
- MISSING_CASE(lane_info);
+ MISSING_CASE(lane_mask);
}
break;
@@ -3048,7 +3092,7 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
break;
default:
- MISSING_CASE(intel_dig_port->tc_type);
+ MISSING_CASE(intel_dig_port->tc_mode);
return;
}
@@ -3110,6 +3154,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
int level = intel_ddi_dp_level(intel_dp);
@@ -3123,7 +3168,10 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_clk_select(encoder, crtc_state);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
@@ -3138,11 +3186,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
else
intel_prepare_dp_ddi_buffers(encoder, crtc_state);
- if (intel_port_is_combophy(dev_priv, port)) {
+ if (intel_phy_is_combo(dev_priv, phy)) {
bool lane_reversal =
dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
- intel_combo_phy_power_up_lanes(dev_priv, port, false,
+ intel_combo_phy_power_up_lanes(dev_priv, phy, false,
crtc_state->lane_count,
lane_reversal);
}
@@ -3290,6 +3338,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &dig_port->dp;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (!is_mst) {
intel_ddi_disable_pipe_clock(old_crtc_state);
@@ -3305,8 +3354,10 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
- intel_display_power_put_unchecked(dev_priv,
- dig_port->ddi_io_power_domain);
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_put_unchecked(dev_priv,
+ dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
}
@@ -3511,7 +3562,8 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
/* Enable hdcp if it's desired */
if (conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(to_intel_connector(conn_state->connector));
+ intel_hdcp_enable(to_intel_connector(conn_state->connector),
+ (u8)conn_state->hdcp_content_type);
}
static void intel_disable_ddi_dp(struct intel_encoder *encoder,
@@ -3580,44 +3632,65 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool content_protection_type_changed =
+ (conn_state->hdcp_content_type != hdcp->content_type &&
+ conn_state->content_protection !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+ /*
+ * If a content-protection type change is requested during an active
+ * HDCP encryption session, disable HDCP and re-enable it with the
+ * new type value.
+ */
if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(to_intel_connector(conn_state->connector));
- else if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
- intel_hdcp_disable(to_intel_connector(conn_state->connector));
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_disable(connector);
+
+ /*
+ * Mark the HDCP state as DESIRED after HDCP has been disabled as
+ * part of the type-change procedure.
+ */
+ if (content_protection_type_changed) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type);
}
-static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- enum port port)
+static void
+intel_ddi_update_prepare(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
- bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
- val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
- switch (pipe_config->lane_count) {
- case 1:
- val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML0(tc_port);
- break;
- case 2:
- val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
- DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
- break;
- case 4:
- val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
- break;
- default:
- MISSING_CASE(pipe_config->lane_count);
- }
- I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
+ struct intel_crtc_state *crtc_state =
+ crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
+ int required_lanes = crtc_state ? crtc_state->lane_count : 1;
+
+ WARN_ON(crtc && crtc->active);
+
+ intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
+ if (crtc_state && crtc_state->base.active)
+ intel_update_active_dpll(state, crtc, encoder);
+}
+
+static void
+intel_ddi_update_complete(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ struct intel_crtc *crtc)
+{
+ intel_tc_port_put_link(enc_to_dig_port(&encoder->base));
}
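A minimal sketch of the intended call order, assuming the atomic commit code invokes these hooks around a Type-C modeset:

/*
 * Illustrative ordering (assumed, mirroring the hooks above):
 *
 *   encoder->update_prepare()  ->  intel_tc_port_get_link(dig_port, lanes)
 *       ...modeset update runs with the TC link reference held...
 *   encoder->update_complete() ->  intel_tc_port_put_link(dig_port)
 *
 * Holding the link reference keeps the PHY from switching modes in the
 * middle of the update.
 */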
static void
@@ -3627,26 +3700,25 @@ intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- enum port port = encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
- if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_port_is_tc(dev_priv, encoder->port))
+ if (is_tc_port)
+ intel_tc_port_get_link(dig_port, crtc_state->lane_count);
+
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
intel_display_power_get(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
- if (IS_GEN9_LP(dev_priv))
+ if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
+ /*
+ * Program the lane count for static/dynamic connections on
+ * Type-C ports. Skip this step for TBT.
+ */
+ intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
+ else if (IS_GEN9_LP(dev_priv))
bxt_ddi_phy_set_lane_optim_mask(encoder,
crtc_state->lane_lat_optim_mask);
-
- /*
- * Program the lane count for static/dynamic connections on Type-C ports.
- * Skip this step for TBT.
- */
- if (dig_port->tc_type == TC_PORT_UNKNOWN ||
- dig_port->tc_type == TC_PORT_TBT)
- return;
-
- intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
}
static void
@@ -3656,11 +3728,15 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
- if (intel_crtc_has_dp_encoder(crtc_state) ||
- intel_port_is_tc(dev_priv, encoder->port))
+ if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
intel_display_power_put_unchecked(dev_priv,
intel_ddi_main_link_aux_domain(dig_port));
+
+ if (is_tc_port)
+ intel_tc_port_put_link(dig_port);
}
static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3737,7 +3813,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
@@ -3776,7 +3851,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
- intel_dig_port = enc_to_dig_port(&encoder->base);
pipe_config->infoframes.enable |=
intel_hdmi_infoframes_enabled(encoder, pipe_config);
@@ -3914,49 +3988,18 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
return 0;
}
-static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-
- intel_dp_encoder_suspend(encoder);
-
- /*
- * TODO: disconnect also from USB DP alternate mode once we have a
- * way to handle the modeset restore in that mode during resume
- * even if the sink has disappeared while being suspended.
- */
- if (dig_port->tc_legacy_port)
- icl_tc_phy_disconnect(i915, dig_port);
-}
-
-static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
-{
- struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
- struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
-
- if (intel_port_is_tc(i915, dig_port->base.port))
- intel_digital_port_connected(&dig_port->base);
-
- intel_dp_encoder_reset(drm_encoder);
-}
-
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
- struct drm_i915_private *i915 = to_i915(encoder->dev);
intel_dp_encoder_flush_work(encoder);
- if (intel_port_is_tc(i915, dig_port->base.port))
- icl_tc_phy_disconnect(i915, dig_port);
-
drm_encoder_cleanup(encoder);
kfree(dig_port);
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .reset = intel_ddi_encoder_reset,
+ .reset = intel_dp_encoder_reset,
.destroy = intel_ddi_encoder_destroy,
};
@@ -4081,14 +4124,17 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
return modeset_pipe(&crtc->base, ctx);
}
-static bool intel_ddi_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+static enum intel_hotplug_state
+intel_ddi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct drm_modeset_acquire_ctx ctx;
- bool changed;
+ enum intel_hotplug_state state;
int ret;
- changed = intel_encoder_hotplug(encoder, connector);
+ state = intel_encoder_hotplug(encoder, connector, irq_received);
drm_modeset_acquire_init(&ctx, 0);
@@ -4110,7 +4156,27 @@ static bool intel_ddi_hotplug(struct intel_encoder *encoder,
drm_modeset_acquire_fini(&ctx);
WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
- return changed;
+ /*
+ * Unpowered type-c dongles can take some time to boot and become
+ * responsive, so give those dongles some time to power up and then
+ * retry the probe.
+ *
+ * On many platforms the HDMI live state signal is known to be
+ * unreliable, so we can't use it to detect if a sink is connected or
+ * not. Instead we detect if it's connected based on whether we can
+ * read the EDID or not. That in turn has a problem during disconnect,
+ * since the HPD interrupt may be raised before the DDC lines get
+ * disconnected (due to how the required lengths of the DDC vs. HPD
+ * connector pins are specified), so we'll still be able to get a
+ * valid EDID. To solve this, schedule another detection cycle if this
+ * time around we didn't detect any change in the sink's connection
+ * status.
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
+ !dig_port->dp.is_mst)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
}
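The retry decision above reduces to a small table; a hedged summary of the logic as written:

/*
 *   irq_received && state == INTEL_HOTPLUG_UNCHANGED && !dp.is_mst
 *       -> INTEL_HOTPLUG_RETRY  (schedule another detection cycle)
 *   anything else
 *       -> state as returned by intel_encoder_hotplug()
 */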
static struct intel_connector *
@@ -4198,6 +4264,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum pipe pipe;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
init_dp = port_info->supports_dp;
@@ -4242,7 +4309,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->update_pipe = intel_ddi_update_pipe;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
- intel_encoder->suspend = intel_ddi_encoder_suspend;
+ intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -4261,9 +4328,15 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
- intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
- !port_info->supports_typec_usb &&
- !port_info->supports_tbt;
+ if (intel_phy_is_tc(dev_priv, phy)) {
+ bool is_legacy = !port_info->supports_typec_usb &&
+ !port_info->supports_tbt;
+
+ intel_tc_port_init(intel_dig_port, is_legacy);
+
+ intel_encoder->update_prepare = intel_ddi_update_prepare;
+ intel_encoder->update_complete = intel_ddi_update_complete;
+ }
switch (port) {
case PORT_A:
@@ -4290,6 +4363,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->ddi_io_power_domain =
POWER_DOMAIN_PORT_DDI_F_IO;
break;
+ case PORT_G:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_G_IO;
+ break;
+ case PORT_H:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_H_IO;
+ break;
+ case PORT_I:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_I_IO;
+ break;
default:
MISSING_CASE(port);
}
@@ -4324,9 +4409,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_infoframe_init(intel_dig_port);
- if (intel_port_is_tc(dev_priv, port))
- intel_digital_port_connected(intel_encoder);
-
return;
err:
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 592b92782fab..9e4ee29fd0fc 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -78,6 +78,7 @@
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
+#include "intel_tc.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -515,9 +516,9 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
}
static bool
-needs_modeset(const struct drm_crtc_state *state)
+needs_modeset(const struct intel_crtc_state *state)
{
- return drm_atomic_crtc_needs_modeset(state);
+ return drm_atomic_crtc_needs_modeset(&state->base);
}
/*
@@ -3715,10 +3716,27 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
+static bool i9xx_plane_has_windowing(struct intel_plane *plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ return i9xx_plane == PLANE_B;
+ else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ return false;
+ else if (IS_GEN(dev_priv, 4))
+ return i9xx_plane == PLANE_C;
+ else
+ return i9xx_plane == PLANE_B ||
+ i9xx_plane == PLANE_C;
+}
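Summarizing the per-platform windowing support encoded by the helper above:

/*
 *   CHV:             plane B only
 *   G4X and gen5+:   no plane windowing
 *   gen4:            plane C only
 *   gen2/3:          planes B and C
 */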
+
static int
i9xx_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
int ret;
ret = chv_plane_check_rotation(plane_state);
@@ -3729,7 +3747,8 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
&crtc_state->base,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ i9xx_plane_has_windowing(plane),
+ true);
if (ret)
return ret;
@@ -3758,6 +3777,10 @@ static void i9xx_update_plane(struct intel_plane *plane,
u32 linear_offset;
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ int crtc_w = drm_rect_width(&plane_state->base.dst);
+ int crtc_h = drm_rect_height(&plane_state->base.dst);
unsigned long irqflags;
u32 dspaddr_offset;
u32 dspcntr;
@@ -3776,18 +3799,18 @@ static void i9xx_update_plane(struct intel_plane *plane,
I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
if (INTEL_GEN(dev_priv) < 4) {
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
+ /*
+ * PLANE_A doesn't actually have a full window
+ * generator, but let's assume we still need to
+ * program whatever is there.
*/
- I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
+ I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(DSPSIZE(i9xx_plane),
- ((crtc_state->pipe_src_h - 1) << 16) |
- (crtc_state->pipe_src_w - 1));
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PRIMSIZE(i9xx_plane),
- ((crtc_state->pipe_src_h - 1) << 16) |
- (crtc_state->pipe_src_w - 1));
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
@@ -3950,10 +3973,10 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
return PLANE_CTL_FORMAT_XRGB_8888;
+ case DRM_FORMAT_XBGR2101010:
+ return PLANE_CTL_FORMAT_XRGB_2101010 | PLANE_CTL_ORDER_RGBX;
case DRM_FORMAT_XRGB2101010:
return PLANE_CTL_FORMAT_XRGB_2101010;
- case DRM_FORMAT_XBGR2101010:
- return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR16161616F:
case DRM_FORMAT_ABGR16161616F:
return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
@@ -4248,12 +4271,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
return;
/* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.wait_queue);
+ set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
+ smp_mb__after_atomic();
+ wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(&dev_priv->gt);
}
/*
@@ -4299,7 +4323,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
int ret;
/* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+ if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
return;
state = fetch_and_zero(&dev_priv->modeset_restore_state);
@@ -4339,7 +4363,7 @@ unlock:
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
- clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
+ clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
}
static void icl_set_pipe_chicken(struct intel_crtc *crtc)
@@ -5796,7 +5820,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s
if (!old_crtc_state->ips_enabled)
return false;
- if (needs_modeset(&new_crtc_state->base))
+ if (needs_modeset(new_crtc_state))
return true;
/*
@@ -5823,7 +5847,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s
if (!new_crtc_state->ips_enabled)
return false;
- if (needs_modeset(&new_crtc_state->base))
+ if (needs_modeset(new_crtc_state))
return true;
/*
@@ -5877,13 +5901,13 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *old_state = old_crtc_state->base.state;
+ struct drm_atomic_state *state = old_crtc_state->base.state;
struct intel_crtc_state *pipe_config =
- intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
+ intel_atomic_get_new_crtc_state(to_intel_atomic_state(state),
crtc);
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(old_state, primary);
+ drm_atomic_get_old_plane_state(state, primary);
intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
@@ -5895,12 +5919,12 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (old_primary_state) {
struct drm_plane_state *new_primary_state =
- drm_atomic_get_new_plane_state(old_state, primary);
+ drm_atomic_get_new_plane_state(state, primary);
intel_fbc_post_update(crtc);
if (new_primary_state->visible &&
- (needs_modeset(&pipe_config->base) ||
+ (needs_modeset(pipe_config) ||
!old_primary_state->visible))
intel_post_enable_primary(&crtc->base, pipe_config);
}
@@ -5920,20 +5944,20 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_atomic_state *old_state = old_crtc_state->base.state;
+ struct drm_atomic_state *state = old_crtc_state->base.state;
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_primary_state =
- drm_atomic_get_old_plane_state(old_state, primary);
- bool modeset = needs_modeset(&pipe_config->base);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
+ drm_atomic_get_old_plane_state(state, primary);
+ bool modeset = needs_modeset(pipe_config);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
hsw_disable_ips(old_crtc_state);
if (old_primary_state) {
struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(old_intel_state,
+ intel_atomic_get_new_plane_state(intel_state,
to_intel_plane(primary));
intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
@@ -5984,7 +6008,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* If we're doing a modeset, we're done. No need to do any pre-vblank
* watermark programming here.
*/
- if (needs_modeset(&pipe_config->base))
+ if (needs_modeset(pipe_config))
return;
/*
@@ -6002,7 +6026,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* us to.
*/
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state,
+ dev_priv->display.initial_watermarks(intel_state,
pipe_config);
else if (pipe_config->update_wm_pre)
intel_update_watermarks(crtc);
@@ -6036,19 +6060,111 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
intel_frontbuffer_flip(dev_priv, fb_bits);
}
-static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
+/*
+ * intel_connector_primary_encoder - get the primary encoder for a connector
+ * @connector: connector for which to return the encoder
+ *
+ * Returns the primary encoder for a connector. There is a 1:1 mapping from
+ * all connectors to their encoder, except for DP-MST connectors, which have
+ * both a virtual and a primary encoder. These DP-MST primary encoders can be
+ * pointed to by as many DP-MST connectors as there are pipes.
+ */
+static struct intel_encoder *
+intel_connector_primary_encoder(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder;
+
+ if (connector->mst_port)
+ return &dp_to_dig_port(connector->mst_port)->base;
+
+ encoder = intel_attached_encoder(&connector->base);
+ WARN_ON(!encoder);
+
+ return encoder;
+}
+
+static bool
+intel_connector_needs_modeset(struct intel_atomic_state *state,
+ const struct drm_connector_state *old_conn_state,
+ const struct drm_connector_state *new_conn_state)
+{
+ struct intel_crtc *old_crtc = old_conn_state->crtc ?
+ to_intel_crtc(old_conn_state->crtc) : NULL;
+ struct intel_crtc *new_crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+
+ return new_crtc != old_crtc ||
+ (new_crtc &&
+ needs_modeset(intel_atomic_get_new_crtc_state(state, new_crtc)));
+}
+
+static void intel_encoders_update_prepare(struct intel_atomic_state *state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_oldnew_connector_in_state(&state->base, conn,
+ old_conn_state, new_conn_state, i) {
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+
+ if (!intel_connector_needs_modeset(state,
+ old_conn_state,
+ new_conn_state))
+ continue;
+
+ encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+ if (!encoder->update_prepare)
+ continue;
+
+ crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+ encoder->update_prepare(state, encoder, crtc);
+ }
+}
+
+static void intel_encoders_update_complete(struct intel_atomic_state *state)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_oldnew_connector_in_state(&state->base, conn,
+ old_conn_state, new_conn_state, i) {
+ struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+
+ if (!intel_connector_needs_modeset(state,
+ old_conn_state,
+ new_conn_state))
+ continue;
+
+ encoder = intel_connector_primary_encoder(to_intel_connector(conn));
+ if (!encoder->update_complete)
+ continue;
+
+ crtc = new_conn_state->crtc ?
+ to_intel_crtc(new_conn_state->crtc) : NULL;
+ encoder->update_complete(state, encoder, crtc);
+ }
+}
+
+static void intel_encoders_pre_pll_enable(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->pre_pll_enable)
@@ -6056,19 +6172,19 @@ static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_pre_enable(struct drm_crtc *crtc,
+static void intel_encoders_pre_enable(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->pre_enable)
@@ -6076,19 +6192,19 @@ static void intel_encoders_pre_enable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_enable(struct drm_crtc *crtc,
+static void intel_encoders_enable(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->enable)
@@ -6097,19 +6213,19 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_disable(struct drm_crtc *crtc,
+static void intel_encoders_disable(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
- if (old_conn_state->crtc != crtc)
+ if (old_conn_state->crtc != &crtc->base)
continue;
intel_opregion_notify_encoder(encoder, false);
@@ -6118,19 +6234,19 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_post_disable(struct drm_crtc *crtc,
+static void intel_encoders_post_disable(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
- if (old_conn_state->crtc != crtc)
+ if (old_conn_state->crtc != &crtc->base)
continue;
if (encoder->post_disable)
@@ -6138,19 +6254,19 @@ static void intel_encoders_post_disable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
+static void intel_encoders_post_pll_disable(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *old_conn_state;
struct drm_connector *conn;
int i;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
- if (old_conn_state->crtc != crtc)
+ if (old_conn_state->crtc != &crtc->base)
continue;
if (encoder->post_pll_disable)
@@ -6158,19 +6274,19 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
}
}
-static void intel_encoders_update_pipe(struct drm_crtc *crtc,
+static void intel_encoders_update_pipe(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
- if (conn_state->crtc != crtc)
+ if (conn_state->crtc != &crtc->base)
continue;
if (encoder->update_pipe)
@@ -6187,15 +6303,13 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat
}
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
if (WARN_ON(intel_crtc->active))
return;
@@ -6231,7 +6345,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(intel_crtc, pipe_config, state);
if (pipe_config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -6255,16 +6369,16 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+ dev_priv->display.initial_watermarks(state, pipe_config);
intel_enable_pipe(pipe_config);
if (pipe_config->has_pch_encoder)
- ironlake_pch_enable(old_intel_state, pipe_config);
+ ironlake_pch_enable(state, pipe_config);
assert_vblank_disabled(crtc);
intel_crtc_vblank_on(pipe_config);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(intel_crtc, pipe_config, state);
if (HAS_PCH_CPT(dev_priv))
cpt_verify_modeset(dev, intel_crtc->pipe);
@@ -6310,33 +6424,37 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
u32 val;
val = MBUS_DBOX_A_CREDIT(2);
- val |= MBUS_DBOX_BW_CREDIT(1);
- val |= MBUS_DBOX_B_CREDIT(8);
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ val |= MBUS_DBOX_BW_CREDIT(2);
+ val |= MBUS_DBOX_B_CREDIT(12);
+ } else {
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
+ }
I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
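For reference, the MBUS DBOX credit values programmed above, by generation:

/*
 *            A credits   BW credits   B credits
 *   gen12        2           2           12
 *   gen11        2           1            8
 */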
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
bool psl_clkgate_wa;
if (WARN_ON(intel_crtc->active))
return;
- intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
if (pipe_config->shared_dpll)
intel_enable_shared_dpll(pipe_config);
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(intel_crtc, pipe_config, state);
if (intel_crtc_has_dp_encoder(pipe_config))
intel_dp_set_m_n(pipe_config, M1_N1);
@@ -6394,7 +6512,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_ddi_enable_transcoder_func(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
+ dev_priv->display.initial_watermarks(state, pipe_config);
if (INTEL_GEN(dev_priv) >= 11)
icl_pipe_mbus_enable(intel_crtc);
@@ -6404,7 +6522,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(pipe_config);
if (pipe_config->has_pch_encoder)
- lpt_pch_enable(old_intel_state, pipe_config);
+ lpt_pch_enable(state, pipe_config);
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(pipe_config, true);
@@ -6412,7 +6530,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
assert_vblank_disabled(crtc);
intel_crtc_vblank_on(pipe_config);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(intel_crtc, pipe_config, state);
if (psl_clkgate_wa) {
intel_wait_for_vblank(dev_priv, pipe);
@@ -6444,7 +6562,7 @@ static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
}
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_device *dev = crtc->dev;
@@ -6460,7 +6578,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- intel_encoders_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_disable(intel_crtc, old_crtc_state, state);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
@@ -6472,7 +6590,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (old_crtc_state->has_pch_encoder)
ironlake_fdi_disable(crtc);
- intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
if (old_crtc_state->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
@@ -6503,14 +6621,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
}
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- intel_encoders_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_disable(intel_crtc, old_crtc_state, state);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
@@ -6532,9 +6650,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
else
ironlake_pfit_disable(old_crtc_state);
- intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
- intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -6560,33 +6678,47 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
-bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
+bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (port == PORT_NONE)
+ if (phy == PHY_NONE)
return false;
- if (IS_ELKHARTLAKE(dev_priv))
- return port <= PORT_C;
+ if (IS_ELKHARTLAKE(dev_priv) || INTEL_GEN(dev_priv) >= 12)
+ return phy <= PHY_C;
if (INTEL_GEN(dev_priv) >= 11)
- return port <= PORT_B;
+ return phy <= PHY_B;
return false;
}
-bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
+bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
+ if (INTEL_GEN(dev_priv) >= 12)
+ return phy >= PHY_D && phy <= PHY_I;
+
if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
- return port >= PORT_C && port <= PORT_F;
+ return phy >= PHY_C && phy <= PHY_F;
return false;
}
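Taken together, the two predicates partition the PHYs per platform as follows (derived from the checks above):

/*
 *   gen11 (ICL):   combo = PHY_A..PHY_B,  TC = PHY_C..PHY_F
 *   EHL:           combo = PHY_A..PHY_C,  TC = none
 *   gen12:         combo = PHY_A..PHY_C,  TC = PHY_D..PHY_I
 */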
+enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
+{
+ if (IS_ELKHARTLAKE(i915) && port == PORT_D)
+ return PHY_A;
+
+ return (enum phy)port;
+}
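A minimal usage sketch of the new port-to-PHY idiom, using only helpers introduced in this patch; the encoder variable is assumed to be in scope:

enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

if (intel_phy_is_combo(dev_priv, phy)) {
	/* combo PHY programming path */
} else if (intel_phy_is_tc(dev_priv, phy)) {
	/* Type-C (MG/TBT) PHY programming path */
}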
+
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
{
- if (!intel_port_is_tc(dev_priv, port))
+ if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
return PORT_TC_NONE;
+ if (INTEL_GEN(dev_priv) >= 12)
+ return port - PORT_D;
+
return port - PORT_C;
}
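The resulting TC port rebasing, assuming the usual zero-based tc_port enumeration:

/*
 *   gen12:  PORT_D..PORT_I  ->  PORT_TC1..PORT_TC6
 *   gen11:  PORT_C..PORT_F  ->  PORT_TC1..PORT_TC4
 */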
@@ -6614,6 +6746,26 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+
+ if (intel_phy_is_tc(dev_priv, phy) &&
+ dig_port->tc_mode == TC_PORT_TBT_ALT) {
+ switch (dig_port->aux_ch) {
+ case AUX_CH_C:
+ return POWER_DOMAIN_AUX_TBT1;
+ case AUX_CH_D:
+ return POWER_DOMAIN_AUX_TBT2;
+ case AUX_CH_E:
+ return POWER_DOMAIN_AUX_TBT3;
+ case AUX_CH_F:
+ return POWER_DOMAIN_AUX_TBT4;
+ default:
+ MISSING_CASE(dig_port->aux_ch);
+ return POWER_DOMAIN_AUX_TBT1;
+ }
+ }
+
switch (dig_port->aux_ch) {
case AUX_CH_A:
return POWER_DOMAIN_AUX_A;
@@ -6633,14 +6785,12 @@ intel_aux_power_domain(struct intel_digital_port *dig_port)
}
}
-static u64 get_crtc_power_domains(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
u64 mask;
enum transcoder transcoder = crtc_state->cpu_transcoder;
@@ -6653,7 +6803,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
crtc_state->pch_pfit.force_thru)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
- drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
+ drm_for_each_encoder_mask(encoder, &dev_priv->drm,
+ crtc_state->base.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
mask |= BIT_ULL(intel_encoder->power_domain);
@@ -6669,17 +6820,16 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
}
static u64
-modeset_get_crtc_power_domains(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain domain;
u64 domains, new_domains, old_domains;
- old_domains = intel_crtc->enabled_power_domains;
- intel_crtc->enabled_power_domains = new_domains =
- get_crtc_power_domains(crtc, crtc_state);
+ old_domains = crtc->enabled_power_domains;
+ crtc->enabled_power_domains = new_domains =
+ get_crtc_power_domains(crtc_state);
domains = new_domains & ~old_domains;
@@ -6699,10 +6849,8 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
}
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -6729,7 +6877,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_pll_enable(intel_crtc, pipe_config, state);
if (IS_CHERRYVIEW(dev_priv)) {
chv_prepare_pll(intel_crtc, pipe_config);
@@ -6739,7 +6887,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
vlv_enable_pll(intel_crtc, pipe_config);
}
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(intel_crtc, pipe_config, state);
i9xx_pfit_enable(pipe_config);
@@ -6748,14 +6896,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(pipe_config);
- dev_priv->display.initial_watermarks(old_intel_state,
- pipe_config);
+ dev_priv->display.initial_watermarks(state, pipe_config);
intel_enable_pipe(pipe_config);
assert_vblank_disabled(crtc);
intel_crtc_vblank_on(pipe_config);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(intel_crtc, pipe_config, state);
}
static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
@@ -6768,10 +6915,8 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
}
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -6796,7 +6941,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- intel_encoders_pre_enable(crtc, pipe_config, old_state);
+ intel_encoders_pre_enable(intel_crtc, pipe_config, state);
i9xx_enable_pll(intel_crtc, pipe_config);
@@ -6808,7 +6953,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state,
+ dev_priv->display.initial_watermarks(state,
pipe_config);
else
intel_update_watermarks(intel_crtc);
@@ -6817,7 +6962,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
assert_vblank_disabled(crtc);
intel_crtc_vblank_on(pipe_config);
- intel_encoders_enable(crtc, pipe_config, old_state);
+ intel_encoders_enable(intel_crtc, pipe_config, state);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
@@ -6836,7 +6981,7 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
}
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state)
+ struct intel_atomic_state *state)
{
struct drm_crtc *crtc = old_crtc_state->base.crtc;
struct drm_device *dev = crtc->dev;
@@ -6851,7 +6996,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (IS_GEN(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
- intel_encoders_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_disable(intel_crtc, old_crtc_state, state);
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
@@ -6860,7 +7005,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
i9xx_pfit_disable(old_crtc_state);
- intel_encoders_post_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_disable(intel_crtc, old_crtc_state, state);
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev_priv))
@@ -6871,7 +7016,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
i9xx_disable_pll(old_crtc_state);
}
- intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
+ intel_encoders_post_pll_disable(intel_crtc, old_crtc_state, state);
if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -6925,7 +7070,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
WARN_ON(IS_ERR(crtc_state) || ret);
- dev_priv->display.crtc_disable(crtc_state, state);
+ dev_priv->display.crtc_disable(crtc_state, to_intel_atomic_state(state));
drm_atomic_state_put(state);
@@ -6988,7 +7133,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
/* Cross check the actual hw state with our own modeset state tracking (and it's
* internal consistency). */
-static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
+static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
@@ -7006,7 +7151,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
if (!crtc_state)
return;
- I915_STATE_WARN(!crtc_state->active,
+ I915_STATE_WARN(!crtc_state->base.active,
"connector is active, but attached crtc isn't\n");
if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
@@ -7018,7 +7163,7 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
"attached encoder crtc differs from connector crtc\n");
} else {
- I915_STATE_WARN(crtc_state && crtc_state->active,
+ I915_STATE_WARN(crtc_state && crtc_state->base.active,
"attached crtc is active, but connector isn't\n");
I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
"best encoder set without crtc!\n");
@@ -9484,6 +9629,8 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->base.state);
const struct intel_limit *limit;
int refclk = 120000;
@@ -9525,7 +9672,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
ironlake_compute_dpll(crtc, crtc_state, NULL);
- if (!intel_get_shared_dpll(crtc_state, NULL)) {
+ if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9906,7 +10053,7 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (!intel_get_shared_dpll(crtc_state, encoder)) {
+ if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9936,22 +10083,37 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
enum port port,
struct intel_crtc_state *pipe_config)
{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum icl_port_dpll_id port_dpll_id;
enum intel_dpll_id id;
u32 temp;
- /* TODO: TBT pll not implemented. */
- if (intel_port_is_combophy(dev_priv, port)) {
- temp = I915_READ(DPCLKA_CFGCR0_ICL) &
- DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- } else if (intel_port_is_tc(dev_priv, port)) {
- id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
+ if (intel_phy_is_combo(dev_priv, phy)) {
+ temp = I915_READ(ICL_DPCLKA_CFGCR0) &
+ ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
+ id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
+ port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ } else if (intel_phy_is_tc(dev_priv, phy)) {
+ u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+
+ if (clk_sel == DDI_CLK_SEL_MG) {
+ id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
+ port));
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+ } else {
+ WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
+ id = DPLL_ID_ICL_TBTPLL;
+ port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+ }
} else {
WARN(1, "Invalid port %x\n", port);
return;
}
- pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
+ pipe_config->icl_port_dplls[port_dpll_id].pll =
+ intel_get_shared_dpll_by_id(dev_priv, id);
+
+ icl_set_active_port_dpll(pipe_config, port_dpll_id);
}
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
@@ -10191,7 +10353,11 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
- port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
+ if (INTEL_GEN(dev_priv) >= 12)
+ port = (tmp & TGL_TRANS_DDI_PORT_MASK) >>
+ TGL_TRANS_DDI_PORT_SHIFT;
+ else
+ port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
if (INTEL_GEN(dev_priv) >= 11)
icelake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -11297,7 +11463,7 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
*
* Returns true or false.
*/
-static bool intel_wm_need_update(struct intel_plane_state *cur,
+static bool intel_wm_need_update(const struct intel_plane_state *cur,
struct intel_plane_state *new)
{
/* Update watermarks on tiling or size changes. */
@@ -11329,33 +11495,28 @@ static bool needs_scaling(const struct intel_plane_state *state)
}
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct drm_crtc_state *crtc_state,
+ struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
- struct drm_plane_state *plane_state)
+ struct intel_plane_state *plane_state)
{
- struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
- struct drm_crtc *crtc = crtc_state->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *plane = to_intel_plane(plane_state->plane);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = old_crtc_state->base.active;
- bool is_crtc_enabled = crtc_state->active;
+ bool is_crtc_enabled = crtc_state->base.active;
bool turn_off, turn_on, visible, was_visible;
- struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
- ret = skl_update_scaler_plane(
- to_intel_crtc_state(crtc_state),
- to_intel_plane_state(plane_state));
+ ret = skl_update_scaler_plane(crtc_state, plane_state);
if (ret)
return ret;
}
was_visible = old_plane_state->base.visible;
- visible = plane_state->visible;
+ visible = plane_state->base.visible;
if (!was_crtc_enabled && WARN_ON(was_visible))
was_visible = false;
@@ -11371,22 +11532,22 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- plane_state->visible = visible = false;
- to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
- to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
+ plane_state->base.visible = visible = false;
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->data_rate[plane->id] = 0;
}
if (!was_visible && !visible)
return 0;
if (fb != old_plane_state->base.fb)
- pipe_config->fb_changed = true;
+ crtc_state->fb_changed = true;
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
- intel_crtc->base.base.id, intel_crtc->base.name,
+ crtc->base.base.id, crtc->base.name,
plane->base.base.id, plane->base.name,
fb ? fb->base.id : -1);
@@ -11397,29 +11558,28 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
if (turn_on) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- pipe_config->update_wm_pre = true;
+ crtc_state->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- pipe_config->disable_cxsr = true;
+ crtc_state->disable_cxsr = true;
} else if (turn_off) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- pipe_config->update_wm_post = true;
+ crtc_state->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- pipe_config->disable_cxsr = true;
- } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
- to_intel_plane_state(plane_state))) {
+ crtc_state->disable_cxsr = true;
+ } else if (intel_wm_need_update(old_plane_state, plane_state)) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
- pipe_config->update_wm_pre = true;
- pipe_config->update_wm_post = true;
+ crtc_state->update_wm_pre = true;
+ crtc_state->update_wm_post = true;
}
}
if (visible || was_visible)
- pipe_config->fb_bits |= plane->frontbuffer_bit;
+ crtc_state->fb_bits |= plane->frontbuffer_bit;
/*
* ILK/SNB DVSACNTR/Sprite Enable
@@ -11458,8 +11618,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
(IS_GEN_RANGE(dev_priv, 5, 6) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
- needs_scaling(to_intel_plane_state(plane_state)))))
- pipe_config->disable_lp_wm = true;
+ needs_scaling(plane_state))))
+ crtc_state->disable_lp_wm = true;
return 0;
}
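/*
 * Sketch of the type cleanup this series applies throughout: the driver
 * state embeds the core state as a "base" member, so functions can take
 * the driver type directly and hand "&state->base" to core helpers,
 * instead of casting with to_intel_crtc_state() at every use site. The
 * types and names below are simplified stand-ins, not the real structs.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct drm_crtc_state { bool active; };

struct intel_crtc_state {
        struct drm_crtc_state base;     /* core state embedded first */
        bool update_pipe;               /* driver-private extension */
};

/* Downcast, mirroring to_intel_crtc_state(): recover the container. */
static struct intel_crtc_state *
to_intel_crtc_state(struct drm_crtc_state *s)
{
        return (struct intel_crtc_state *)((char *)s -
                offsetof(struct intel_crtc_state, base));
}

/* Core helper: only knows the base type. */
static bool core_state_is_active(const struct drm_crtc_state *s)
{
        return s->active;
}

/* Driver function in the new style: takes the driver type directly. */
static bool needs_update(const struct intel_crtc_state *crtc_state)
{
        return core_state_is_active(&crtc_state->base) ||
               crtc_state->update_pipe;
}

int main(void)
{
        struct intel_crtc_state s = { .base.active = true };
        printf("%d %d\n", needs_update(&s),
               to_intel_crtc_state(&s.base) == &s);
        return 0;
}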
@@ -11608,7 +11768,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
int ret;
- bool mode_changed = needs_modeset(crtc_state);
+ bool mode_changed = needs_modeset(pipe_config);
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
mode_changed && !crtc_state->active)
@@ -12090,6 +12250,8 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
+ memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
+ sizeof(saved_state->icl_port_dplls));
saved_state->crc_enabled = crtc_state->crc_enabled;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -12706,10 +12868,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
}
}
-static void verify_wm_state(struct drm_crtc *crtc,
- struct drm_crtc_state *new_state)
+static void verify_wm_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_hw_state {
struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
@@ -12719,21 +12881,20 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_ddb_allocation *sw_ddb;
struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- const enum pipe pipe = intel_crtc->pipe;
+ const enum pipe pipe = crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
- if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
+ if (INTEL_GEN(dev_priv) < 9 || !new_crtc_state->base.active)
return;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw)
return;
- skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
- sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+ skl_pipe_wm_get_hw_state(crtc, &hw->wm);
+ sw_wm = &new_crtc_state->wm.skl.optimal;
- skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
+ skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
skl_ddb_get_hw_state(dev_priv, &hw->ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -12781,7 +12942,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
/* DDB */
hw_ddb_entry = &hw->ddb_y[plane];
- sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -12833,7 +12994,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
/* DDB */
hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
- sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
+ sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12847,23 +13008,22 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
static void
-verify_connector_state(struct drm_device *dev,
- struct drm_atomic_state *state,
- struct drm_crtc *crtc)
+verify_connector_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_connector *connector;
struct drm_connector_state *new_conn_state;
int i;
- for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
- struct drm_crtc_state *crtc_state = NULL;
+ struct intel_crtc_state *crtc_state = NULL;
- if (new_conn_state->crtc != crtc)
+ if (new_conn_state->crtc != &crtc->base)
continue;
if (crtc)
- crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
intel_connector_verify_state(crtc_state, new_conn_state);
@@ -12873,14 +13033,14 @@ verify_connector_state(struct drm_device *dev,
}
static void
-verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
+verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
{
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
int i;
- for_each_intel_encoder(dev, encoder) {
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
bool enabled = false, found = false;
enum pipe pipe;
@@ -12888,7 +13048,7 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
encoder->base.base.id,
encoder->base.name);
- for_each_oldnew_connector_in_state(state, connector, old_conn_state,
+ for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
new_conn_state, i) {
if (old_conn_state->best_encoder == &encoder->base)
found = true;
@@ -12922,50 +13082,49 @@ verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
}
static void
-verify_crtc_state(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state)
+verify_crtc_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_encoder *encoder;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config, *sw_config;
- struct drm_atomic_state *old_state;
+ struct intel_crtc_state *pipe_config;
+ struct drm_atomic_state *state;
bool active;
- old_state = old_crtc_state->state;
- __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
- pipe_config = to_intel_crtc_state(old_crtc_state);
+ state = old_crtc_state->base.state;
+ __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->base);
+ pipe_config = old_crtc_state;
memset(pipe_config, 0, sizeof(*pipe_config));
- pipe_config->base.crtc = crtc;
- pipe_config->base.state = old_state;
+ pipe_config->base.crtc = &crtc->base;
+ pipe_config->base.state = state;
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
- active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
+ active = dev_priv->display.get_pipe_config(crtc, pipe_config);
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- active = new_crtc_state->active;
+ active = new_crtc_state->base.active;
- I915_STATE_WARN(new_crtc_state->active != active,
+ I915_STATE_WARN(new_crtc_state->base.active != active,
"crtc active state doesn't match with hw state "
- "(expected %i, found %i)\n", new_crtc_state->active, active);
+ "(expected %i, found %i)\n", new_crtc_state->base.active, active);
- I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
+ I915_STATE_WARN(crtc->active != new_crtc_state->base.active,
"transitional active state does not match atomic hw state "
- "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
+ "(expected %i, found %i)\n", new_crtc_state->base.active, crtc->active);
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
enum pipe pipe;
active = encoder->get_hw_state(encoder, &pipe);
- I915_STATE_WARN(active != new_crtc_state->active,
+ I915_STATE_WARN(active != new_crtc_state->base.active,
"[ENCODER:%i] active %i with crtc active %i\n",
- encoder->base.base.id, active, new_crtc_state->active);
+ encoder->base.base.id, active, new_crtc_state->base.active);
- I915_STATE_WARN(active && intel_crtc->pipe != pipe,
+ I915_STATE_WARN(active && crtc->pipe != pipe,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
@@ -12975,16 +13134,16 @@ verify_crtc_state(struct drm_crtc *crtc,
intel_crtc_compute_pixel_rate(pipe_config);
- if (!new_crtc_state->active)
+ if (!new_crtc_state->base.active)
return;
intel_pipe_config_sanity_check(dev_priv, pipe_config);
- sw_config = to_intel_crtc_state(new_crtc_state);
- if (!intel_pipe_config_compare(sw_config, pipe_config, false)) {
+ if (!intel_pipe_config_compare(new_crtc_state,
+ pipe_config, false)) {
I915_STATE_WARN(1, "pipe state doesn't match!\n");
intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
- intel_dump_pipe_config(sw_config, NULL, "[sw state]");
+ intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
}
}
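/*
 * Minimal model of the verification pattern above: read the hardware
 * state back, compare it against the software (atomic) state, and warn
 * on mismatch instead of silently diverging. The "hardware" here is a
 * plain struct; the real code reads registers via get_pipe_config().
 */
#include <stdbool.h>
#include <stdio.h>

struct pipe_state { bool active; int width; };

static void read_hw_state(struct pipe_state *hw)
{
        hw->active = true;      /* pretend register readout */
        hw->width = 1920;
}

static void verify_state(const struct pipe_state *sw)
{
        struct pipe_state hw;

        read_hw_state(&hw);
        if (hw.active != sw->active || hw.width != sw->width)
                fprintf(stderr,
                        "pipe state doesn't match! (expected %d/%d, found %d/%d)\n",
                        sw->active, sw->width, hw.active, hw.width);
}

int main(void)
{
        struct pipe_state sw = { .active = true, .width = 1024 };
        verify_state(&sw);      /* reports the width mismatch */
        return 0;
}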
@@ -13004,8 +13163,8 @@ intel_verify_planes(struct intel_atomic_state *state)
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
- struct drm_crtc *crtc,
- struct drm_crtc_state *new_state)
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *new_crtc_state)
{
struct intel_dpll_hw_state dpll_hw_state;
unsigned int crtc_mask;
@@ -13035,16 +13194,16 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
return;
}
- crtc_mask = drm_crtc_mask(crtc);
+ crtc_mask = drm_crtc_mask(&crtc->base);
- if (new_state->active)
+ if (new_crtc_state->base.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+ pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
else
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(crtc)), pll->active_mask);
+ pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -13057,51 +13216,47 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
}
static void
-verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state)
+verify_shared_dpll_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
- struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (new_state->shared_dpll)
- verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
+ if (new_crtc_state->shared_dpll)
+ verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
- if (old_state->shared_dpll &&
- old_state->shared_dpll != new_state->shared_dpll) {
- unsigned int crtc_mask = drm_crtc_mask(crtc);
- struct intel_shared_dpll *pll = old_state->shared_dpll;
+ if (old_crtc_state->shared_dpll &&
+ old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
+ unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
+ struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(drm_crtc_index(crtc)));
+ pipe_name(drm_crtc_index(&crtc->base)));
I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(drm_crtc_index(crtc)));
+ pipe_name(drm_crtc_index(&crtc->base)));
}
}
static void
-intel_modeset_verify_crtc(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- struct drm_crtc_state *old_state,
- struct drm_crtc_state *new_state)
+intel_modeset_verify_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- if (!needs_modeset(new_state) &&
- !to_intel_crtc_state(new_state)->update_pipe)
+ if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
return;
- verify_wm_state(crtc, new_state);
- verify_connector_state(crtc->dev, state, crtc);
- verify_crtc_state(crtc, old_state, new_state);
- verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
+ verify_wm_state(crtc, new_crtc_state);
+ verify_connector_state(state, crtc);
+ verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
+ verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
}
static void
-verify_disabled_dpll_state(struct drm_device *dev)
+verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int i;
for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13109,12 +13264,12 @@ verify_disabled_dpll_state(struct drm_device *dev)
}
static void
-intel_modeset_verify_disabled(struct drm_device *dev,
- struct drm_atomic_state *state)
+intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state)
{
- verify_encoder_state(dev, state);
- verify_connector_state(dev, state, NULL);
- verify_disabled_dpll_state(dev);
+ verify_encoder_state(dev_priv, state);
+ verify_connector_state(state, NULL);
+ verify_disabled_dpll_state(dev_priv);
}
static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
@@ -13168,27 +13323,18 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
if (!dev_priv->display.crtc_compute_clock)
return;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- struct intel_shared_dpll *old_dpll =
- old_crtc_state->shared_dpll;
-
- if (!needs_modeset(&new_crtc_state->base))
- continue;
-
- new_crtc_state->shared_dpll = NULL;
-
- if (!old_dpll)
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
continue;
- intel_release_shared_dpll(old_dpll, crtc, &state->base);
+ intel_release_shared_dplls(state, crtc);
}
}
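/*
 * The hunk above collapses a manual "remember the old PLL, clear the
 * pointer, release it" sequence into one intel_release_shared_dplls()
 * call per modeset CRTC. A rough shape of that control flow, with toy
 * types standing in for the atomic state:
 */
#include <stdbool.h>
#include <stdio.h>

struct crtc_state { bool needs_modeset; int pll_id; };

static void release_shared_plls(struct crtc_state *cs)
{
        /* consolidated helper: drops every PLL reference the CRTC held */
        printf("releasing pll %d\n", cs->pll_id);
        cs->pll_id = -1;
}

int main(void)
{
        struct crtc_state states[] = {
                { .needs_modeset = true,  .pll_id = 0 },
                { .needs_modeset = false, .pll_id = 1 },
        };

        for (unsigned int i = 0; i < 2; i++) {
                if (!states[i].needs_modeset)
                        continue;       /* fastsets keep their PLLs */
                release_shared_plls(&states[i]);
        }
        return 0;
}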
@@ -13210,7 +13356,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
/* look at all crtcs that are going to be enabled during modeset */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (!crtc_state->base.active ||
- !needs_modeset(&crtc_state->base))
+ !needs_modeset(crtc_state))
continue;
if (first_crtc_state) {
@@ -13235,7 +13381,7 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
crtc_state->hsw_workaround_pipe = INVALID_PIPE;
if (!crtc_state->base.active ||
- needs_modeset(&crtc_state->base))
+ needs_modeset(crtc_state))
continue;
/* 2 or more enabled crtcs means no need for w/a */
@@ -13253,15 +13399,16 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
-static int intel_lock_all_pipes(struct drm_atomic_state *state)
+static int intel_lock_all_pipes(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
/* Add all pipes to the state */
- for_each_crtc(state->dev, crtc) {
- struct drm_crtc_state *crtc_state;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
}
@@ -13269,32 +13416,35 @@ static int intel_lock_all_pipes(struct drm_atomic_state *state)
return 0;
}
-static int intel_modeset_all_pipes(struct drm_atomic_state *state)
+static int intel_modeset_all_pipes(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
/*
* Add all pipes to the state, and force
* a modeset on all the active ones.
*/
- for_each_crtc(state->dev, crtc) {
- struct drm_crtc_state *crtc_state;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
int ret;
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- if (!crtc_state->active || needs_modeset(crtc_state))
+ if (!crtc_state->base.active || needs_modeset(crtc_state))
continue;
- crtc_state->mode_changed = true;
+ crtc_state->base.mode_changed = true;
- ret = drm_atomic_add_affected_connectors(state, crtc);
+ ret = drm_atomic_add_affected_connectors(&state->base,
+ &crtc->base);
if (ret)
return ret;
- ret = drm_atomic_add_affected_planes(state, crtc);
+ ret = drm_atomic_add_affected_planes(&state->base,
+ &crtc->base);
if (ret)
return ret;
}
@@ -13356,18 +13506,18 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
*/
if (intel_cdclk_changed(&dev_priv->cdclk.logical,
&state->cdclk.logical)) {
- ret = intel_lock_all_pipes(&state->base);
+ ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;
}
if (is_power_of_2(state->active_crtcs)) {
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
pipe = ilog2(state->active_crtcs);
- crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
- crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
if (crtc_state && needs_modeset(crtc_state))
pipe = INVALID_PIPE;
} else {
@@ -13379,14 +13529,14 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
intel_cdclk_needs_cd2x_update(dev_priv,
&dev_priv->cdclk.actual,
&state->cdclk.actual)) {
- ret = intel_lock_all_pipes(&state->base);
+ ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;
state->cdclk.pipe = pipe;
} else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
&state->cdclk.actual)) {
- ret = intel_modeset_all_pipes(&state->base);
+ ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
@@ -13478,7 +13628,7 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(&new_crtc_state->base))
+ if (!needs_modeset(new_crtc_state))
continue;
if (!new_crtc_state->base.enable) {
@@ -13492,7 +13642,7 @@ static int intel_atomic_check(struct drm_device *dev,
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
- if (needs_modeset(&new_crtc_state->base))
+ if (needs_modeset(new_crtc_state))
any_ms = true;
}
@@ -13527,12 +13677,12 @@ static int intel_atomic_check(struct drm_device *dev,
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!needs_modeset(&new_crtc_state->base) &&
+ if (!needs_modeset(new_crtc_state) &&
!new_crtc_state->update_pipe)
continue;
intel_dump_pipe_config(new_crtc_state, state,
- needs_modeset(&new_crtc_state->base) ?
+ needs_modeset(new_crtc_state) ?
"[modeset]" : "[fastset]");
}
@@ -13553,10 +13703,10 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
}
-static int intel_atomic_prepare_commit(struct drm_device *dev,
- struct drm_atomic_state *state)
+static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
{
- return drm_atomic_helper_prepare_planes(dev, state);
+ return drm_atomic_helper_prepare_planes(state->base.dev,
+ &state->base);
}
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
@@ -13567,60 +13717,57 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
if (!vblank->max_vblank_count)
return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
- return dev->driver->get_vblank_counter(dev, crtc->pipe);
+ return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
-static void intel_update_crtc(struct drm_crtc *crtc,
- struct drm_atomic_state *state,
- struct drm_crtc_state *old_crtc_state,
- struct drm_crtc_state *new_crtc_state)
+static void intel_update_crtc(struct intel_crtc *crtc,
+ struct intel_atomic_state *state,
+ struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
bool modeset = needs_modeset(new_crtc_state);
struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
- to_intel_plane(crtc->primary));
+ intel_atomic_get_new_plane_state(state,
+ to_intel_plane(crtc->base.primary));
if (modeset) {
- update_scanline_offset(pipe_config);
- dev_priv->display.crtc_enable(pipe_config, state);
+ update_scanline_offset(new_crtc_state);
+ dev_priv->display.crtc_enable(new_crtc_state, state);
/* vblanks work again, re-enable pipe CRC. */
- intel_crtc_enable_pipe_crc(intel_crtc);
+ intel_crtc_enable_pipe_crc(crtc);
} else {
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
- pipe_config);
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
- if (pipe_config->update_pipe)
- intel_encoders_update_pipe(crtc, pipe_config, state);
+ if (new_crtc_state->update_pipe)
+ intel_encoders_update_pipe(crtc, new_crtc_state, state);
}
- if (pipe_config->update_pipe && !pipe_config->enable_fbc)
- intel_fbc_disable(intel_crtc);
+ if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
+ intel_fbc_disable(crtc);
else if (new_plane_state)
- intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
+ intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
- intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+ intel_begin_crtc_commit(state, crtc);
if (INTEL_GEN(dev_priv) >= 9)
- skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ skl_update_planes_on_crtc(state, crtc);
else
- i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ i9xx_update_planes_on_crtc(state, crtc);
- intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
+ intel_finish_crtc_commit(state, crtc);
}
-static void intel_update_crtcs(struct drm_atomic_state *state)
+static void intel_update_crtcs(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
int i;
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!new_crtc_state->active)
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!new_crtc_state->base.active)
continue;
intel_update_crtc(crtc, state, old_crtc_state,
@@ -13628,26 +13775,23 @@ static void intel_update_crtcs(struct drm_atomic_state *state)
}
}
-static void skl_update_crtcs(struct drm_atomic_state *state)
+static void skl_update_crtcs(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_crtc_state *cstate;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *old_crtc_state, *new_crtc_state;
unsigned int updated = 0;
bool progress;
enum pipe pipe;
int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
+ u8 required_slices = state->wm_results.ddb.enabled_slices;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
/* ignore allocations for crtcs that have been turned off. */
- if (new_crtc_state->active)
- entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+ if (new_crtc_state->base.active)
+ entries[i] = old_crtc_state->wm.skl.ddb;
/* If 2nd DBuf slice required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -13662,24 +13806,22 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
do {
progress = false;
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
bool vbl_wait = false;
- unsigned int cmask = drm_crtc_mask(crtc);
+ unsigned int cmask = drm_crtc_mask(&crtc->base);
- intel_crtc = to_intel_crtc(crtc);
- cstate = to_intel_crtc_state(new_crtc_state);
- pipe = intel_crtc->pipe;
+ pipe = crtc->pipe;
- if (updated & cmask || !cstate->base.active)
+ if (updated & cmask || !new_crtc_state->base.active)
continue;
- if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
+ if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries,
INTEL_INFO(dev_priv)->num_pipes, i))
continue;
updated |= cmask;
- entries[i] = cstate->wm.skl.ddb;
+ entries[i] = new_crtc_state->wm.skl.ddb;
/*
* If this is an already active pipe, its DDB changed,
@@ -13687,10 +13829,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
* then we need to wait for a vblank to pass for the
* new ddb allocation to take effect.
*/
- if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
- &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
- !new_crtc_state->active_changed &&
- intel_state->wm_results.dirty_pipes != updated)
+ if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
+ &old_crtc_state->wm.skl.ddb) &&
+ !new_crtc_state->base.active_changed &&
+ state->wm_results.dirty_pipes != updated)
vbl_wait = true;
intel_update_crtc(crtc, state, old_crtc_state,
@@ -13736,18 +13878,21 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
for (;;) {
prepare_to_wait(&intel_state->commit_ready.wait,
&wait_fence, TASK_UNINTERRUPTIBLE);
- prepare_to_wait(&dev_priv->gpu_error.wait_queue,
+ prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ I915_RESET_MODESET),
&wait_reset, TASK_UNINTERRUPTIBLE);
- if (i915_sw_fence_done(&intel_state->commit_ready)
- || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
+ if (i915_sw_fence_done(&intel_state->commit_ready) ||
+ test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
break;
schedule();
}
finish_wait(&intel_state->commit_ready.wait, &wait_fence);
- finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
+ finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ I915_RESET_MODESET),
+ &wait_reset);
}
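/*
 * The fence wait above blocks until either the commit fence signals or
 * a GPU-reset bit is set, whichever comes first. Outside the kernel's
 * waitqueue API the same "sleep until A or B" shape looks like this
 * pthread sketch (an analogy for illustration, not the i915 mechanism):
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool fence_done, reset_pending;

static void wait_fence_or_reset(void)
{
        pthread_mutex_lock(&lock);
        while (!fence_done && !reset_pending)
                pthread_cond_wait(&cond, &lock); /* re-checks both wakeups */
        pthread_mutex_unlock(&lock);
}

static void *signal_reset(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        reset_pending = true;           /* a reset also unblocks the commit */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_create(&t, NULL, signal_reset, NULL);
        wait_fence_or_reset();
        pthread_join(t, NULL);
        puts("unblocked by reset");
        return 0;
}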
static void intel_atomic_cleanup_work(struct work_struct *work)
@@ -13763,57 +13908,49 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
intel_atomic_helper_free_state(i915);
}
-static void intel_atomic_commit_tail(struct drm_atomic_state *state)
+static void intel_atomic_commit_tail(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
u64 put_domains[I915_MAX_PIPES] = {};
intel_wakeref_t wakeref = 0;
int i;
- intel_atomic_commit_fence_wait(intel_state);
+ intel_atomic_commit_fence_wait(state);
- drm_atomic_helper_wait_for_dependencies(state);
+ drm_atomic_helper_wait_for_dependencies(&state->base);
- if (intel_state->modeset)
+ if (state->modeset)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
- new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
- intel_crtc = to_intel_crtc(crtc);
-
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (needs_modeset(new_crtc_state) ||
- to_intel_crtc_state(new_crtc_state)->update_pipe) {
+ new_crtc_state->update_pipe) {
- put_domains[intel_crtc->pipe] =
- modeset_get_crtc_power_domains(crtc,
- new_intel_crtc_state);
+ put_domains[crtc->pipe] =
+ modeset_get_crtc_power_domains(new_crtc_state);
}
if (!needs_modeset(new_crtc_state))
continue;
- intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
+ intel_pre_plane_update(old_crtc_state, new_crtc_state);
- if (old_crtc_state->active) {
- intel_crtc_disable_planes(intel_state, intel_crtc);
+ if (old_crtc_state->base.active) {
+ intel_crtc_disable_planes(state, crtc);
/*
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
*/
- intel_crtc_disable_pipe_crc(intel_crtc);
+ intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display.crtc_disable(old_intel_crtc_state, state);
- intel_crtc->active = false;
- intel_fbc_disable(intel_crtc);
- intel_disable_shared_dpll(old_intel_crtc_state);
+ dev_priv->display.crtc_disable(old_crtc_state, state);
+ crtc->active = false;
+ intel_fbc_disable(crtc);
+ intel_disable_shared_dpll(old_crtc_state);
/*
* Underruns don't always raise
@@ -13823,25 +13960,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_pch_fifo_underruns(dev_priv);
/* FIXME unify this for all platforms */
- if (!new_crtc_state->active &&
+ if (!new_crtc_state->base.active &&
!HAS_GMCH(dev_priv) &&
dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(intel_state,
- new_intel_crtc_state);
+ dev_priv->display.initial_watermarks(state,
+ new_crtc_state);
}
}
- /* FIXME: Eventually get rid of our intel_crtc->config pointer */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
- to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
+ /* FIXME: Eventually get rid of our crtc->config pointer */
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ crtc->config = new_crtc_state;
- if (intel_state->modeset) {
- drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
+ if (state->modeset) {
+ drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
intel_set_cdclk_pre_plane_update(dev_priv,
- &intel_state->cdclk.actual,
+ &state->cdclk.actual,
&dev_priv->cdclk.actual,
- intel_state->cdclk.pipe);
+ state->cdclk.pipe);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -13850,31 +13987,37 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (!intel_can_enable_sagv(state))
intel_disable_sagv(dev_priv);
- intel_modeset_verify_disabled(dev, state);
+ intel_modeset_verify_disabled(dev_priv, state);
}
/* Complete the events for pipes that have now been disabled */
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
bool modeset = needs_modeset(new_crtc_state);
/* Complete events for the now-disabled pipes here. */
- if (modeset && !new_crtc_state->active && new_crtc_state->event) {
+ if (modeset && !new_crtc_state->base.active && new_crtc_state->base.event) {
spin_lock_irq(&dev->event_lock);
- drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+ drm_crtc_send_vblank_event(&crtc->base, new_crtc_state->base.event);
spin_unlock_irq(&dev->event_lock);
- new_crtc_state->event = NULL;
+ new_crtc_state->base.event = NULL;
}
}
+ if (state->modeset)
+ intel_encoders_update_prepare(state);
+
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.update_crtcs(state);
- if (intel_state->modeset)
+ if (state->modeset) {
+ intel_encoders_update_complete(state);
+
intel_set_cdclk_post_plane_update(dev_priv,
- &intel_state->cdclk.actual,
+ &state->cdclk.actual,
&dev_priv->cdclk.actual,
- intel_state->cdclk.pipe);
+ state->cdclk.pipe);
+ }
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
@@ -13885,16 +14028,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* - switch over to the vblank wait helper in the core after that since
* we don't need our special handling any more.
*/
- drm_atomic_helper_wait_for_flip_done(dev, state);
+ drm_atomic_helper_wait_for_flip_done(dev, &state->base);
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-
- if (new_crtc_state->active &&
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->base.active &&
!needs_modeset(new_crtc_state) &&
- (new_intel_crtc_state->base.color_mgmt_changed ||
- new_intel_crtc_state->update_pipe))
- intel_color_load_luts(new_intel_crtc_state);
+ (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe))
+ intel_color_load_luts(new_crtc_state);
}
/*
@@ -13904,16 +14045,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*
* TODO: Move this (and other cleanup) to an async worker eventually.
*/
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
-
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(intel_state,
- new_intel_crtc_state);
+ dev_priv->display.optimize_watermarks(state,
+ new_crtc_state);
}
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ intel_post_plane_update(old_crtc_state);
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
@@ -13921,15 +14060,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
- if (intel_state->modeset)
- intel_verify_planes(intel_state);
+ if (state->modeset)
+ intel_verify_planes(state);
- if (intel_state->modeset && intel_can_enable_sagv(state))
+ if (state->modeset && intel_can_enable_sagv(state))
intel_enable_sagv(dev_priv);
- drm_atomic_helper_commit_hw_done(state);
+ drm_atomic_helper_commit_hw_done(&state->base);
- if (intel_state->modeset) {
+ if (state->modeset) {
/* As one of the primary mmio accessors, KMS has a high
* likelihood of triggering bugs in unclaimed access. After we
* finish modesetting, see if an error has been flagged, and if
@@ -13939,7 +14078,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
- intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
/*
* Defer the cleanup of the old state to a separate worker to not
@@ -13949,14 +14088,14 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* schedule point (cond_resched()) here anyway to keep latencies
* down.
*/
- INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
- queue_work(system_highpri_wq, &state->commit_work);
+ INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
+ queue_work(system_highpri_wq, &state->base.commit_work);
}
static void intel_atomic_commit_work(struct work_struct *work)
{
- struct drm_atomic_state *state =
- container_of(work, struct drm_atomic_state, commit_work);
+ struct intel_atomic_state *state =
+ container_of(work, struct intel_atomic_state, base.commit_work);
intel_atomic_commit_tail(state);
}
@@ -13986,42 +14125,31 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
return NOTIFY_DONE;
}
-static void intel_atomic_track_fbs(struct drm_atomic_state *state)
+static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
- struct drm_plane_state *old_plane_state, *new_plane_state;
- struct drm_plane *plane;
+ struct intel_plane_state *old_plane_state, *new_plane_state;
+ struct intel_plane *plane;
int i;
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
- i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
- intel_fb_obj(new_plane_state->fb),
- to_intel_plane(plane)->frontbuffer_bit);
+ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i)
+ i915_gem_track_fb(intel_fb_obj(old_plane_state->base.fb),
+ intel_fb_obj(new_plane_state->base.fb),
+ plane->frontbuffer_bit);
}
-/**
- * intel_atomic_commit - commit validated state object
- * @dev: DRM device
- * @state: the top-level driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a top-level state object that has been validated
- * with drm_atomic_helper_check().
- *
- * RETURNS
- * Zero for success or -errno.
- */
static int intel_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
+ struct drm_atomic_state *_state,
bool nonblock)
{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- drm_atomic_state_get(state);
- i915_sw_fence_init(&intel_state->commit_ready,
+ drm_atomic_state_get(&state->base);
+ i915_sw_fence_init(&state->commit_ready,
intel_atomic_commit_ready);
/*
@@ -14041,63 +14169,61 @@ static int intel_atomic_commit(struct drm_device *dev,
* FIXME doing watermarks and fb cleanup from a vblank worker
* (assuming we had any) would solve these problems.
*/
- if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
+ if (INTEL_GEN(dev_priv) < 9 && state->base.legacy_cursor_update) {
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
- for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
if (new_crtc_state->wm.need_postvbl_update ||
new_crtc_state->update_wm_post)
- state->legacy_cursor_update = false;
+ state->base.legacy_cursor_update = false;
}
- ret = intel_atomic_prepare_commit(dev, state);
+ ret = intel_atomic_prepare_commit(state);
if (ret) {
DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
- i915_sw_fence_commit(&intel_state->commit_ready);
- intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+ i915_sw_fence_commit(&state->commit_ready);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
- ret = drm_atomic_helper_setup_commit(state, nonblock);
+ ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
if (!ret)
- ret = drm_atomic_helper_swap_state(state, true);
+ ret = drm_atomic_helper_swap_state(&state->base, true);
if (ret) {
- i915_sw_fence_commit(&intel_state->commit_ready);
+ i915_sw_fence_commit(&state->commit_ready);
- drm_atomic_helper_cleanup_planes(dev, state);
- intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref);
+ drm_atomic_helper_cleanup_planes(dev, &state->base);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
intel_atomic_track_fbs(state);
- if (intel_state->modeset) {
- memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
- sizeof(intel_state->min_cdclk));
- memcpy(dev_priv->min_voltage_level,
- intel_state->min_voltage_level,
- sizeof(intel_state->min_voltage_level));
- dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->cdclk.force_min_cdclk =
- intel_state->cdclk.force_min_cdclk;
+ if (state->modeset) {
+ memcpy(dev_priv->min_cdclk, state->min_cdclk,
+ sizeof(state->min_cdclk));
+ memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
+ sizeof(state->min_voltage_level));
+ dev_priv->active_crtcs = state->active_crtcs;
+ dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
- intel_cdclk_swap_state(intel_state);
+ intel_cdclk_swap_state(state);
}
- drm_atomic_state_get(state);
- INIT_WORK(&state->commit_work, intel_atomic_commit_work);
+ drm_atomic_state_get(&state->base);
+ INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
- i915_sw_fence_commit(&intel_state->commit_ready);
- if (nonblock && intel_state->modeset) {
- queue_work(dev_priv->modeset_wq, &state->commit_work);
+ i915_sw_fence_commit(&state->commit_ready);
+ if (nonblock && state->modeset) {
+ queue_work(dev_priv->modeset_wq, &state->base.commit_work);
} else if (nonblock) {
- queue_work(system_unbound_wq, &state->commit_work);
+ queue_work(system_unbound_wq, &state->base.commit_work);
} else {
- if (intel_state->modeset)
+ if (state->modeset)
flush_workqueue(dev_priv->modeset_wq);
intel_atomic_commit_tail(state);
}
@@ -14105,18 +14231,6 @@ static int intel_atomic_commit(struct drm_device *dev,
return 0;
}
-static const struct drm_crtc_funcs intel_crtc_funcs = {
- .gamma_set = drm_atomic_helper_legacy_gamma_set,
- .set_config = drm_atomic_helper_set_config,
- .destroy = intel_crtc_destroy,
- .page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = intel_crtc_duplicate_state,
- .atomic_destroy_state = intel_crtc_destroy_state,
- .set_crc_source = intel_crtc_set_crc_source,
- .verify_crc_source = intel_crtc_verify_crc_source,
- .get_crc_sources = intel_crtc_get_crc_sources,
-};
-
struct wait_rps_boost {
struct wait_queue_entry wait;
@@ -14250,9 +14364,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
int ret;
if (old_obj) {
- struct drm_crtc_state *crtc_state =
- drm_atomic_get_new_crtc_state(new_state->state,
- plane->state->crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(intel_state,
+ to_intel_crtc(plane->state->crtc));
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -14413,7 +14527,7 @@ static void intel_begin_crtc_commit(struct intel_atomic_state *state,
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- bool modeset = needs_modeset(&new_crtc_state->base);
+ bool modeset = needs_modeset(new_crtc_state);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -14466,7 +14580,7 @@ static void intel_finish_crtc_commit(struct intel_atomic_state *state,
intel_pipe_update_end(new_crtc_state);
if (new_crtc_state->update_pipe &&
- !needs_modeset(&new_crtc_state->base) &&
+ !needs_modeset(new_crtc_state) &&
old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -14580,7 +14694,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
* When crtc is inactive or there is a modeset pending,
* wait for it to complete in the slowpath
*/
- if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+ if (!crtc_state->base.active || needs_modeset(crtc_state) ||
crtc_state->update_pipe)
goto slow;
@@ -14910,8 +15024,76 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
scaler_state->scaler_id = -1;
}
+#define INTEL_CRTC_FUNCS \
+ .gamma_set = drm_atomic_helper_legacy_gamma_set, \
+ .set_config = drm_atomic_helper_set_config, \
+ .destroy = intel_crtc_destroy, \
+ .page_flip = drm_atomic_helper_page_flip, \
+ .atomic_duplicate_state = intel_crtc_duplicate_state, \
+ .atomic_destroy_state = intel_crtc_destroy_state, \
+ .set_crc_source = intel_crtc_set_crc_source, \
+ .verify_crc_source = intel_crtc_verify_crc_source, \
+ .get_crc_sources = intel_crtc_get_crc_sources
+
+static const struct drm_crtc_funcs bdw_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = bdw_enable_vblank,
+ .disable_vblank = bdw_disable_vblank,
+};
+
+static const struct drm_crtc_funcs ilk_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = ilk_enable_vblank,
+ .disable_vblank = ilk_disable_vblank,
+};
+
+static const struct drm_crtc_funcs g4x_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = g4x_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i965_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i965_enable_vblank,
+ .disable_vblank = i965_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i945gm_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i945gm_enable_vblank,
+ .disable_vblank = i945gm_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i915_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+};
+
+static const struct drm_crtc_funcs i8xx_crtc_funcs = {
+ INTEL_CRTC_FUNCS,
+
+ /* no hw vblank counter */
+ .enable_vblank = i8xx_enable_vblank,
+ .disable_vblank = i8xx_disable_vblank,
+};
+
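/*
 * Shape of the change above: one shared initializer macro plus small
 * per-platform tables that differ only in their vblank hooks, selected
 * once at init time. A toy version with two "platforms" (the real code
 * picks a table from HAS_GMCH() and the device generation):
 */
#include <stdio.h>

struct crtc_funcs {
        void (*destroy)(void);
        int (*get_vblank_counter)(void);
};

static void common_destroy(void) { puts("destroy"); }
static int hw_counter(void) { return 42; }      /* has a hw counter */
static int no_counter(void) { return 0; }       /* i8xx-style: none */

#define COMMON_CRTC_FUNCS .destroy = common_destroy

static const struct crtc_funcs modern_funcs = {
        COMMON_CRTC_FUNCS,
        .get_vblank_counter = hw_counter,
};

static const struct crtc_funcs legacy_funcs = {
        COMMON_CRTC_FUNCS,
        .get_vblank_counter = no_counter,
};

int main(void)
{
        int gen = 8;
        const struct crtc_funcs *funcs =
                gen >= 5 ? &modern_funcs : &legacy_funcs;

        printf("vblank counter: %d\n", funcs->get_vblank_counter());
        funcs->destroy();
        return 0;
}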
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ const struct drm_crtc_funcs *funcs;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state = NULL;
struct intel_plane *primary = NULL;
@@ -14955,10 +15137,28 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
}
intel_crtc->plane_ids_mask |= BIT(cursor->id);
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_G4X(dev_priv))
+ funcs = &g4x_crtc_funcs;
+ else if (IS_GEN(dev_priv, 4))
+ funcs = &i965_crtc_funcs;
+ else if (IS_I945GM(dev_priv))
+ funcs = &i945gm_crtc_funcs;
+ else if (IS_GEN(dev_priv, 3))
+ funcs = &i915_crtc_funcs;
+ else
+ funcs = &i8xx_crtc_funcs;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 8)
+ funcs = &bdw_crtc_funcs;
+ else
+ funcs = &ilk_crtc_funcs;
+ }
+
ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
&primary->base, &cursor->base,
- &intel_crtc_funcs,
- "pipe %c", pipe_name(pipe));
+ funcs, "pipe %c", pipe_name(pipe));
if (ret)
goto fail;
@@ -15114,12 +15314,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ELKHARTLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ /* TODO: initialize TC ports as well */
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ } else if (IS_ELKHARTLAKE(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
+ intel_ddi_init(dev_priv, PORT_D);
icl_dsi_init(dev_priv);
- } else if (INTEL_GEN(dev_priv) >= 11) {
+ } else if (IS_GEN(dev_priv, 11)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
@@ -15775,8 +15981,8 @@ static void sanitize_watermarks(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
struct drm_modeset_acquire_ctx ctx;
int ret;
int i;
@@ -15831,13 +16037,11 @@ retry:
}
/* Write calculated watermark values back */
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
-
- cs->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(intel_state, cs);
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ crtc_state->wm.need_postvbl_update = true;
+ dev_priv->display.optimize_watermarks(intel_state, crtc_state);
- to_intel_crtc_state(crtc->state)->wm = cs->wm;
+ to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
put_state:
@@ -16495,6 +16699,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
&pll->state.hw_state);
+
+ if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ pll->wakeref = intel_display_power_get(dev_priv,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
pll->state.crtc_mask = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -16744,6 +16955,17 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
+
+ /* Sanitize the TypeC port mode upfront, encoders depend on this */
+ for_each_intel_encoder(dev, encoder) {
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ /* We need to sanitize only the MST primary port. */
+ if (encoder->type != INTEL_OUTPUT_DP_MST &&
+ intel_phy_is_tc(dev_priv, phy))
+ intel_tc_port_sanitize(enc_to_dig_port(&encoder->base));
+ }
+
get_encoder_power_domains(dev_priv);
if (HAS_PCH_IBX(dev_priv))
@@ -16804,7 +17026,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
u64 put_domains;
crtc_state = to_intel_crtc_state(crtc->base.state);
- put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
+ put_domains = modeset_get_crtc_power_domains(crtc_state);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -16866,7 +17088,7 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
}
-void intel_modeset_cleanup(struct drm_device *dev)
+void intel_modeset_driver_remove(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16982,7 +17204,7 @@ struct intel_display_error_state {
u32 vtotal;
u32 vblank;
u32 vsync;
- } transcoder[4];
+ } transcoder[5];
};
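/*
 * The array bump above makes room for the new TRANSCODER_D entry. A
 * common way to keep such snapshot arrays from drifting out of sync is
 * to size them from the enum itself (a suggestion for illustration,
 * not what the patch does):
 */
#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C,
                  TRANSCODER_D, TRANSCODER_EDP, NUM_ERROR_TRANSCODERS };

struct error_state {
        unsigned int htotal;
} transcoder_state[NUM_ERROR_TRANSCODERS];      /* grows with the enum */

int main(void)
{
        printf("%zu slots\n",
               sizeof(transcoder_state) / sizeof(transcoder_state[0]));
        return 0;
}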
struct intel_display_error_state *
@@ -16993,6 +17215,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
TRANSCODER_A,
TRANSCODER_B,
TRANSCODER_C,
+ TRANSCODER_D,
TRANSCODER_EDP,
};
int i;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index ee6b8194a459..d2c718f25478 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -45,6 +45,8 @@ enum i915_gpio {
GPIOK,
GPIOL,
GPIOM,
+ GPION,
+ GPIOO,
};
/*
@@ -58,6 +60,7 @@ enum pipe {
PIPE_A = 0,
PIPE_B,
PIPE_C,
+ PIPE_D,
_PIPE_EDP,
I915_MAX_PIPES = _PIPE_EDP
@@ -75,6 +78,7 @@ enum transcoder {
TRANSCODER_A = PIPE_A,
TRANSCODER_B = PIPE_B,
TRANSCODER_C = PIPE_C,
+ TRANSCODER_D = PIPE_D,
/*
* The following transcoders can map to any pipe, their enum value
@@ -98,6 +102,8 @@ static inline const char *transcoder_name(enum transcoder transcoder)
return "B";
case TRANSCODER_C:
return "C";
+ case TRANSCODER_D:
+ return "D";
case TRANSCODER_EDP:
return "EDP";
case TRANSCODER_DSI_A:
@@ -173,6 +179,12 @@ static inline const char *port_identifier(enum port port)
return "Port E";
case PORT_F:
return "Port F";
+ case PORT_G:
+ return "Port G";
+ case PORT_H:
+ return "Port H";
+ case PORT_I:
+ return "Port I";
default:
return "<invalid>";
}
@@ -185,14 +197,15 @@ enum tc_port {
PORT_TC2,
PORT_TC3,
PORT_TC4,
+ PORT_TC5,
+ PORT_TC6,
I915_MAX_TC_PORTS
};
-enum tc_port_type {
- TC_PORT_UNKNOWN = 0,
- TC_PORT_TYPEC,
- TC_PORT_TBT,
+enum tc_port_mode {
+ TC_PORT_TBT_ALT,
+ TC_PORT_DP_ALT,
TC_PORT_LEGACY,
};
@@ -229,6 +242,30 @@ struct intel_link_m_n {
u32 link_n;
};
+enum phy {
+ PHY_NONE = -1,
+
+ PHY_A = 0,
+ PHY_B,
+ PHY_C,
+ PHY_D,
+ PHY_E,
+ PHY_F,
+ PHY_G,
+ PHY_H,
+ PHY_I,
+
+ I915_MAX_PHYS
+};
+
+#define phy_name(a) ((a) + 'A')
+
+enum phy_fia {
+ FIA1,
+ FIA2,
+ FIA3,
+};
+
#define for_each_pipe(__dev_priv, __p) \
for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
@@ -254,6 +291,10 @@ struct intel_link_m_n {
for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
for_each_if((__ports_mask) & BIT(__port))
+#define for_each_phy_masked(__phy, __phys_mask) \
+ for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
+ for_each_if((__phys_mask) & BIT(__phy))
+
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
@@ -357,5 +398,6 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
+enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
#endif
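/*
 * Quick standalone demonstration of the new helpers in this header:
 * phy_name() turns the enum into a letter and for_each_phy_masked()
 * walks only the PHYs present in a bitmask. The enum is truncated and
 * the plain "if" stands in for the kernel's for_each_if().
 */
#include <stdio.h>

enum phy { PHY_A = 0, PHY_B, PHY_C, PHY_D, I915_MAX_PHYS };

#define BIT(n) (1u << (n))
#define phy_name(a) ((a) + 'A')

#define for_each_phy_masked(__phy, __phys_mask) \
        for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
                if ((__phys_mask) & BIT(__phy))

int main(void)
{
        enum phy phy;
        unsigned int mask = BIT(PHY_A) | BIT(PHY_C);

        for_each_phy_masked(phy, mask)
                printf("PHY %c\n", phy_name(phy));      /* prints A and C */
        return 0;
}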
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 2d1939db108f..dd2a50b8ba0a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -17,13 +17,17 @@
#include "intel_drv.h"
#include "intel_hotplug.h"
#include "intel_sideband.h"
+#include "intel_tc.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain)
+intel_display_power_domain_str(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain)
{
+ bool ddi_tc_ports = IS_GEN(i915, 12);
+
switch (domain) {
case POWER_DOMAIN_DISPLAY_CORE:
return "DISPLAY_CORE";
@@ -33,22 +37,28 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PIPE_B";
case POWER_DOMAIN_PIPE_C:
return "PIPE_C";
+ case POWER_DOMAIN_PIPE_D:
+ return "PIPE_D";
case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
return "PIPE_A_PANEL_FITTER";
case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
return "PIPE_B_PANEL_FITTER";
case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
return "PIPE_C_PANEL_FITTER";
+ case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
+ return "PIPE_D_PANEL_FITTER";
case POWER_DOMAIN_TRANSCODER_A:
return "TRANSCODER_A";
case POWER_DOMAIN_TRANSCODER_B:
return "TRANSCODER_B";
case POWER_DOMAIN_TRANSCODER_C:
return "TRANSCODER_C";
+ case POWER_DOMAIN_TRANSCODER_D:
+ return "TRANSCODER_D";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
- case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
- return "TRANSCODER_EDP_VDSC";
+ case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
+ return "TRANSCODER_VDSC_PW2";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -60,11 +70,23 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
case POWER_DOMAIN_PORT_DDI_C_LANES:
return "PORT_DDI_C_LANES";
case POWER_DOMAIN_PORT_DDI_D_LANES:
- return "PORT_DDI_D_LANES";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES !=
+ POWER_DOMAIN_PORT_DDI_TC1_LANES);
+ return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
- return "PORT_DDI_E_LANES";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES !=
+ POWER_DOMAIN_PORT_DDI_TC2_LANES);
+ return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES";
case POWER_DOMAIN_PORT_DDI_F_LANES:
- return "PORT_DDI_F_LANES";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES !=
+ POWER_DOMAIN_PORT_DDI_TC3_LANES);
+ return ddi_tc_ports ? "PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES";
+ case POWER_DOMAIN_PORT_DDI_TC4_LANES:
+ return "PORT_DDI_TC4_LANES";
+ case POWER_DOMAIN_PORT_DDI_TC5_LANES:
+ return "PORT_DDI_TC5_LANES";
+ case POWER_DOMAIN_PORT_DDI_TC6_LANES:
+ return "PORT_DDI_TC6_LANES";
case POWER_DOMAIN_PORT_DDI_A_IO:
return "PORT_DDI_A_IO";
case POWER_DOMAIN_PORT_DDI_B_IO:
@@ -72,11 +94,23 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
case POWER_DOMAIN_PORT_DDI_C_IO:
return "PORT_DDI_C_IO";
case POWER_DOMAIN_PORT_DDI_D_IO:
- return "PORT_DDI_D_IO";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO !=
+ POWER_DOMAIN_PORT_DDI_TC1_IO);
+ return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO";
case POWER_DOMAIN_PORT_DDI_E_IO:
- return "PORT_DDI_E_IO";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO !=
+ POWER_DOMAIN_PORT_DDI_TC2_IO);
+ return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO";
case POWER_DOMAIN_PORT_DDI_F_IO:
- return "PORT_DDI_F_IO";
+ BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO !=
+ POWER_DOMAIN_PORT_DDI_TC3_IO);
+ return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO";
+ case POWER_DOMAIN_PORT_DDI_TC4_IO:
+ return "PORT_DDI_TC4_IO";
+ case POWER_DOMAIN_PORT_DDI_TC5_IO:
+ return "PORT_DDI_TC5_IO";
+ case POWER_DOMAIN_PORT_DDI_TC6_IO:
+ return "PORT_DDI_TC6_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -94,11 +128,20 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
case POWER_DOMAIN_AUX_C:
return "AUX_C";
case POWER_DOMAIN_AUX_D:
- return "AUX_D";
+ BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1);
+ return ddi_tc_ports ? "AUX_TC1" : "AUX_D";
case POWER_DOMAIN_AUX_E:
- return "AUX_E";
+ BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2);
+ return ddi_tc_ports ? "AUX_TC2" : "AUX_E";
case POWER_DOMAIN_AUX_F:
- return "AUX_F";
+ BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3);
+ return ddi_tc_ports ? "AUX_TC3" : "AUX_F";
+ case POWER_DOMAIN_AUX_TC4:
+ return "AUX_TC4";
+ case POWER_DOMAIN_AUX_TC5:
+ return "AUX_TC5";
+ case POWER_DOMAIN_AUX_TC6:
+ return "AUX_TC6";
case POWER_DOMAIN_AUX_IO_A:
return "AUX_IO_A";
case POWER_DOMAIN_AUX_TBT1:
@@ -109,6 +152,10 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "AUX_TBT3";
case POWER_DOMAIN_AUX_TBT4:
return "AUX_TBT4";
+ case POWER_DOMAIN_AUX_TBT5:
+ return "AUX_TBT5";
+ case POWER_DOMAIN_AUX_TBT6:
+ return "AUX_TBT6";
case POWER_DOMAIN_GMBUS:
return "GMBUS";
case POWER_DOMAIN_INIT:
@@ -117,6 +164,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "MODESET";
case POWER_DOMAIN_GT_IRQ:
return "GT_IRQ";
+ case POWER_DOMAIN_DPLL_DC_OFF:
+ return "DPLL_DC_OFF";
default:
MISSING_CASE(domain);
return "?";
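Because the TC domains alias the legacy D/E/F enum values (see the intel_display_power.h changes below), the same numeric domain now prints a platform-appropriate name, and the BUILD_BUG_ON()s keep the aliases from drifting apart. For example:

	/* same enum value, two spellings */
	intel_display_power_domain_str(i915, POWER_DOMAIN_AUX_D);
	/* returns "AUX_TC1" on gen 12, "AUX_D" everywhere else */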
@@ -269,11 +318,17 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- WARN_ON(intel_wait_for_register(&dev_priv->uncore,
- regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- 1));
+ if (intel_wait_for_register(&dev_priv->uncore,
+ regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ 1)) {
+ DRM_DEBUG_KMS("%s power well enable timeout\n",
+ power_well->desc->name);
+
+ /* An AUX timeout is expected if the TBT DP tunnel is down. */
+ WARN_ON(!power_well->desc->hsw.is_tc_tbt);
+ }
}
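This rework assumes the usual intel_wait_for_register() contract: poll until (read(reg) & mask) == value, returning 0 on success and a negative error on timeout. Here mask and value are both the same bit, sketched as:

	/* wait up to 1 ms for the status bit to latch */
	intel_wait_for_register(&dev_priv->uncore, regs->driver,
				HSW_PWR_WELL_CTL_STATE(pw_idx),	/* mask */
				HSW_PWR_WELL_CTL_STATE(pw_idx),	/* value */
				1);

Only TC/TBT AUX wells are allowed to time out quietly, since a downed TBT DP tunnel legitimately never acks the enable request; any other well timing out still trips the WARN.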
static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
@@ -388,7 +443,7 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
-#define ICL_AUX_PW_TO_PORT(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+#define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
@@ -396,21 +451,29 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
+ int wa_idx_max;
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
+ if (INTEL_GEN(dev_priv) < 12) {
+ val = I915_READ(ICL_PORT_CL_DW12(phy));
+ I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
+ }
hsw_wait_for_power_well_enable(dev_priv, power_well);
- /* Display WA #1178: icl */
- if (IS_ICELAKE(dev_priv) &&
- pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
- !intel_bios_is_port_edp(dev_priv, port)) {
+ /* Display WA #1178: icl, tgl */
+ if (IS_TIGERLAKE(dev_priv))
+ wa_idx_max = ICL_PW_CTL_IDX_AUX_C;
+ else
+ wa_idx_max = ICL_PW_CTL_IDX_AUX_B;
+
+ if (!IS_ELKHARTLAKE(dev_priv) &&
+ pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= wa_idx_max &&
+ !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
@@ -423,11 +486,13 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
- enum port port = ICL_AUX_PW_TO_PORT(pw_idx);
+ enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- val = I915_READ(ICL_PORT_CL_DW12(port));
- I915_WRITE(ICL_PORT_CL_DW12(port), val & ~ICL_LANE_ENABLE_AUX);
+ if (INTEL_GEN(dev_priv) < 12) {
+ val = I915_READ(ICL_PORT_CL_DW12(phy));
+ I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
+ }
val = I915_READ(regs->driver);
I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
@@ -441,26 +506,108 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int pw_idx = power_well->desc->hsw.idx;
+
+ return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+static u64 async_put_domains_mask(struct i915_power_domains *power_domains);
+
+static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ int refs = hweight64(power_well->desc->domains &
+ async_put_domains_mask(&dev_priv->power_domains));
+
+ WARN_ON(refs > power_well->count);
+
+ return refs;
+}
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
+ struct intel_digital_port *dig_port = NULL;
+ struct intel_encoder *encoder;
+
+ /* Bypass the check if all references are released asynchronously */
+ if (power_well_async_ref_count(dev_priv, power_well) ==
+ power_well->count)
+ return;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (!intel_phy_is_tc(dev_priv, phy))
+ continue;
+
+		/* MST streams share the primary encoder's dig_port; check that instead */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ dig_port = enc_to_dig_port(&encoder->base);
+ if (WARN_ON(!dig_port))
+ continue;
+
+ if (dig_port->aux_ch != aux_ch) {
+ dig_port = NULL;
+ continue;
+ }
+
+ break;
+ }
+
+ if (WARN_ON(!dig_port))
+ return;
+
+ WARN_ON(!intel_tc_port_ref_held(dig_port));
+}
+
+#else
+
+static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+}
+
+#endif
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- int pw_idx = power_well->desc->hsw.idx;
- bool is_tbt = power_well->desc->hsw.is_tc_tbt;
- enum aux_ch aux_ch;
+ enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well);
u32 val;
- aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
- ICL_AUX_PW_TO_CH(pw_idx);
+ icl_tc_port_assert_ref_held(dev_priv, power_well);
+
val = I915_READ(DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (is_tbt)
+ if (power_well->desc->hsw.is_tc_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
}
+static void
+icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ icl_tc_port_assert_ref_held(dev_priv, power_well);
+
+ hsw_power_well_disable(dev_priv, power_well);
+}
+
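Both TC AUX callbacks now assert that the caller holds the TC port reference; the enable path additionally steers the AUX channel between TBT and non-TBT IO before powering the well. The expected caller-side bracketing, sketched from the intel_dp.c AUX-transfer changes further down:

	intel_tc_port_lock(dig_port);
	wakeref = intel_display_power_get(i915, aux_domain);	/* -> ..._enable() */
	/* ... AUX transfer ... */
	intel_display_power_put_async(i915, aux_domain, wakeref);
	intel_tc_port_unlock(dig_port);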
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -1071,7 +1218,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
intel_power_sequencer_reset(dev_priv);
@@ -1575,12 +1722,15 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains)
static void print_power_domains(struct i915_power_domains *power_domains,
const char *prefix, u64 mask)
{
+ struct drm_i915_private *i915 =
+ container_of(power_domains, struct drm_i915_private,
+ power_domains);
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask));
for_each_power_domain(domain, mask)
DRM_DEBUG_DRIVER("%s use_count %d\n",
- intel_display_power_domain_str(domain),
+ intel_display_power_domain_str(i915, domain),
power_domains->domain_use_count[domain]);
}
@@ -1750,7 +1900,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
- const char *name = intel_display_power_domain_str(domain);
+ const char *name = intel_display_power_domain_str(dev_priv, domain);
power_domains = &dev_priv->power_domains;
@@ -2359,7 +2509,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
*/
#define ICL_PW_2_POWER_DOMAINS ( \
ICL_PW_3_POWER_DOMAINS | \
- BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
* - KVMR (HW control)
@@ -2368,6 +2518,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
ICL_PW_2_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \
BIT_ULL(POWER_DOMAIN_INIT))
#define ICL_DDI_IO_A_POWER_DOMAINS ( \
@@ -2405,6 +2556,93 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_AUX_TBT4))
+#define TGL_PW_5_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PIPE_D) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_PW_4_POWER_DOMAINS ( \
+ TGL_PW_5_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_PW_3_POWER_DOMAINS ( \
+ TGL_PW_4_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TC6) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_PW_2_POWER_DOMAINS ( \
+ TGL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
+ TGL_PW_2_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+
+#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO))
+#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO))
+#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO))
+#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO))
+#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO))
+#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO))
+
+#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC1))
+#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC2))
+#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC3))
+#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC4))
+#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC5))
+#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TC6))
+#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT5))
+#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_AUX_TBT6))
+
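These masks nest by construction, PW_5 inside PW_4 inside PW_3 inside PW_2, matching the wells' enable order. Since every operand is a compile-time BIT_ULL() constant, the containment could be checked at build time; an illustrative (not in-tree) assertion:

	BUILD_BUG_ON(TGL_PW_5_POWER_DOMAINS & ~TGL_PW_4_POWER_DOMAINS);
	BUILD_BUG_ON(TGL_PW_4_POWER_DOMAINS & ~TGL_PW_3_POWER_DOMAINS);
	BUILD_BUG_ON(TGL_PW_3_POWER_DOMAINS & ~TGL_PW_2_POWER_DOMAINS);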
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
.sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
@@ -3113,7 +3351,7 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
.sync_hw = hsw_power_well_sync_hw,
.enable = icl_tc_phy_aux_power_well_enable,
- .disable = hsw_power_well_disable,
+ .disable = icl_tc_phy_aux_power_well_disable,
.is_enabled = hsw_power_well_enabled,
};
@@ -3362,6 +3600,335 @@ static const struct i915_power_well_desc icl_power_wells[] = {
},
};
+static const struct i915_power_well_desc tgl_power_wells[] = {
+ {
+ .name = "always-on",
+ .always_on = true,
+ .domains = POWER_DOMAIN_MASK,
+ .ops = &i9xx_always_on_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
+ .domains = 0,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_1,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DC off",
+ .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
+ .ops = &gen9_dc_off_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ },
+ {
+ .name = "power well 2",
+ .domains = TGL_PW_2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = SKL_DISP_PW_2,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "power well 3",
+ .domains = TGL_PW_3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+ .hsw.irq_pipe_mask = BIT(PIPE_B),
+ .hsw.has_vga = true,
+ .hsw.has_fuses = true,
+ },
+ },
+ {
+ .name = "DDI A IO",
+ .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+ }
+ },
+ {
+ .name = "DDI B IO",
+ .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+ }
+ },
+ {
+ .name = "DDI C IO",
+ .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+ }
+ },
+ {
+ .name = "DDI TC1 IO",
+ .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
+ },
+ },
+ {
+ .name = "DDI TC2 IO",
+ .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
+ },
+ },
+ {
+ .name = "DDI TC3 IO",
+ .domains = TGL_DDI_IO_TC3_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
+ },
+ },
+ {
+ .name = "DDI TC4 IO",
+ .domains = TGL_DDI_IO_TC4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
+ },
+ },
+ {
+ .name = "DDI TC5 IO",
+ .domains = TGL_DDI_IO_TC5_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
+ },
+ },
+ {
+ .name = "DDI TC6 IO",
+ .domains = TGL_DDI_IO_TC6_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_ddi_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
+ },
+ },
+ {
+ .name = "AUX A",
+ .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+ },
+ },
+ {
+ .name = "AUX B",
+ .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+ },
+ },
+ {
+ .name = "AUX C",
+ .domains = ICL_AUX_C_IO_POWER_DOMAINS,
+ .ops = &icl_combo_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ },
+ },
+ {
+ .name = "AUX TC1",
+ .domains = TGL_AUX_TC1_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC2",
+ .domains = TGL_AUX_TC2_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC3",
+ .domains = TGL_AUX_TC3_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC4",
+ .domains = TGL_AUX_TC4_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC5",
+ .domains = TGL_AUX_TC5_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TC6",
+ .domains = TGL_AUX_TC6_IO_POWER_DOMAINS,
+ .ops = &icl_tc_phy_aux_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
+ .hsw.is_tc_tbt = false,
+ },
+ },
+ {
+ .name = "AUX TBT1",
+ .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT2",
+ .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT3",
+ .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT4",
+ .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT5",
+ .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "AUX TBT6",
+ .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &icl_aux_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
+ .hsw.is_tc_tbt = true,
+ },
+ },
+ {
+ .name = "power well 4",
+ .domains = TGL_PW_4_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_C),
+ }
+ },
+ {
+ .name = "power well 5",
+ .domains = TGL_PW_5_POWER_DOMAINS,
+ .ops = &hsw_power_well_ops,
+ .id = DISP_PW_ID_NONE,
+ {
+ .hsw.regs = &hsw_power_well_regs,
+ .hsw.idx = TGL_PW_CTL_IDX_PW_5,
+ .hsw.has_fuses = true,
+ .hsw.irq_pipe_mask = BIT(PIPE_D),
+ },
+ },
+};
+
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
int disable_power_well)
@@ -3489,7 +4056,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_GEN(dev_priv, 11)) {
+ if (IS_GEN(dev_priv, 12)) {
+ err = set_power_wells(power_domains, tgl_power_wells);
+ } else if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -4337,7 +4906,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
*
* It will return with power domains disabled (to be enabled later by
* intel_power_domains_enable()) and must be paired with
- * intel_power_domains_fini_hw().
+ * intel_power_domains_driver_remove().
*/
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
@@ -4389,7 +4958,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
}
/**
- * intel_power_domains_fini_hw - deinitialize hw power domain state
+ * intel_power_domains_driver_remove - deinitialize hw power domain state
* @i915: i915 device instance
*
* De-initializes the display power domain HW state. It also ensures that the
@@ -4399,7 +4968,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
-void intel_power_domains_fini_hw(struct drm_i915_private *i915)
+void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref __maybe_unused =
fetch_and_zero(&i915->power_domains.wakeref);
@@ -4553,7 +5122,8 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
for_each_power_domain(domain, power_well->desc->domains)
DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(domain),
+ intel_display_power_domain_str(i915,
+ domain),
power_domains->domain_use_count[domain]);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index ff57b0a7fe59..e4d2c1ba24b0 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -18,28 +18,47 @@ enum intel_display_power_domain {
POWER_DOMAIN_PIPE_A,
POWER_DOMAIN_PIPE_B,
POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_D,
POWER_DOMAIN_PIPE_A_PANEL_FITTER,
POWER_DOMAIN_PIPE_B_PANEL_FITTER,
POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_D_PANEL_FITTER,
POWER_DOMAIN_TRANSCODER_A,
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_D,
POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_EDP_VDSC,
+ /* VDSC/joining for TRANSCODER_EDP (ICL) or TRANSCODER_A (TGL) */
+ POWER_DOMAIN_TRANSCODER_VDSC_PW2,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
POWER_DOMAIN_PORT_DDI_B_LANES,
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
+ POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES,
POWER_DOMAIN_PORT_DDI_F_LANES,
+ POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES,
+ POWER_DOMAIN_PORT_DDI_TC4_LANES,
+ POWER_DOMAIN_PORT_DDI_TC5_LANES,
+ POWER_DOMAIN_PORT_DDI_TC6_LANES,
POWER_DOMAIN_PORT_DDI_A_IO,
POWER_DOMAIN_PORT_DDI_B_IO,
POWER_DOMAIN_PORT_DDI_C_IO,
POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO,
POWER_DOMAIN_PORT_DDI_E_IO,
+ POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DDI_F_IO,
+ POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO,
+ POWER_DOMAIN_PORT_DDI_G_IO,
+ POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO,
+ POWER_DOMAIN_PORT_DDI_H_IO,
+ POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO,
+ POWER_DOMAIN_PORT_DDI_I_IO,
+ POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -49,16 +68,25 @@ enum intel_display_power_domain {
POWER_DOMAIN_AUX_B,
POWER_DOMAIN_AUX_C,
POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D,
POWER_DOMAIN_AUX_E,
+ POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E,
POWER_DOMAIN_AUX_F,
+ POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F,
+ POWER_DOMAIN_AUX_TC4,
+ POWER_DOMAIN_AUX_TC5,
+ POWER_DOMAIN_AUX_TC6,
POWER_DOMAIN_AUX_IO_A,
POWER_DOMAIN_AUX_TBT1,
POWER_DOMAIN_AUX_TBT2,
POWER_DOMAIN_AUX_TBT3,
POWER_DOMAIN_AUX_TBT4,
+ POWER_DOMAIN_AUX_TBT5,
+ POWER_DOMAIN_AUX_TBT6,
POWER_DOMAIN_GMBUS,
POWER_DOMAIN_MODESET,
POWER_DOMAIN_GT_IRQ,
+ POWER_DOMAIN_DPLL_DC_OFF,
POWER_DOMAIN_INIT,
POWER_DOMAIN_NUM,
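The aliasing keeps the enum dense: the first three TC entries reuse the legacy D/E/F slots, and TC4-TC6 (plus the DDI G/H/I IO and TBT5/6 entries) extend the range without renumbering anything that follows. Illustratively, if POWER_DOMAIN_PORT_DDI_D_LANES has value N:

	POWER_DOMAIN_PORT_DDI_D_LANES == POWER_DOMAIN_PORT_DDI_TC1_LANES == N
	POWER_DOMAIN_PORT_DDI_E_LANES == POWER_DOMAIN_PORT_DDI_TC2_LANES == N + 1
	POWER_DOMAIN_PORT_DDI_F_LANES == POWER_DOMAIN_PORT_DDI_TC3_LANES == N + 2
	POWER_DOMAIN_PORT_DDI_TC4_LANES == N + 3	/* no legacy alias */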
@@ -213,7 +241,7 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv);
int intel_power_domains_init(struct drm_i915_private *dev_priv);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void icl_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
@@ -227,7 +255,8 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
const char *
-intel_display_power_domain_str(enum intel_display_power_domain domain);
+intel_display_power_domain_str(struct drm_i915_private *i915,
+ enum intel_display_power_domain domain);
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index d0fc34826771..0eb5d66f87a7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -62,6 +62,7 @@
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_sideband.h"
+#include "intel_tc.h"
#include "intel_vdsc.h"
#define DP_DPRX_ESI_LEN 14
@@ -211,47 +212,13 @@ static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
-static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- intel_wakeref_t wakeref;
- u32 lane_info;
-
- if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
- return 4;
-
- lane_info = 0;
- with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
-
- switch (lane_info) {
- default:
- MISSING_CASE(lane_info);
- /* fall through */
- case 1:
- case 2:
- case 4:
- case 8:
- return 1;
- case 3:
- case 12:
- return 2;
- case 15:
- return 4;
- }
-}
-
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
int source_max = intel_dig_port->max_lanes;
int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
- int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);
+ int fia_max = intel_tc_port_fia_max_lane_count(intel_dig_port);
return min3(source_max, sink_max, fia_max);
}
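intel_tc_port_fia_max_lane_count() folds the FIA lane assignment into the same min3() that already combines the source and sink limits. A worked example with hypothetical values:

	int source_max = 4;	/* dig_port->max_lanes */
	int sink_max = 4;	/* from the sink's DPCD */
	int fia_max = 2;	/* FIA granted 2 lanes to DP on this TC port */

	min3(source_max, sink_max, fia_max);	/* == 2 lanes */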
@@ -330,9 +297,9 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_port_is_combophy(dev_priv, port) &&
+ if (intel_phy_is_combo(dev_priv, phy) &&
!IS_ELKHARTLAKE(dev_priv) &&
!intel_dp_is_edp(intel_dp))
return 540000;
@@ -1209,7 +1176,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
- if (intel_dig_port->tc_type == TC_PORT_TBT)
+ if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
ret |= DP_AUX_CH_CTL_TBT_IO;
return ret;
@@ -1225,6 +1192,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
struct drm_i915_private *i915 =
to_i915(intel_dig_port->base.base.dev);
struct intel_uncore *uncore = &i915->uncore;
+ enum phy phy = intel_port_to_phy(i915, intel_dig_port->base.port);
+ bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
u32 aux_clock_divider;
enum intel_display_power_domain aux_domain =
@@ -1240,6 +1209,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
+ if (is_tc_port)
+ intel_tc_port_lock(intel_dig_port);
+
aux_wakeref = intel_display_power_get(i915, aux_domain);
pps_wakeref = pps_lock(intel_dp);
@@ -1392,6 +1364,9 @@ out:
pps_unlock(intel_dp, pps_wakeref);
intel_display_power_put_async(i915, aux_domain, aux_wakeref);
+ if (is_tc_port)
+ intel_tc_port_unlock(intel_dig_port);
+
return ret;
}
@@ -1879,8 +1854,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
int mode_rate, link_clock, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ int output_bpp = intel_dp_output_bpp(pipe_config, bpp);
+
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- bpp);
+ output_bpp);
for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
for (lane_count = limits->min_lane_count;
@@ -4244,8 +4221,14 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_read_dpcd(intel_dp))
return false;
- /* Don't clobber cached eDP rates. */
+ /*
+ * Don't clobber cached eDP rates. Also skip re-reading
+ * the OUI/ID since we know it won't change.
+ */
if (!intel_dp_is_edp(intel_dp)) {
+ drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
+ drm_dp_is_branch(intel_dp->dpcd));
+
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
@@ -4254,7 +4237,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 * Some eDP panels do not set a valid value for sink count, which is why
 * we don't bother reading it here or in intel_edp_init_dpcd().
*/
- if (!intel_dp_is_edp(intel_dp)) {
+ if (!intel_dp_is_edp(intel_dp) &&
+ !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
u8 count;
ssize_t r;
@@ -4879,14 +4863,16 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
* retrain the link to get a picture. That's in case no
* userspace component reacted to intermittent HPD dip.
*/
-static bool intel_dp_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+static enum intel_hotplug_state
+intel_dp_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
struct drm_modeset_acquire_ctx ctx;
- bool changed;
+ enum intel_hotplug_state state;
int ret;
- changed = intel_encoder_hotplug(encoder, connector);
+ state = intel_encoder_hotplug(encoder, connector, irq_received);
drm_modeset_acquire_init(&ctx, 0);
@@ -4905,7 +4891,14 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
drm_modeset_acquire_fini(&ctx);
WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
- return changed;
+ /*
+ * Keeping it consistent with intel_ddi_hotplug() and
+ * intel_hdmi_hotplug().
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
}
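The hotplug handler now returns a tri-state instead of a bool, letting the hotplug work re-probe a port that raised an IRQ but showed no change yet. The assumed state set, per the intel_hotplug changes in this series:

	/*
	 * INTEL_HOTPLUG_UNCHANGED - nothing to do
	 * INTEL_HOTPLUG_CHANGED   - send a userspace hotplug event
	 * INTEL_HOTPLUG_RETRY     - re-run detection after a delay
	 */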
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
@@ -5233,204 +5226,16 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
}
-static const char *tc_type_name(enum tc_port_type type)
-{
- static const char * const names[] = {
- [TC_PORT_UNKNOWN] = "unknown",
- [TC_PORT_LEGACY] = "legacy",
- [TC_PORT_TYPEC] = "typec",
- [TC_PORT_TBT] = "tbt",
- };
-
- if (WARN_ON(type >= ARRAY_SIZE(names)))
- type = TC_PORT_UNKNOWN;
-
- return names[type];
-}
-
-static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
- struct intel_digital_port *intel_dig_port,
- bool is_legacy, bool is_typec, bool is_tbt)
-{
- enum port port = intel_dig_port->base.port;
- enum tc_port_type old_type = intel_dig_port->tc_type;
-
- WARN_ON(is_legacy + is_typec + is_tbt != 1);
-
- if (is_legacy)
- intel_dig_port->tc_type = TC_PORT_LEGACY;
- else if (is_typec)
- intel_dig_port->tc_type = TC_PORT_TYPEC;
- else if (is_tbt)
- intel_dig_port->tc_type = TC_PORT_TBT;
- else
- return;
-
- /* Types are not supposed to be changed at runtime. */
- WARN_ON(old_type != TC_PORT_UNKNOWN &&
- old_type != intel_dig_port->tc_type);
-
- if (old_type != intel_dig_port->tc_type)
- DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
- tc_type_name(intel_dig_port->tc_type));
-}
-
-/*
- * This function implements the first part of the Connect Flow described by our
- * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
- * lanes, EDID, etc) is done as needed in the typical places.
- *
- * Unlike the other ports, type-C ports are not available to use as soon as we
- * get a hotplug. The type-C PHYs can be shared between multiple controllers:
- * display, USB, etc. As a result, handshaking through FIA is required around
- * connect and disconnect to cleanly transfer ownership with the controller and
- * set the type-C power state.
- *
- * We could opt to only do the connect flow when we actually try to use the AUX
- * channels or do a modeset, then immediately run the disconnect flow after
- * usage, but there are some implications on this for a dynamic environment:
- * things may go away or change behind our backs. So for now our driver is
- * always trying to acquire ownership of the controller as soon as it gets an
- * interrupt (or polls state and sees a port is connected) and only gives it
- * back when it sees a disconnect. Implementation of a more fine-grained model
- * will require a lot of coordination with user space and thorough testing for
- * the extra possible cases.
- */
-static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port)
-{
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- u32 val;
-
- if (dig_port->tc_type != TC_PORT_LEGACY &&
- dig_port->tc_type != TC_PORT_TYPEC)
- return true;
-
- val = I915_READ(PORT_TX_DFLEXDPPMS);
- if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
- DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
- WARN_ON(dig_port->tc_legacy_port);
- return false;
- }
-
- /*
- * This function may be called many times in a row without an HPD event
- * in between, so try to avoid the write when we can.
- */
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
- val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
- I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
- }
-
- /*
- * Now we have to re-check the live state, in case the port recently
- * became disconnected. Not necessary for legacy mode.
- */
- if (dig_port->tc_type == TC_PORT_TYPEC &&
- !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
- DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
- icl_tc_phy_disconnect(dev_priv, dig_port);
- return false;
- }
-
- return true;
-}
-
-/*
- * See the comment at the connect function. This implements the Disconnect
- * Flow.
- */
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port)
-{
- enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
-
- if (dig_port->tc_type == TC_PORT_UNKNOWN)
- return;
-
- /*
- * TBT disconnection flow is read the live status, what was done in
- * caller.
- */
- if (dig_port->tc_type == TC_PORT_TYPEC ||
- dig_port->tc_type == TC_PORT_LEGACY) {
- u32 val;
-
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
- I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
- }
-
- DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
- port_name(dig_port->base.port),
- tc_type_name(dig_port->tc_type));
-
- dig_port->tc_type = TC_PORT_UNKNOWN;
-}
-
-/*
- * The type-C ports are different because even when they are connected, they may
- * not be available/usable by the graphics driver: see the comment on
- * icl_tc_phy_connect(). So in our driver instead of adding the additional
- * concept of "usable" and make everything check for "connected and usable" we
- * define a port as "connected" when it is not only connected, but also when it
- * is usable by the rest of the driver. That maintains the old assumption that
- * connected ports are usable, and avoids exposing to the users objects they
- * can't really use.
- */
-static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *intel_dig_port)
-{
- enum port port = intel_dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- bool is_legacy, is_typec, is_tbt;
- u32 dpsp;
-
- /*
- * Complain if we got a legacy port HPD, but VBT didn't mark the port as
- * legacy. Treat the port as legacy from now on.
- */
- if (!intel_dig_port->tc_legacy_port &&
- I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) {
- DRM_ERROR("VBT incorrectly claims port %c is not TypeC legacy\n",
- port_name(port));
- intel_dig_port->tc_legacy_port = true;
- }
- is_legacy = intel_dig_port->tc_legacy_port;
-
- /*
- * The spec says we shouldn't be using the ISR bits for detecting
- * between TC and TBT. We should use DFLEXDPSP.
- */
- dpsp = I915_READ(PORT_TX_DFLEXDPSP);
- is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
- is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
-
- if (!is_legacy && !is_typec && !is_tbt) {
- icl_tc_phy_disconnect(dev_priv, intel_dig_port);
-
- return false;
- }
-
- icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
- is_tbt);
-
- if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
- return false;
-
- return true;
-}
-
static bool icl_digital_port_connected(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (intel_port_is_combophy(dev_priv, encoder->port))
+ if (intel_phy_is_combo(dev_priv, phy))
return icl_combo_port_connected(dev_priv, dig_port);
- else if (intel_port_is_tc(dev_priv, encoder->port))
- return icl_tc_port_connected(dev_priv, dig_port);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return intel_tc_port_connected(dig_port);
else
MISSING_CASE(encoder->hpd_pin);
@@ -5588,9 +5393,6 @@ intel_dp_detect(struct drm_connector *connector,
if (INTEL_GEN(dev_priv) >= 11)
intel_dp_get_dsc_sink_cap(intel_dp);
- drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
- drm_dp_is_branch(intel_dp->dpcd));
-
intel_dp_configure_mst(intel_dp);
if (intel_dp->is_mst) {
@@ -6835,8 +6637,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
int refresh_rate)
{
- struct intel_encoder *encoder;
- struct intel_digital_port *dig_port = NULL;
struct intel_dp *intel_dp = dev_priv->drrs.dp;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
@@ -6851,9 +6651,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- dig_port = dp_to_dig_port(intel_dp);
- encoder = &dig_port->base;
-
if (!intel_crtc) {
DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
return;
@@ -7333,6 +7130,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_encoder->port;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
int type;
/* Initialize the work for modeset in case of link train failure */
@@ -7359,7 +7157,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- WARN_ON(intel_port_is_tc(dev_priv, port));
+ WARN_ON(intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index da70b1a41c83..657bbb1f5ed0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -112,8 +112,6 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct intel_encoder *encoder);
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 7ded95a334db..6b0b73479fb8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -264,8 +264,11 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
+ struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
- if (!i915_modparams.enable_dpcd_backlight)
+ if (i915_modparams.enable_dpcd_backlight == 0 ||
+ (i915_modparams.enable_dpcd_backlight == -1 &&
+ dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
return -ENODEV;
if (!intel_dp_aux_display_control_capable(intel_connector))
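enable_dpcd_backlight effectively becomes a tri-state module parameter. Assuming -1 is the new "auto" default (an assumption, the default is not visible in this hunk), the behaviour is:

	/*
	 *  0 -> never use the DPCD/AUX backlight interface
	 * -1 -> use it only when the VBT declares
	 *       INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE
	 *  1 -> use it whenever the panel advertises the capability
	 */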
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 1470c6e0514b..6754c211205a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -6,9 +6,15 @@
#ifndef __INTEL_DP_MST_H__
#define __INTEL_DP_MST_H__
-struct intel_digital_port;
+#include "intel_drv.h"
int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
+static inline int
+intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+{
+ return intel_dig_port->dp.active_mst_links;
+}
+
#endif /* __INTEL_DP_MST_H__ */
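The new inline gives non-MST code a cheap way to ask whether a digital port still carries active MST streams. A hypothetical caller:

	/* e.g. defer TC disconnect handling while MST streams are live */
	if (intel_dp_mst_encoder_active_links(intel_dig_port))
		return;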
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 2d4e7b9a7b9d..f9bdf8514a53 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -36,9 +36,10 @@
* This file provides an abstraction over display PLLs. The function
* intel_shared_dpll_init() initializes the PLLs for the given platform. The
* users of a PLL are tracked and that tracking is integrated with the atomic
- * modest interface. During an atomic operation, a PLL can be requested for a
- * given CRTC and encoder configuration by calling intel_get_shared_dpll() and
- * a previously used PLL can be released with intel_release_shared_dpll().
+ * modset interface. During an atomic operation, required PLLs can be reserved
+ * for a given CRTC and encoder configuration by calling
+ * intel_reserve_shared_dplls() and previously reserved PLLs can be released
+ * with intel_release_shared_dplls().
* Changes to the users are first staged in the atomic state, and then made
* effective by calling intel_shared_dpll_swap_state() during the atomic
* commit phase.
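A sketch of the resulting atomic-modeset flow, using the interface names from the comment above (the exact signatures are an assumption here):

	/* compute/check phase */
	if (!intel_reserve_shared_dplls(state, crtc, encoder))
		return -EINVAL;		/* no PLL fits this config */

	/* dropping a CRTC's PLLs, e.g. on disable or recompute */
	intel_release_shared_dplls(state, crtc);

	/* commit phase */
	intel_shared_dpll_swap_state(state);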
@@ -243,17 +244,18 @@ out:
}
static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
+intel_find_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_dpll_hw_state *pll_state,
enum intel_dpll_id range_min,
enum intel_dpll_id range_max)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll, *unused_pll = NULL;
struct intel_shared_dpll_state *shared_dpll;
enum intel_dpll_id i;
- shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
for (i = range_min; i <= range_max; i++) {
pll = &dev_priv->shared_dplls[i];
@@ -265,9 +267,9 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
continue;
}
- if (memcmp(&crtc_state->dpll_hw_state,
+ if (memcmp(pll_state,
&shared_dpll[i].hw_state,
- sizeof(crtc_state->dpll_hw_state)) == 0) {
+ sizeof(*pll_state)) == 0) {
DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
crtc->base.base.id, crtc->base.name,
pll->info->name,
@@ -289,26 +291,51 @@ intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
}
static void
-intel_reference_shared_dpll(struct intel_shared_dpll *pll,
- struct intel_crtc_state *crtc_state)
+intel_reference_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_shared_dpll *pll,
+ const struct intel_dpll_hw_state *pll_state)
{
struct intel_shared_dpll_state *shared_dpll;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const enum intel_dpll_id id = pll->info->id;
- shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
if (shared_dpll[id].crtc_mask == 0)
- shared_dpll[id].hw_state =
- crtc_state->dpll_hw_state;
+ shared_dpll[id].hw_state = *pll_state;
- crtc_state->shared_dpll = pll;
DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
pipe_name(crtc->pipe));
shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
}
+static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc,
+ const struct intel_shared_dpll *pll)
+{
+ struct intel_shared_dpll_state *shared_dpll;
+
+ shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
+ shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+}
+
+static void intel_put_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ new_crtc_state->shared_dpll = NULL;
+
+ if (!old_crtc_state->shared_dpll)
+ return;
+
+ intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
+}
+
/**
* intel_shared_dpll_swap_state - make atomic DPLL configuration effective
* @state: atomic state
@@ -320,25 +347,20 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll,
* i.e. it also puts the current state into @state, even though there is no
* need for that at this moment.
*/
-void intel_shared_dpll_swap_state(struct drm_atomic_state *state)
+void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_shared_dpll_state *shared_dpll;
- struct intel_shared_dpll *pll;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
enum intel_dpll_id i;
- if (!to_intel_atomic_state(state)->dpll_set)
+ if (!state->dpll_set)
return;
- shared_dpll = to_intel_atomic_state(state)->shared_dpll;
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll_state tmp;
+ struct intel_shared_dpll *pll =
+ &dev_priv->shared_dplls[i];
- pll = &dev_priv->shared_dplls[i];
-
- tmp = pll->state;
- pll->state = shared_dpll[i];
- shared_dpll[i] = tmp;
+ swap(pll->state, shared_dpll[i]);
}
}
@@ -421,11 +443,12 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
udelay(200);
}
-static struct intel_shared_dpll *
-ibx_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool ibx_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
@@ -439,18 +462,22 @@ ibx_get_dpll(struct intel_crtc_state *crtc_state,
crtc->base.base.id, crtc->base.name,
pll->info->name);
} else {
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_PCH_PLL_A,
DPLL_ID_PCH_PLL_B);
}
if (!pll)
- return NULL;
+ return false;
/* reference the pll */
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -767,8 +794,12 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *
+hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
u32 val;
unsigned int p, n2, r2;
@@ -781,7 +812,8 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *
crtc_state->dpll_hw_state.wrpll = val;
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
if (!pll)
@@ -821,38 +853,44 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
return pll;
}
-static struct intel_shared_dpll *
-hsw_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool hsw_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(crtc_state);
+ pll = hsw_ddi_hdmi_get_dpll(state, crtc);
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
pll = hsw_ddi_dp_get_dpll(crtc_state);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
- return NULL;
+ return false;
crtc_state->dpll_hw_state.spll =
SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_SPLL, DPLL_ID_SPLL);
} else {
- return NULL;
+ return false;
}
if (!pll)
- return NULL;
+ return false;
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1385,10 +1423,12 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
-static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool skl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
bool bret;
@@ -1396,32 +1436,37 @@ skl_get_dpll(struct intel_crtc_state *crtc_state,
bret = skl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
- return NULL;
+ return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
- return NULL;
+ return false;
}
} else {
- return NULL;
+ return false;
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL0);
else
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_SKL_DPLL1,
DPLL_ID_SKL_DPLL3);
if (!pll)
- return NULL;
+ return false;
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1827,22 +1872,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
-static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool bxt_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id id;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
!bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
- return NULL;
+ return false;
if (intel_crtc_has_dp_encoder(crtc_state) &&
!bxt_ddi_dp_set_dpll_hw_state(crtc_state))
- return NULL;
+ return false;
/* 1:1 mapping between ports and PLLs */
id = (enum intel_dpll_id) encoder->port;
@@ -1851,9 +1897,12 @@ bxt_get_dpll(struct intel_crtc_state *crtc_state,
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -1884,9 +1933,14 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
- struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder);
-
+ bool (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*put_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*update_active_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void (*dump_hw_state)(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state);
};
@@ -1899,7 +1953,8 @@ static const struct dpll_info pch_plls[] = {
static const struct intel_dpll_mgr pch_pll_mgr = {
.dpll_info = pch_plls,
- .get_dpll = ibx_get_dpll,
+ .get_dplls = ibx_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = ibx_dump_hw_state,
};
@@ -1915,7 +1970,8 @@ static const struct dpll_info hsw_plls[] = {
static const struct intel_dpll_mgr hsw_pll_mgr = {
.dpll_info = hsw_plls,
- .get_dpll = hsw_get_dpll,
+ .get_dplls = hsw_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = hsw_dump_hw_state,
};
@@ -1929,7 +1985,8 @@ static const struct dpll_info skl_plls[] = {
static const struct intel_dpll_mgr skl_pll_mgr = {
.dpll_info = skl_plls,
- .get_dpll = skl_get_dpll,
+ .get_dplls = skl_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = skl_dump_hw_state,
};
@@ -1942,7 +1999,8 @@ static const struct dpll_info bxt_plls[] = {
static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
- .get_dpll = bxt_get_dpll,
+ .get_dplls = bxt_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = bxt_dump_hw_state,
};
@@ -2332,10 +2390,12 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
-static struct intel_shared_dpll *
-cnl_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+static bool cnl_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
struct intel_shared_dpll *pll;
bool bret;
@@ -2343,31 +2403,35 @@ cnl_get_dpll(struct intel_crtc_state *crtc_state,
bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
- return NULL;
+ return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
- return NULL;
+ return false;
}
} else {
DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
crtc_state->output_types);
- return NULL;
+ return false;
}
- pll = intel_find_shared_dpll(crtc_state,
+ pll = intel_find_shared_dpll(state, crtc,
+ &crtc_state->dpll_hw_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL2);
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
- return NULL;
+ return false;
}
- intel_reference_shared_dpll(pll, crtc_state);
+ intel_reference_shared_dpll(state, crtc,
+ pll, &crtc_state->dpll_hw_state);
- return pll;
+ crtc_state->shared_dpll = pll;
+
+ return true;
}
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -2394,7 +2458,8 @@ static const struct dpll_info cnl_plls[] = {
static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
- .get_dpll = cnl_get_dpll,
+ .get_dplls = cnl_get_dpll,
+ .put_dplls = intel_put_dpll,
.dump_hw_state = cnl_dump_hw_state,
};
@@ -2506,14 +2571,16 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
}
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+ struct intel_encoder *encoder,
+ struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
- if (intel_port_is_tc(dev_priv, encoder->port))
+ if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
+ encoder->port)))
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
@@ -2530,14 +2597,17 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
DPLL_CFGCR1_KDIV(pll_params.kdiv) |
- DPLL_CFGCR1_PDIV(pll_params.pdiv) |
- DPLL_CFGCR1_CENTRAL_FREQ_8400;
+ DPLL_CFGCR1_PDIV(pll_params.pdiv);
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
+ if (INTEL_GEN(dev_priv) >= 12)
+ cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
+ else
+ cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;
- crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
- crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
+ memset(pll_state, 0, sizeof(*pll_state));
+
+ pll_state->cfgcr0 = cfgcr0;
+ pll_state->cfgcr1 = cfgcr1;
return true;
}
@@ -2627,10 +2697,10 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
* The specification for this function uses real numbers, so the math had to be
* adapted to integer-only calculation, that's why it looks so different.
*/
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
+ struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
int refclk_khz = dev_priv->cdclk.hw.ref;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
@@ -2792,63 +2862,184 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
return true;
}
-static struct intel_shared_dpll *
-icl_get_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+/**
+ * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
+ * @crtc_state: state for the CRTC to select the DPLL for
+ * @port_dpll_id: the port DPLL instance to select as active
+ *
+ * Select the given @port_dpll_id instance from the DPLLs reserved for the
+ * CRTC.
+ */
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+ enum icl_port_dpll_id port_dpll_id)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_digital_port *intel_dig_port;
- struct intel_shared_dpll *pll;
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[port_dpll_id];
+
+ crtc_state->shared_dpll = port_dpll->pll;
+ crtc_state->dpll_hw_state = port_dpll->hw_state;
+}
+
+static void icl_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_digital_port *primary_port;
+ enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
+
+ primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
+ enc_to_mst(&encoder->base)->primary :
+ enc_to_dig_port(&encoder->base);
+
+ if (primary_port &&
+ (primary_port->tc_mode == TC_PORT_DP_ALT ||
+ primary_port->tc_mode == TC_PORT_LEGACY))
+ port_dpll_id = ICL_PORT_DPLL_MG_PHY;
+
+ icl_set_active_port_dpll(crtc_state, port_dpll_id);
+}
+
+static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll =
+ &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum port port = encoder->port;
- enum intel_dpll_id min, max;
- bool ret;
+ bool has_dpll4 = false;
- if (intel_port_is_combophy(dev_priv, port)) {
- min = DPLL_ID_ICL_DPLL0;
- max = DPLL_ID_ICL_DPLL1;
- ret = icl_calc_dpll_state(crtc_state, encoder);
- } else if (intel_port_is_tc(dev_priv, port)) {
- if (encoder->type == INTEL_OUTPUT_DP_MST) {
- struct intel_dp_mst_encoder *mst_encoder;
+ if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+ DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
- mst_encoder = enc_to_mst(&encoder->base);
- intel_dig_port = mst_encoder->primary;
- } else {
- intel_dig_port = enc_to_dig_port(&encoder->base);
- }
+ return false;
+ }
- if (intel_dig_port->tc_type == TC_PORT_TBT) {
- min = DPLL_ID_ICL_TBTPLL;
- max = min;
- ret = icl_calc_dpll_state(crtc_state, encoder);
- } else {
- enum tc_port tc_port;
+ if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
+ has_dpll4 = true;
+
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ DPLL_ID_ICL_DPLL0,
+ has_dpll4 ? DPLL_ID_EHL_DPLL4
+ : DPLL_ID_ICL_DPLL1);
+ if (!port_dpll->pll) {
+ DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n",
+ port_name(encoder->port));
+ return false;
+ }
- tc_port = intel_port_to_tc(dev_priv, port);
- min = icl_tc_port_to_pll_id(tc_port);
- max = min;
- ret = icl_calc_mg_pll_state(crtc_state);
- }
- } else {
- MISSING_CASE(port);
- return NULL;
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
+
+ icl_update_active_dpll(state, crtc, encoder);
+
+ return true;
+}
+
+static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct icl_port_dpll *port_dpll;
+ enum intel_dpll_id dpll_id;
+
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
+ DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
+ return false;
}
- if (!ret) {
- DRM_DEBUG_KMS("Could not calculate PLL state.\n");
- return NULL;
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ DPLL_ID_ICL_TBTPLL,
+ DPLL_ID_ICL_TBTPLL);
+ if (!port_dpll->pll) {
+ DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
+ return false;
}
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
- pll = intel_find_shared_dpll(crtc_state, min, max);
- if (!pll) {
- DRM_DEBUG_KMS("No PLL selected\n");
- return NULL;
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
+ if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
+ DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
+ goto err_unreference_tbt_pll;
}
- intel_reference_shared_dpll(pll, crtc_state);
+ dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
+ encoder->port));
+ port_dpll->pll = intel_find_shared_dpll(state, crtc,
+ &port_dpll->hw_state,
+ dpll_id,
+ dpll_id);
+ if (!port_dpll->pll) {
+ DRM_DEBUG_KMS("No MG PHY PLL found\n");
+ goto err_unreference_tbt_pll;
+ }
+ intel_reference_shared_dpll(state, crtc,
+ port_dpll->pll, &port_dpll->hw_state);
- return pll;
+ icl_update_active_dpll(state, crtc, encoder);
+
+ return true;
+
+err_unreference_tbt_pll:
+ port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
+ intel_unreference_shared_dpll(state, crtc, port_dpll->pll);
+
+ return false;
+}
+
+static bool icl_get_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+ if (intel_phy_is_combo(dev_priv, phy))
+ return icl_get_combo_phy_dpll(state, crtc, encoder);
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return icl_get_tc_phy_dplls(state, crtc, encoder);
+
+ MISSING_CASE(phy);
+
+ return false;
+}
+
+static void icl_put_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ enum icl_port_dpll_id id;
+
+ new_crtc_state->shared_dpll = NULL;
+
+ for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
+ const struct icl_port_dpll *old_port_dpll =
+ &old_crtc_state->icl_port_dplls[id];
+ struct icl_port_dpll *new_port_dpll =
+ &new_crtc_state->icl_port_dplls[id];
+
+ new_port_dpll->pll = NULL;
+
+ if (!old_port_dpll->pll)
+ continue;
+
+ intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
+ }
}
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2932,8 +3123,18 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
+ } else {
+ if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
+ } else {
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ }
+ }
ret = true;
out:
@@ -2945,8 +3146,14 @@ static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- return icl_pll_get_hw_state(dev_priv, pll, hw_state,
- CNL_DPLL_ENABLE(pll->info->id));
+ i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ enable_reg = MG_PLL_ENABLE(0);
+ }
+
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
}
static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2961,10 +3168,24 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
{
struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
const enum intel_dpll_id id = pll->info->id;
+ i915_reg_t cfgcr0_reg, cfgcr1_reg;
- I915_WRITE(ICL_DPLL_CFGCR0(id), hw_state->cfgcr0);
- I915_WRITE(ICL_DPLL_CFGCR1(id), hw_state->cfgcr1);
- POSTING_READ(ICL_DPLL_CFGCR1(id));
+ if (INTEL_GEN(dev_priv) >= 12) {
+ cfgcr0_reg = TGL_DPLL_CFGCR0(id);
+ cfgcr1_reg = TGL_DPLL_CFGCR1(id);
+ } else {
+ if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
+ cfgcr0_reg = ICL_DPLL_CFGCR0(4);
+ cfgcr1_reg = ICL_DPLL_CFGCR1(4);
+ } else {
+ cfgcr0_reg = ICL_DPLL_CFGCR0(id);
+ cfgcr1_reg = ICL_DPLL_CFGCR1(id);
+ }
+ }
+
+ I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
+ I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
+ POSTING_READ(cfgcr1_reg);
}
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
@@ -3057,6 +3278,19 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
{
i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ enable_reg = MG_PLL_ENABLE(0);
+
+ /*
+ * We need to disable DC states when this DPLL is enabled.
+ * This can be done by taking a reference on DPLL4 power
+ * domain.
+ */
+ pll->wakeref = intel_display_power_get(dev_priv,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
icl_pll_power_enable(dev_priv, pll, enable_reg);
icl_dpll_write(dev_priv, pll);
@@ -3152,7 +3386,19 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
static void combo_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+ i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+ if (IS_ELKHARTLAKE(dev_priv) &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ enable_reg = MG_PLL_ENABLE(0);
+ icl_pll_disable(dev_priv, pll, enable_reg);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
+ pll->wakeref);
+ return;
+ }
+
+ icl_pll_disable(dev_priv, pll, enable_reg);
}
static void tbt_pll_disable(struct drm_i915_private *dev_priv,
@@ -3223,19 +3469,38 @@ static const struct dpll_info icl_plls[] = {
static const struct intel_dpll_mgr icl_pll_mgr = {
.dpll_info = icl_plls,
- .get_dpll = icl_get_dpll,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .update_active_dpll = icl_update_active_dpll,
.dump_hw_state = icl_dump_hw_state,
};
static const struct dpll_info ehl_plls[] = {
{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
{ },
};
static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
- .get_dpll = icl_get_dpll,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
+static const struct dpll_info tgl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ /* TODO: Add Type-C PLLs */
+ { },
+};
+
+static const struct intel_dpll_mgr tgl_pll_mgr = {
+ .dpll_info = tgl_plls,
+ .get_dplls = icl_get_dplls,
+ .put_dplls = icl_put_dplls,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3252,7 +3517,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_ELKHARTLAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 12)
+ dpll_mgr = &tgl_pll_mgr;
+ else if (IS_ELKHARTLAKE(dev_priv))
dpll_mgr = &ehl_pll_mgr;
else if (INTEL_GEN(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
@@ -3287,50 +3554,87 @@ void intel_shared_dpll_init(struct drm_device *dev)
}
/**
- * intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
- * @crtc_state: atomic state for the crtc
+ * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
+ * @state: atomic state
+ * @crtc: CRTC to reserve DPLLs for
* @encoder: encoder
*
- * Find an appropriate DPLL for the given CRTC and encoder combination. A
- * reference from the @crtc_state to the returned pll is registered in the
- * atomic state. That configuration is made effective by calling
- * intel_shared_dpll_swap_state(). The reference should be released by calling
- * intel_release_shared_dpll().
+ * This function reserves all required DPLLs for the given CRTC and encoder
+ * combination in the current atomic commit @state and the new @crtc atomic
+ * state.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
+ *
+ * The reserved DPLLs should be released by calling
+ * intel_release_shared_dplls().
*
* Returns:
- * A shared DPLL to be used by @crtc_state and @encoder.
+ * True if all required DPLLs were successfully reserved.
*/
-struct intel_shared_dpll *
-intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
if (WARN_ON(!dpll_mgr))
- return NULL;
+ return false;
- return dpll_mgr->get_dpll(crtc_state, encoder);
+ return dpll_mgr->get_dplls(state, crtc, encoder);
}
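
For orientation, here is a minimal, compilable sketch of the contract this reworked interface establishes. The stub types and the caller compute_crtc_clock() are invented for illustration; only the two reserve/release entry points mirror the patch. Reservations are taken against the atomic commit state during the check phase and dropped from the old CRTC state, with intel_shared_dpll_swap_state() making the result effective at commit time.

#include <stdbool.h>
#include <stdio.h>

/* Stub stand-ins for the i915 types used by the reworked interface. */
struct intel_atomic_state { int dummy; };
struct intel_crtc { int pipe; };
struct intel_encoder { int port; };

/* Stubs mirroring the two entry points; the real ones dispatch to the
 * per-platform get_dplls()/put_dplls() hooks. */
static bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
				       struct intel_crtc *crtc,
				       struct intel_encoder *encoder)
{
	(void)state; (void)crtc; (void)encoder;
	return true;	/* assume the reservation succeeds */
}

static void intel_release_shared_dplls(struct intel_atomic_state *state,
				       struct intel_crtc *crtc)
{
	(void)state; (void)crtc;
}

/* Invented caller: a compute-clock step in the atomic check phase. */
static int compute_crtc_clock(struct intel_atomic_state *state,
			      struct intel_crtc *crtc,
			      struct intel_encoder *encoder)
{
	/* Reservations held by the old CRTC state are dropped first ... */
	intel_release_shared_dplls(state, crtc);

	/* ... then every PLL the new state needs is reserved in one go. */
	if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
		fprintf(stderr, "no DPLLs for pipe %d\n", crtc->pipe);
		return -1;	/* would be -EINVAL in the kernel */
	}

	/* intel_shared_dpll_swap_state() would make this effective later. */
	return 0;
}

int main(void)
{
	struct intel_atomic_state state = { 0 };
	struct intel_crtc crtc = { .pipe = 0 };
	struct intel_encoder encoder = { .port = 0 };

	return compute_crtc_clock(&state, &crtc, &encoder) ? 1 : 0;
}
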
/**
- * intel_release_shared_dpll - end use of DPLL by CRTC in atomic state
- * @dpll: dpll in use by @crtc
- * @crtc: crtc
+ * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
* @state: atomic state
+ * @crtc: crtc from which the DPLLs are to be released
*
- * This function releases the reference from @crtc to @dpll from the
- * atomic @state. The new configuration is made effective by calling
- * intel_shared_dpll_swap_state().
+ * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
+ * from the current atomic commit @state and the old @crtc atomic state.
+ *
+ * The new configuration in the atomic commit @state is made effective by
+ * calling intel_shared_dpll_swap_state().
*/
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
- struct intel_crtc *crtc,
- struct drm_atomic_state *state)
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_shared_dpll_state *shared_dpll_state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+ /*
+ * FIXME: this function is called for every platform having a
+ * compute_clock hook, even though the platform doesn't yet support
+ * the shared DPLL framework and intel_reserve_shared_dplls() is not
+ * called on those.
+ */
+ if (!dpll_mgr)
+ return;
+
+ dpll_mgr->put_dplls(state, crtc);
+}
+
+/**
+ * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
+ * @state: atomic state
+ * @crtc: the CRTC for which to update the active DPLL
+ * @encoder: encoder determining the type of port DPLL
+ *
+ * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
+ * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
+ * DPLL selected will be based on the current mode of the encoder's port.
+ */
+void intel_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+
+ if (WARN_ON(!dpll_mgr))
+ return;
- shared_dpll_state = intel_atomic_get_shared_dpll_state(state);
- shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe);
+ dpll_mgr->update_active_dpll(state, crtc, encoder);
}
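
The selection rule behind this hook is small enough to model standalone. A compilable stub mirror of icl_update_active_dpll()'s decision (the icl_port_dpll_id names are copied from this patch; the helper name and the tc_port_mode stub are invented): DP-alt and legacy Type-C modes need the MG PHY PLL, everything else stays on the default combo/TBT slot.

#include <stdio.h>

/* Enum names copied from this patch; everything else is a stub. */
enum icl_port_dpll_id {
	ICL_PORT_DPLL_DEFAULT,	/* combo PHY / TBT PLL slot */
	ICL_PORT_DPLL_MG_PHY,	/* MG PHY PLL slot */
	ICL_PORT_DPLL_COUNT,
};

enum tc_port_mode { TC_PORT_TBT_ALT, TC_PORT_DP_ALT, TC_PORT_LEGACY };

/* Invented helper mirroring icl_update_active_dpll()'s selection rule. */
static enum icl_port_dpll_id active_port_dpll(enum tc_port_mode mode)
{
	if (mode == TC_PORT_DP_ALT || mode == TC_PORT_LEGACY)
		return ICL_PORT_DPLL_MG_PHY;

	return ICL_PORT_DPLL_DEFAULT;
}

int main(void)
{
	printf("TBT-alt -> slot %d\n", active_port_dpll(TC_PORT_TBT_ALT));
	printf("DP-alt  -> slot %d\n", active_port_dpll(TC_PORT_DP_ALT));
	printf("legacy  -> slot %d\n", active_port_dpll(TC_PORT_LEGACY));
	return 0;
}
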
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index d0570414f3d1..e7588799fce5 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include "intel_display.h"
+#include "intel_wakeref.h"
/*FIXME: Move this to a more appropriate place. */
#define abs_diff(a, b) ({ \
@@ -36,9 +37,9 @@
(void) (&__a == &__b); \
__a > __b ? (__a - __b) : (__b - __a); })
-struct drm_atomic_state;
struct drm_device;
struct drm_i915_private;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
@@ -110,35 +111,59 @@ enum intel_dpll_id {
/**
- * @DPLL_ID_ICL_DPLL0: ICL combo PHY DPLL0
+ * @DPLL_ID_ICL_DPLL0: ICL/TGL combo PHY DPLL0
*/
DPLL_ID_ICL_DPLL0 = 0,
/**
- * @DPLL_ID_ICL_DPLL1: ICL combo PHY DPLL1
+ * @DPLL_ID_ICL_DPLL1: ICL/TGL combo PHY DPLL1
*/
DPLL_ID_ICL_DPLL1 = 1,
/**
- * @DPLL_ID_ICL_TBTPLL: ICL TBT PLL
+ * @DPLL_ID_EHL_DPLL4: EHL combo PHY DPLL4
+ */
+ DPLL_ID_EHL_DPLL4 = 2,
+ /**
+ * @DPLL_ID_ICL_TBTPLL: ICL/TGL TBT PLL
*/
DPLL_ID_ICL_TBTPLL = 2,
/**
- * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C)
+ * @DPLL_ID_ICL_MGPLL1: ICL MG PLL 1 port 1 (C),
+ * TGL TC PLL 1 port 1 (TC1)
*/
DPLL_ID_ICL_MGPLL1 = 3,
/**
* @DPLL_ID_ICL_MGPLL2: ICL MG PLL 1 port 2 (D)
+ * TGL TC PLL 1 port 2 (TC2)
*/
DPLL_ID_ICL_MGPLL2 = 4,
/**
* @DPLL_ID_ICL_MGPLL3: ICL MG PLL 1 port 3 (E)
+ * TGL TC PLL 1 port 3 (TC3)
*/
DPLL_ID_ICL_MGPLL3 = 5,
/**
* @DPLL_ID_ICL_MGPLL4: ICL MG PLL 1 port 4 (F)
+ * TGL TC PLL 1 port 4 (TC4)
*/
DPLL_ID_ICL_MGPLL4 = 6,
+ /**
+ * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
+ */
+ DPLL_ID_TGL_MGPLL5 = 7,
+ /**
+ * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
+ */
+ DPLL_ID_TGL_MGPLL6 = 8,
+};
+
+#define I915_NUM_PLLS 9
+
+enum icl_port_dpll_id {
+ ICL_PORT_DPLL_DEFAULT,
+ ICL_PORT_DPLL_MG_PHY,
+
+ ICL_PORT_DPLL_COUNT,
};
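
Note the deliberate aliasing above: DPLL_ID_EHL_DPLL4 and DPLL_ID_ICL_TBTPLL both carry the value 2, which is legal C and safe because EHL has no TBT PLL and ICL/TGL have no DPLL4, so no platform ever uses both names. It also keeps I915_NUM_PLLS at 9 despite ten enumerator names. A tiny standalone check of that reading:

#include <assert.h>

/* Values copied from the enum above. */
enum {
	DPLL_ID_EHL_DPLL4 = 2,	/* EHL combo PHY DPLL4 */
	DPLL_ID_ICL_TBTPLL = 2,	/* ICL/TGL TBT PLL */
};

int main(void)
{
	/* Two names, one ID slot; a given platform only ever uses one. */
	assert(DPLL_ID_EHL_DPLL4 == DPLL_ID_ICL_TBTPLL);
	return 0;
}
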
-#define I915_NUM_PLLS 7
struct intel_dpll_hw_state {
/* i9xx, pch plls */
@@ -195,7 +220,7 @@ struct intel_dpll_hw_state {
* future state which would be applied by an atomic mode set (stored in
* a struct &intel_atomic_state).
*
- * See also intel_get_shared_dpll() and intel_release_shared_dpll().
+ * See also intel_reserve_shared_dplls() and intel_release_shared_dplls().
*/
struct intel_shared_dpll_state {
/**
@@ -312,6 +337,7 @@ struct intel_shared_dpll {
* @info: platform specific info
*/
const struct dpll_info *info;
+ intel_wakeref_t wakeref;
};
#define SKL_DPLL0 0
@@ -331,15 +357,20 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
- struct intel_encoder *encoder);
-void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
- struct intel_crtc *crtc,
- struct drm_atomic_state *state);
+bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+void intel_release_shared_dplls(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
+ enum icl_port_dpll_id port_dpll_id);
+void intel_update_active_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
-void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
+void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 6d20434636cd..1cd24bd46518 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -49,8 +49,11 @@ struct intel_dsi {
struct intel_connector *attached_connector;
- /* bit mask of ports being driven */
- u16 ports;
+ /* bit mask of ports (vlv dsi) or phys (icl dsi) being driven */
+ union {
+ u16 ports; /* VLV DSI */
+ u16 phys; /* ICL DSI */
+ };
/* if true, use HS mode, otherwise LP */
bool hs;
@@ -132,7 +135,10 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h)
return container_of(h, struct intel_dsi_host, base);
}
-#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask)
+#define for_each_dsi_port(__port, __ports_mask) \
+ for_each_port_masked(__port, __ports_mask)
+#define for_each_dsi_phy(__phy, __phys_mask) \
+ for_each_phy_masked(__phy, __phys_mask)
static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
{
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 4f6a9bd5af47..b42c79aea61a 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -94,11 +94,25 @@ static const struct gmbus_pin gmbus_pins_mcc[] = {
[GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ },
};
+static const struct gmbus_pin gmbus_pins_tgp[] = {
+ [GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
+ [GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
+ [GMBUS_PIN_3_BXT] = { "dpc", GPIOD },
+ [GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
+ [GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
+ [GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
+ [GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
+ [GMBUS_PIN_13_TC5_TGP] = { "tc5", GPION },
+ [GMBUS_PIN_14_TC6_TGP] = { "tc6", GPIOO },
+};
+
/* pin is expected to be valid */
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (HAS_PCH_MCC(dev_priv))
+ if (HAS_PCH_TGP(dev_priv))
+ return &gmbus_pins_tgp[pin];
+ else if (HAS_PCH_MCC(dev_priv))
return &gmbus_pins_mcc[pin];
else if (HAS_PCH_ICP(dev_priv))
return &gmbus_pins_icp[pin];
@@ -119,7 +133,9 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
unsigned int size;
- if (HAS_PCH_MCC(dev_priv))
+ if (HAS_PCH_TGP(dev_priv))
+ size = ARRAY_SIZE(gmbus_pins_tgp);
+ else if (HAS_PCH_MCC(dev_priv))
size = ARRAY_SIZE(gmbus_pins_mcc);
else if (HAS_PCH_ICP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_icp);
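
The gmbus pin tables above use sparse designated initializers, so validity is presumably more than a range check: the slot must also be populated. A compilable stub of that pattern (table contents invented, not the real gmbus_pins_tgp values):

#include <stdbool.h>
#include <stdio.h>

struct gmbus_pin { const char *name; int gpio; };

/* Sparse table in the style of gmbus_pins_tgp above (contents invented). */
static const struct gmbus_pin pins[] = {
	[1] = { "dpa", 1 },
	[2] = { "dpb", 2 },
	[9] = { "tc1", 9 },
};

/* A pin is valid only if it is in range and its slot was initialized;
 * designated initializers leave the gaps zeroed, so name stays NULL. */
static bool pin_is_valid(unsigned int pin)
{
	return pin < sizeof(pins) / sizeof(pins[0]) && pins[pin].name;
}

int main(void)
{
	printf("pin 2: %d  pin 5: %d  pin 9: %d  pin 42: %d\n",
	       pin_is_valid(2), pin_is_valid(5),
	       pin_is_valid(9), pin_is_valid(42));
	return 0;
}
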
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index bc3a94d491c4..845eb8f29b58 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -523,12 +523,16 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
* authentication.
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
- if (num_downstream == 0)
+ if (num_downstream == 0) {
+ DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
return -EINVAL;
+ }
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
- if (!ksv_fifo)
+ if (!ksv_fifo) {
+ DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
return -ENOMEM;
+ }
ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
if (ret)
@@ -865,7 +869,6 @@ static void intel_hdcp_prop_work(struct work_struct *work)
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct drm_device *dev = connector->base.dev;
- struct drm_connector_state *state;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
mutex_lock(&hdcp->mutex);
@@ -875,10 +878,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
* those to UNDESIRED is handled by core. If value == UNDESIRED,
* we're running just after hdcp has been disabled, so just exit
*/
- if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- state = connector->base.state;
- state->content_protection = hdcp->value;
- }
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ drm_hdcp_update_content_protection(&connector->base,
+ hdcp->value);
mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -1206,8 +1208,10 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
if (ret < 0)
return ret;
- if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
+ if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
+ DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
return -EINVAL;
+ }
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
@@ -1748,14 +1752,15 @@ static const struct component_ops i915_hdcp_component_ops = {
.unbind = i915_hdcp_component_unbind,
};
-static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+static inline int initialize_hdcp_port_data(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp_port_data *data = &hdcp->port_data;
data->port = connector->encoder->port;
data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
- data->protocol = (u8)hdcp->shim->protocol;
+ data->protocol = (u8)shim->protocol;
data->k = 1;
if (!data->streams)
@@ -1805,12 +1810,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
}
}
-static void intel_hdcp2_init(struct intel_connector *connector)
+static void intel_hdcp2_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *shim)
{
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = initialize_hdcp_port_data(connector);
+ ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
DRM_DEBUG_KMS("Mei hdcp data init failed\n");
return;
@@ -1829,23 +1835,28 @@ int intel_hdcp_init(struct intel_connector *connector,
if (!shim)
return -EINVAL;
- ret = drm_connector_attach_content_protection_property(&connector->base);
- if (ret)
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector, shim);
+
+ ret =
+ drm_connector_attach_content_protection_property(&connector->base,
+ hdcp->hdcp2_supported);
+ if (ret) {
+ hdcp->hdcp2_supported = false;
+ kfree(hdcp->port_data.streams);
return ret;
+ }
hdcp->shim = shim;
mutex_init(&hdcp->mutex);
INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
-
- if (is_hdcp2_supported(dev_priv))
- intel_hdcp2_init(connector);
init_waitqueue_head(&hdcp->cp_irq_queue);
return 0;
}
-int intel_hdcp_enable(struct intel_connector *connector)
+int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
{
struct intel_hdcp *hdcp = &connector->hdcp;
unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
@@ -1856,6 +1867,7 @@ int intel_hdcp_enable(struct intel_connector *connector)
mutex_lock(&hdcp->mutex);
WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ hdcp->content_type = content_type;
/*
 * Considering that HDCP2.2 is more secure than HDCP1.4, if the setup
@@ -1867,8 +1879,12 @@ int intel_hdcp_enable(struct intel_connector *connector)
check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
}
- /* When HDCP2.2 fails, HDCP1.4 will be attempted */
- if (ret && intel_hdcp_capable(connector)) {
+ /*
+ * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
+ * be attempted.
+ */
+ if (ret && intel_hdcp_capable(connector) &&
+ hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
ret = _intel_hdcp_enable(connector);
}
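
A compilable stub of the fallback gate added here (the enum values are invented for the sketch; the kernel uses the DRM_MODE_HDCP_CONTENT_TYPE* defines): HDCP 1.4 is only tried when 2.2 failed, the sink is 1.4-capable, and the stream is not Type 1.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the DRM content-type defines. */
enum { HDCP_CONTENT_TYPE0, HDCP_CONTENT_TYPE1 };

static bool may_fall_back_to_hdcp14(bool hdcp2_failed, bool hdcp14_capable,
				    int content_type)
{
	/* Type 1 streams must never be downgraded to HDCP 1.4. */
	return hdcp2_failed && hdcp14_capable &&
	       content_type != HDCP_CONTENT_TYPE1;
}

int main(void)
{
	printf("Type0, 2.2 failed -> fallback: %d\n",
	       may_fall_back_to_hdcp14(true, true, HDCP_CONTENT_TYPE0));
	printf("Type1, 2.2 failed -> fallback: %d\n",
	       may_fall_back_to_hdcp14(true, true, HDCP_CONTENT_TYPE1));
	return 0;
}
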
@@ -1950,12 +1966,15 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
/*
* Nothing to do if the state didn't change, or HDCP was activated since
- * the last commit
+ * the last commit, provided the HDCP content type also didn't change.
*/
if (old_cp == new_cp ||
(old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
- new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
- return;
+ new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
+ if (old_state->hdcp_content_type ==
+ new_state->hdcp_content_type)
+ return;
+ }
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index be8da85c866a..13555b054930 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -21,7 +21,7 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *new_state);
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_enable(struct intel_connector *connector, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 0ebec69bbbfc..9bf28de10401 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2930,51 +2930,34 @@ static u8 cnp_port_to_ddc_pin(struct drm_i915_private *dev_priv,
static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
- u8 ddc_pin;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
- switch (port) {
- case PORT_A:
- ddc_pin = GMBUS_PIN_1_BXT;
- break;
- case PORT_B:
- ddc_pin = GMBUS_PIN_2_BXT;
- break;
- case PORT_C:
- ddc_pin = GMBUS_PIN_9_TC1_ICP;
- break;
- case PORT_D:
- ddc_pin = GMBUS_PIN_10_TC2_ICP;
- break;
- case PORT_E:
- ddc_pin = GMBUS_PIN_11_TC3_ICP;
- break;
- case PORT_F:
- ddc_pin = GMBUS_PIN_12_TC4_ICP;
- break;
- default:
- MISSING_CASE(port);
- ddc_pin = GMBUS_PIN_2_BXT;
- break;
- }
- return ddc_pin;
+ if (intel_phy_is_combo(dev_priv, phy))
+ return GMBUS_PIN_1_BXT + port;
+ else if (intel_phy_is_tc(dev_priv, phy))
+ return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
+
+ WARN(1, "Unknown port: %c\n", port_name(port));
+ return GMBUS_PIN_2_BXT;
}
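
A worked example of the linear mapping this hunk switches to, as a small standalone program. The pin numbers follow the usual GMBUS_PIN_* numbering and the ICL port-to-PHY split (A..B combo, C..F Type-C) is hard-coded here as an assumption; the kernel asks intel_phy_is_combo()/intel_phy_is_tc() instead.

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };

#define GMBUS_PIN_1_BXT		1	/* first combo PHY pin */
#define GMBUS_PIN_9_TC1_ICP	9	/* first Type-C pin */

/* Sketch: ports A..B sit on combo PHYs, C..F on TC PHYs (ICL layout). */
static int icl_ddc_pin(enum port port)
{
	if (port <= PORT_B)
		return GMBUS_PIN_1_BXT + port;

	return GMBUS_PIN_9_TC1_ICP + (port - PORT_C);
}

int main(void)
{
	for (enum port p = PORT_A; p <= PORT_F; p++)
		printf("port %c -> ddc pin %d\n", 'A' + p, icl_ddc_pin(p));
	return 0;
}
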
static u8 mcc_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
u8 ddc_pin;
- switch (port) {
- case PORT_A:
+ switch (phy) {
+ case PHY_A:
ddc_pin = GMBUS_PIN_1_BXT;
break;
- case PORT_B:
+ case PHY_B:
ddc_pin = GMBUS_PIN_2_BXT;
break;
- case PORT_C:
+ case PHY_C:
ddc_pin = GMBUS_PIN_9_TC1_ICP;
break;
default:
- MISSING_CASE(port);
+ MISSING_CASE(phy);
ddc_pin = GMBUS_PIN_1_BXT;
break;
}
@@ -3019,7 +3002,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
if (HAS_PCH_MCC(dev_priv))
ddc_pin = mcc_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_ICP(dev_priv))
+ else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv))
ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
@@ -3143,6 +3126,32 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_DEBUG_KMS("CEC notifier get failed\n");
}
+static enum intel_hotplug_state
+intel_hdmi_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector, bool irq_received)
+{
+ enum intel_hotplug_state state;
+
+ state = intel_encoder_hotplug(encoder, connector, irq_received);
+
+ /*
+ * On many platforms the HDMI live state signal is known to be
+ * unreliable, so we can't use it to detect if a sink is connected or
+ * not. Instead we detect if it's connected based on whether we can
+ * read the EDID or not. That in turn has a problem during disconnect,
+ * since the HPD interrupt may be raised before the DDC lines get
+ * disconnected (due to how the required lengths of the DDC vs. HPD
+ * connector pins are specified) and so we'll still be able to get a
+ * valid EDID. To solve this, schedule another detection cycle if this
+ * time around we didn't detect any change in the sink's connection
+ * status.
+ */
+ if (state == INTEL_HOTPLUG_UNCHANGED && irq_received)
+ state = INTEL_HOTPLUG_RETRY;
+
+ return state;
+}
+
void intel_hdmi_init(struct drm_i915_private *dev_priv,
i915_reg_t hdmi_reg, enum port port)
{
@@ -3166,7 +3175,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port));
- intel_encoder->hotplug = intel_encoder_hotplug;
+ intel_encoder->hotplug = intel_hdmi_hotplug;
intel_encoder->compute_config = intel_hdmi_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
intel_encoder->disable = pch_disable_hdmi;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index ea3de4acc850..342587d91d57 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -112,6 +112,7 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
+#define HPD_RETRY_DELAY 1000
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
@@ -266,8 +267,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+enum intel_hotplug_state
+intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
@@ -279,7 +282,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
drm_helper_probe_detect(&connector->base, NULL, false);
if (old_status == connector->base.status)
- return false;
+ return INTEL_HOTPLUG_UNCHANGED;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
connector->base.base.id,
@@ -287,7 +290,7 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
drm_get_connector_status_name(old_status),
drm_get_connector_status_name(connector->base.status));
- return true;
+ return INTEL_HOTPLUG_CHANGED;
}
static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
@@ -339,7 +342,7 @@ static void i915_digport_work_func(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
dev_priv->hotplug.event_bits |= old_bits;
spin_unlock_irq(&dev_priv->irq_lock);
- schedule_work(&dev_priv->hotplug.hotplug_work);
+ queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
}
@@ -349,14 +352,16 @@ static void i915_digport_work_func(struct work_struct *work)
static void i915_hotplug_work_func(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, struct drm_i915_private, hotplug.hotplug_work);
+ container_of(work, struct drm_i915_private,
+ hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- bool changed = false;
+ u32 changed = 0, retry = 0;
u32 hpd_event_bits;
+ u32 hpd_retry_bits;
mutex_lock(&dev->mode_config.mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
@@ -365,6 +370,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
hpd_event_bits = dev_priv->hotplug.event_bits;
dev_priv->hotplug.event_bits = 0;
+ hpd_retry_bits = dev_priv->hotplug.retry_bits;
+ dev_priv->hotplug.retry_bits = 0;
/* Enable polling for connectors which had HPD IRQ storms */
intel_hpd_irq_storm_switch_to_polling(dev_priv);
@@ -373,16 +380,29 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
+ u32 hpd_bit;
+
intel_connector = to_intel_connector(connector);
if (!intel_connector->encoder)
continue;
intel_encoder = intel_connector->encoder;
- if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
+ hpd_bit = BIT(intel_encoder->hpd_pin);
+ if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
connector->name, intel_encoder->hpd_pin);
- changed |= intel_encoder->hotplug(intel_encoder,
- intel_connector);
+ switch (intel_encoder->hotplug(intel_encoder,
+ intel_connector,
+ hpd_event_bits & hpd_bit)) {
+ case INTEL_HOTPLUG_UNCHANGED:
+ break;
+ case INTEL_HOTPLUG_CHANGED:
+ changed |= hpd_bit;
+ break;
+ case INTEL_HOTPLUG_RETRY:
+ retry |= hpd_bit;
+ break;
+ }
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -390,6 +410,17 @@ static void i915_hotplug_work_func(struct work_struct *work)
if (changed)
drm_kms_helper_hotplug_event(dev);
+
+ /* Remove shared HPD pins that have changed */
+ retry &= ~changed;
+ if (retry) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ dev_priv->hotplug.retry_bits |= retry;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ mod_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work,
+ msecs_to_jiffies(HPD_RETRY_DELAY));
+ }
}
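
The retry bookkeeping is a per-pin bitmask, and since several connectors can share one HPD pin, a pin is re-armed only if nothing on it reported a confirmed change. A minimal standalone illustration of the masking (the pin assignments are invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical pins: bit 0 = HPD_PORT_A, bit 1 = HPD_PORT_B. */
	uint32_t changed = 0x1;	/* a connector on pin 0 changed state */
	uint32_t retry = 0x3;	/* connectors on pins 0 and 1 want a retry */

	/* Pins with a confirmed change don't need another detection cycle. */
	retry &= ~changed;

	printf("pins to re-poll after HPD_RETRY_DELAY: 0x%x\n", retry); /* 0x2 */
	return 0;
}
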
@@ -516,7 +547,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
if (queue_dig)
queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work);
if (queue_hp)
- schedule_work(&dev_priv->hotplug.hotplug_work);
+ queue_delayed_work(system_wq, &dev_priv->hotplug.hotplug_work, 0);
}
/**
@@ -636,7 +667,8 @@ void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
void intel_hpd_init_work(struct drm_i915_private *dev_priv)
{
- INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
+ INIT_DELAYED_WORK(&dev_priv->hotplug.hotplug_work,
+ i915_hotplug_work_func);
INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
@@ -650,11 +682,12 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
dev_priv->hotplug.long_port_mask = 0;
dev_priv->hotplug.short_port_mask = 0;
dev_priv->hotplug.event_bits = 0;
+ dev_priv->hotplug.retry_bits = 0;
spin_unlock_irq(&dev_priv->irq_lock);
cancel_work_sync(&dev_priv->hotplug.dig_port_work);
- cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+ cancel_delayed_work_sync(&dev_priv->hotplug.hotplug_work);
cancel_work_sync(&dev_priv->hotplug.poll_init_work);
cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 805f897dbb7a..b0cd447b7fbc 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -15,8 +15,9 @@ struct intel_connector;
struct intel_encoder;
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
-bool intel_encoder_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector);
+enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 21339b7f6a3e..07929726b780 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -175,6 +175,7 @@ struct overlay_registers {
struct intel_overlay {
struct drm_i915_private *i915;
+ struct intel_context *context;
struct intel_crtc *crtc;
struct i915_vma *vma;
struct i915_vma *old_vma;
@@ -239,9 +240,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
- struct intel_engine_cs *engine = overlay->i915->engine[RCS0];
-
- return i915_request_create(engine->kernel_context);
+ return i915_request_create(overlay->context);
}
/* overlay needs to be disable in OCMD reg */
@@ -1359,11 +1358,16 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
if (!HAS_OVERLAY(dev_priv))
return;
+ if (!HAS_ENGINE(dev_priv, RCS0))
+ return;
+
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
if (!overlay)
return;
overlay->i915 = dev_priv;
+ overlay->context = dev_priv->engine[RCS0]->kernel_context;
+ GEM_BUG_ON(!overlay->context);
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 1e2c4307d05a..9a48f7a01e7e 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -667,5 +667,5 @@ void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index ceda03e5a3d4..c5e2dfd7ef80 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -274,130 +274,145 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
return false;
}
-#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+#define SDVO_CMD_NAME_ENTRY(cmd_) { .cmd = SDVO_CMD_ ## cmd_, .name = #cmd_ }
+
/** Mapping of command numbers to names, for debug output */
-static const struct _sdvo_cmd_name {
+static const struct {
u8 cmd;
const char *name;
} __attribute__ ((packed)) sdvo_cmd_names[] = {
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+ SDVO_CMD_NAME_ENTRY(RESET),
+ SDVO_CMD_NAME_ENTRY(GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(GET_SUPPORTED_ENHANCEMENTS),
/* Add the op code for SDVO enhancements */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SET_HUE),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SET_TV_LUMA_FILTER),
/* HDMI op code */
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(GET_HBUF_DATA),
};
+#undef SDVO_CMD_NAME_ENTRY
+
+static const char *sdvo_cmd_name(u8 cmd)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd)
+ return sdvo_cmd_names[i].name;
+ }
+
+ return NULL;
+}
+
#define SDVO_NAME(sdvo) ((sdvo)->port == PORT_B ? "SDVOB" : "SDVOC")
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
+ const char *cmd_name;
int i, pos = 0;
#define BUF_LEN 256
char buffer[BUF_LEN];
@@ -412,15 +427,12 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
for (; i < 8; i++) {
BUF_PRINT(" ");
}
- for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
- if (cmd == sdvo_cmd_names[i].cmd) {
- BUF_PRINT("(%s)", sdvo_cmd_names[i].name);
- break;
- }
- }
- if (i == ARRAY_SIZE(sdvo_cmd_names)) {
+
+ cmd_name = sdvo_cmd_name(cmd);
+ if (cmd_name)
+ BUF_PRINT("(%s)", cmd_name);
+ else
BUF_PRINT("(%02X)", cmd);
- }
BUG_ON(pos >= BUF_LEN - 1);
#undef BUF_PRINT
#undef BUF_LEN
@@ -429,15 +441,23 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
}
static const char * const cmd_status_names[] = {
- "Power on",
- "Success",
- "Not supported",
- "Invalid arg",
- "Pending",
- "Target not specified",
- "Scaling not supported"
+ [SDVO_CMD_STATUS_POWER_ON] = "Power on",
+ [SDVO_CMD_STATUS_SUCCESS] = "Success",
+ [SDVO_CMD_STATUS_NOTSUPP] = "Not supported",
+ [SDVO_CMD_STATUS_INVALID_ARG] = "Invalid arg",
+ [SDVO_CMD_STATUS_PENDING] = "Pending",
+ [SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED] = "Target not specified",
+ [SDVO_CMD_STATUS_SCALING_NOT_SUPP] = "Scaling not supported",
};
+static const char *sdvo_cmd_status(u8 status)
+{
+ if (status < ARRAY_SIZE(cmd_status_names))
+ return cmd_status_names[status];
+ else
+ return NULL;
+}
+
static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len,
bool unlocked)
@@ -516,6 +536,7 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
+ const char *cmd_status;
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i, pos = 0;
@@ -562,8 +583,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
#define BUF_PRINT(args...) \
pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- BUF_PRINT("(%s)", cmd_status_names[status]);
+ cmd_status = sdvo_cmd_status(status);
+ if (cmd_status)
+ BUF_PRINT("(%s)", cmd_status);
else
BUF_PRINT("(??? %d)", status);
@@ -929,6 +951,20 @@ static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
&audio_state, 1);
}
+static bool intel_sdvo_get_hbuf_size(struct intel_sdvo *intel_sdvo,
+ u8 *hbuf_size)
+{
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ hbuf_size, 1))
+ return false;
+
+ /* Buffer size is 0 based, hooray! However zero means zero. */
+ if (*hbuf_size)
+ (*hbuf_size)++;
+
+ return true;
+}
+
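To make the 0-based convention above concrete: a raw register value N normally encodes N + 1 bytes, but raw 0 really does mean zero. A minimal standalone sketch of the same decode (plain userspace C, not driver code):

#include <stdio.h>

/* Mirror of the decode above: the HBUF size register is 0-based
 * (raw N -> N + 1 bytes), except that raw 0 stays 0. */
static unsigned int decode_hbuf_size(unsigned int raw)
{
	return raw ? raw + 1 : 0;
}

int main(void)
{
	/* raw 0 -> 0, raw 16 -> 17, raw 31 -> 32 */
	printf("%u %u %u\n", decode_hbuf_size(0),
	       decode_hbuf_size(16), decode_hbuf_size(31));
	return 0;
}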
#if 0
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
@@ -972,14 +1008,10 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
set_buf_index, 2))
return false;
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
- &hbuf_size, 1))
+ if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
return false;
- /* Buffer size is 0 based, hooray! */
- hbuf_size++;
-
- DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
if (hbuf_size < length)
@@ -1030,14 +1062,10 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
if (tx_rate == SDVO_HBUF_TX_DISABLED)
return 0;
- if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
- &hbuf_size, 1))
- return -ENXIO;
-
- /* Buffer size is 0 based, hooray! */
- hbuf_size++;
+ if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
+		return -ENXIO;
- DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
if_index, length, hbuf_size);
hbuf_size = min_t(unsigned int, length, hbuf_size);
@@ -1893,12 +1921,14 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
&intel_sdvo->hotplug_active, 2);
}
-static bool intel_sdvo_hotplug(struct intel_encoder *encoder,
- struct intel_connector *connector)
+static enum intel_hotplug_state
+intel_sdvo_hotplug(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received)
{
intel_sdvo_enable_hotplug(encoder);
- return intel_encoder_hotplug(encoder, connector);
+ return intel_encoder_hotplug(encoder, connector, irq_received);
}
static bool
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 004b52027ae8..53c6594c4588 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -441,9 +441,21 @@ icl_program_input_csc(struct intel_plane *plane,
*/
[DRM_COLOR_YCBCR_BT709] = {
0x7C98, 0x7800, 0x0,
- 0x9EF8, 0x7800, 0xABF8,
+ 0x9EF8, 0x7800, 0xAC00,
0x0, 0x7800, 0x7ED8,
},
+ /*
+ * BT.2020 full range YCbCr -> full range RGB
+ * The matrix required is :
+ * [1.000, 0.000, 1.474,
+ * 1.000, -0.1645, -0.5713,
+ * 1.000, 1.8814, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x7BC8, 0x7800, 0x0,
+ 0x8928, 0x7800, 0xAA88,
+ 0x0, 0x7800, 0x7F10,
+ },
};
/* Matrix for Limited Range to Full Range Conversion */
@@ -451,26 +463,38 @@ icl_program_input_csc(struct intel_plane *plane,
/*
	 * BT.601 Limited range YCbCr -> full range RGB
* The matrix required is :
- * [1.164384, 0.000, 1.596370,
- * 1.138393, -0.382500, -0.794598,
- * 1.138393, 1.971696, 0.0000]
+ * [1.164384, 0.000, 1.596027,
+ * 1.164384, -0.39175, -0.812813,
+ * 1.164384, 2.017232, 0.0000]
*/
[DRM_COLOR_YCBCR_BT601] = {
0x7CC8, 0x7950, 0x0,
- 0x8CB8, 0x7918, 0x9C40,
- 0x0, 0x7918, 0x7FC8,
+ 0x8D00, 0x7950, 0x9C88,
+ 0x0, 0x7950, 0x6810,
},
/*
* BT.709 Limited range YCbCr -> full range RGB
* The matrix required is :
- * [1.164, 0.000, 1.833671,
- * 1.138393, -0.213249, -0.532909,
- * 1.138393, 2.112402, 0.0000]
+ * [1.164384, 0.000, 1.792741,
+ * 1.164384, -0.213249, -0.532909,
+ * 1.164384, 2.112402, 0.0000]
*/
[DRM_COLOR_YCBCR_BT709] = {
- 0x7EA8, 0x7950, 0x0,
- 0x8888, 0x7918, 0xADA8,
- 0x0, 0x7918, 0x6870,
+ 0x7E58, 0x7950, 0x0,
+ 0x8888, 0x7950, 0xADA8,
+ 0x0, 0x7950, 0x6870,
+ },
+ /*
+ * BT.2020 Limited range YCbCr -> full range RGB
+ * The matrix required is :
+ * [1.164, 0.000, 1.678,
+ * 1.164, -0.1873, -0.6504,
+ * 1.164, 2.1417, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT2020] = {
+ 0x7D70, 0x7950, 0x0,
+ 0x8A68, 0x7950, 0xAC00,
+ 0x0, 0x7950, 0x6890,
},
};
const u16 *csc;
@@ -492,8 +516,11 @@ icl_program_input_csc(struct intel_plane *plane,
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
PREOFF_YUV_TO_RGB_HI);
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
+ if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
+ else
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
PREOFF_YUV_TO_RGB_LO);
I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
@@ -683,6 +710,16 @@ skl_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static void i9xx_plane_linear_gamma(u16 gamma[8])
+{
+ /* The points are not evenly spaced. */
+ static const u8 in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
+ int i;
+
+ for (i = 0; i < 8; i++)
+ gamma[i] = (in[i] << 8) / 32;
+}
+
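The table above is linear despite the uneven input spacing because (in[i] << 8) / 32 rescales the 0..32 inputs onto 0..256. A quick standalone check (plain userspace C, not driver code):

#include <stdio.h>

int main(void)
{
	static const unsigned char in[8] = { 0, 1, 2, 4, 8, 16, 24, 32 };
	int i;

	/* (in[i] << 8) / 32 maps 0..32 onto 0..256, i.e. 0.0..1.0 in
	 * 8-bit fractional fixed point, so every point sits on the
	 * identity curve despite the uneven spacing. */
	for (i = 0; i < 8; i++)
		printf("in=%2d -> gamma=%3d\n", in[i], (in[i] << 8) / 32);
	return 0;
}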
static void
chv_update_csc(const struct intel_plane_state *plane_state)
{
@@ -858,6 +895,31 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
+static void vlv_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+ u16 gamma[8];
+ int i;
+
+	/* RGB data seems to always bypass the gamma */
+ if (!fb->format->is_yuv)
+ return;
+
+ i9xx_plane_linear_gamma(gamma);
+
+	/* FIXME these registers are single buffered :( */
+ /* The two end points are implicit (0.0 and 1.0) */
+ for (i = 1; i < 8 - 1; i++)
+ I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1),
+ gamma[i] << 16 |
+ gamma[i] << 8 |
+ gamma[i]);
+}
+
static void
vlv_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -916,6 +978,7 @@ vlv_update_plane(struct intel_plane *plane,
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_update_clrc(plane_state);
+ vlv_update_gamma(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1013,6 +1076,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return 0;
}
+ sprctl |= SPRITE_INT_GAMMA_DISABLE;
+
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709;
@@ -1033,6 +1098,45 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
return sprctl;
}
+static void ivb_sprite_linear_gamma(u16 gamma[18])
+{
+ int i;
+
+ for (i = 0; i < 17; i++)
+ gamma[i] = (i << 10) / 16;
+
+ gamma[i] = 3 << 10;
+ i++;
+}
+
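A hedged sketch of the values the IVB table ends up holding, assuming the usual reading that 1 << 10 represents 1.0 and the 18th entry extends the range to 3.0 (plain userspace C, not driver code):

#include <stdio.h>

int main(void)
{
	int i;

	/* (i << 10) / 16 maps i = 0..16 onto 0..1024 (0.0..1.0 when
	 * 1 << 10 is read as 1.0); the final entry, 3 << 10, reads as
	 * 3.0 and covers the extended-range tail. */
	for (i = 0; i < 17; i++)
		printf("gamma[%2d] = %4d\n", i, (i << 10) / 16);
	printf("gamma[17] = %4d\n", 3 << 10);
	return 0;
}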
+static void ivb_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ u16 gamma[18];
+ int i;
+
+ ivb_sprite_linear_gamma(gamma);
+
+	/* FIXME these registers are single buffered :( */
+ for (i = 0; i < 16; i++)
+ I915_WRITE_FW(SPRGAMC(pipe, i),
+ gamma[i] << 20 |
+ gamma[i] << 10 |
+ gamma[i]);
+
+ I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]);
+ I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]);
+ I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]);
+ i++;
+
+ I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]);
+ I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]);
+ I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]);
+ i++;
+}
+
static void
ivb_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1099,6 +1203,8 @@ ivb_update_plane(struct intel_plane *plane,
I915_WRITE_FW(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ ivb_update_gamma(plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1224,6 +1330,66 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
return dvscntr;
}
+static void g4x_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
+ u16 gamma[8];
+ int i;
+
+	/* RGB data seems to always bypass the gamma */
+ if (!fb->format->is_yuv)
+ return;
+
+ i9xx_plane_linear_gamma(gamma);
+
+	/* FIXME these registers are single buffered :( */
+ /* The two end points are implicit (0.0 and 1.0) */
+ for (i = 1; i < 8 - 1; i++)
+ I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1),
+ gamma[i] << 16 |
+ gamma[i] << 8 |
+ gamma[i]);
+}
+
+static void ilk_sprite_linear_gamma(u16 gamma[17])
+{
+ int i;
+
+ for (i = 0; i < 17; i++)
+ gamma[i] = (i << 10) / 16;
+}
+
+static void ilk_update_gamma(const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = plane->pipe;
+ u16 gamma[17];
+ int i;
+
+	/* RGB data seems to always bypass the gamma */
+ if (!fb->format->is_yuv)
+ return;
+
+ ilk_sprite_linear_gamma(gamma);
+
+	/* FIXME these registers are single buffered :( */
+ for (i = 0; i < 16; i++)
+ I915_WRITE_FW(DVSGAMC_ILK(pipe, i),
+ gamma[i] << 20 |
+ gamma[i] << 10 |
+ gamma[i]);
+
+ I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+ I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+ I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+ i++;
+}
+
static void
g4x_update_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1283,6 +1449,11 @@ g4x_update_plane(struct intel_plane *plane,
I915_WRITE_FW(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ if (IS_G4X(dev_priv))
+ g4x_update_gamma(plane_state);
+ else
+ ilk_update_gamma(plane_state);
+
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1347,7 +1518,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
const struct drm_framebuffer *fb = plane_state->base.fb;
const struct drm_rect *src = &plane_state->base.src;
const struct drm_rect *dst = &plane_state->base.dst;
- int src_x, src_y, src_w, src_h, crtc_w, crtc_h;
+ int src_x, src_w, src_h, crtc_w, crtc_h;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
unsigned int cpp = fb->format->cpp[0];
@@ -1358,7 +1529,6 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
crtc_h = drm_rect_height(dst);
src_x = src->x1 >> 16;
- src_y = src->y1 >> 16;
src_w = drm_rect_width(src) >> 16;
src_h = drm_rect_height(src) >> 16;
@@ -1852,28 +2022,7 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const u32 icl_plane_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
-};
-
-static const u32 icl_hdr_plane_formats[] = {
+static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1882,23 +2031,14 @@ static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_XRGB16161616F,
- DRM_FORMAT_XBGR16161616F,
- DRM_FORMAT_ARGB16161616F,
- DRM_FORMAT_ABGR16161616F,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
- DRM_FORMAT_Y210,
- DRM_FORMAT_Y212,
- DRM_FORMAT_Y216,
- DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
+ DRM_FORMAT_NV12,
};
-static const u32 skl_planar_formats[] = {
+static const u32 glk_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1912,9 +2052,12 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
};
-static const u32 glk_planar_formats[] = {
+static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1927,13 +2070,15 @@ static const u32 glk_planar_formats[] = {
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
- DRM_FORMAT_P010,
- DRM_FORMAT_P012,
- DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
};
-static const u32 icl_planar_formats[] = {
+static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1958,7 +2103,7 @@ static const u32 icl_planar_formats[] = {
DRM_FORMAT_XVYU16161616,
};
-static const u32 icl_hdr_planar_formats[] = {
+static const u32 icl_hdr_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -2201,9 +2346,6 @@ static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return plane_id <= PLANE_SPRITE3;
-
/* Display WA #0870: skl, bxt */
if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
return false;
@@ -2217,6 +2359,48 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
return true;
}
+static const u32 *skl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(skl_planar_formats);
+ return skl_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *glk_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ *num_formats = ARRAY_SIZE(glk_planar_formats);
+ return glk_planar_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(skl_plane_formats);
+ return skl_plane_formats;
+ }
+}
+
+static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id,
+ int *num_formats)
+{
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+ return icl_hdr_plane_formats;
+ } else if (icl_is_nv12_y_plane(plane_id)) {
+ *num_formats = ARRAY_SIZE(icl_sdr_y_plane_formats);
+ return icl_sdr_y_plane_formats;
+ } else {
+ *num_formats = ARRAY_SIZE(icl_sdr_uv_plane_formats);
+ return icl_sdr_uv_plane_formats;
+ }
+}
+
static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2270,30 +2454,15 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
if (icl_is_nv12_y_plane(plane_id))
plane->update_slave = icl_update_slave;
- if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- if (icl_is_hdr_plane(dev_priv, plane_id)) {
- formats = icl_hdr_planar_formats;
- num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
- } else if (INTEL_GEN(dev_priv) >= 11) {
- formats = icl_planar_formats;
- num_formats = ARRAY_SIZE(icl_planar_formats);
- } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
- formats = glk_planar_formats;
- num_formats = ARRAY_SIZE(glk_planar_formats);
- } else {
- formats = skl_planar_formats;
- num_formats = ARRAY_SIZE(skl_planar_formats);
- }
- } else if (icl_is_hdr_plane(dev_priv, plane_id)) {
- formats = icl_hdr_plane_formats;
- num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
- } else if (INTEL_GEN(dev_priv) >= 11) {
- formats = icl_plane_formats;
- num_formats = ARRAY_SIZE(icl_plane_formats);
- } else {
- formats = skl_plane_formats;
- num_formats = ARRAY_SIZE(skl_plane_formats);
- }
+ if (INTEL_GEN(dev_priv) >= 11)
+ formats = icl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ formats = glk_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
+ else
+ formats = skl_get_plane_formats(dev_priv, pipe,
+ plane_id, &num_formats);
plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
if (plane->has_ccs)
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
new file mode 100644
index 000000000000..c96a81c2416c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display.h"
+#include "intel_dp_mst.h"
+#include "intel_tc.h"
+
+static const char *tc_port_mode_name(enum tc_port_mode mode)
+{
+ static const char * const names[] = {
+ [TC_PORT_TBT_ALT] = "tbt-alt",
+ [TC_PORT_DP_ALT] = "dp-alt",
+ [TC_PORT_LEGACY] = "legacy",
+ };
+
+ if (WARN_ON(mode >= ARRAY_SIZE(names)))
+ mode = TC_PORT_TBT_ALT;
+
+ return names[mode];
+}
+
+static bool has_modular_fia(struct drm_i915_private *i915)
+{
+ if (!INTEL_INFO(i915)->display.has_modular_fia)
+ return false;
+
+ return intel_uncore_read(&i915->uncore,
+ PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK;
+}
+
+static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915,
+ enum tc_port tc_port)
+{
+ if (!has_modular_fia(i915))
+ return FIA1;
+
+ /*
+	 * Each Modular FIA instance houses 2 TC ports. In SoCs that have more
+	 * than two TC ports, there are multiple instances of Modular FIA.
+ */
+ return tc_port / 2;
+}
+
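Each modular FIA instance thus serves two adjacent TC ports, and the index is simply tc_port / 2. A standalone illustration of the mapping (plain userspace C; the FIA enum is assumed 0-based as in the driver, so index 0 is FIA1):

#include <stdio.h>

int main(void)
{
	int tc_port;

	/* TC ports 0 and 1 share FIA index 0 (FIA1), ports 2 and 3
	 * share index 1 (FIA2), and so on. */
	for (tc_port = 0; tc_port < 6; tc_port++)
		printf("TC#%d -> FIA%d\n", tc_port + 1, tc_port / 2 + 1);
	return 0;
}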
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 lane_mask;
+
+ lane_mask = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+
+ WARN_ON(lane_mask == 0xffffffff);
+
+ return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+ DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+}
+
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref;
+ u32 lane_mask;
+
+ if (dig_port->tc_mode != TC_PORT_DP_ALT)
+ return 4;
+
+ lane_mask = 0;
+ with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ lane_mask = intel_tc_port_get_lane_mask(dig_port);
+
+ switch (lane_mask) {
+ default:
+ MISSING_CASE(lane_mask);
+ /* fall-through */
+ case 0x1:
+ case 0x2:
+ case 0x4:
+ case 0x8:
+ return 1;
+ case 0x3:
+ case 0xc:
+ return 2;
+ case 0xf:
+ return 4;
+ }
+}
+
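For the masks the FIA is expected to report, the switch above amounts to counting assigned lanes, with the explicit cases making unexpected masks loud and falling back to a single lane. A standalone sketch (plain userspace C, not driver code):

#include <stdio.h>

static int max_lane_count(unsigned int mask)
{
	switch (mask) {
	case 0x1: case 0x2: case 0x4: case 0x8:
		return 1;	/* a single lane assigned */
	case 0x3: case 0xc:
		return 2;	/* lanes 0-1 or lanes 2-3 */
	case 0xf:
		return 4;	/* all four lanes */
	default:
		return 1;	/* unexpected mask: be conservative */
	}
}

int main(void)
{
	/* prints "1 2 4" */
	printf("%d %d %d\n", max_lane_count(0x2),
	       max_lane_count(0xc), max_lane_count(0xf));
	return 0;
}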
+void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+
+ switch (required_lanes) {
+ case 1:
+ val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+ break;
+ case 2:
+ val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+ break;
+ case 4:
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+ break;
+ default:
+ MISSING_CASE(required_lanes);
+ }
+
+ intel_uncore_write(uncore,
+ PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
+}
+
+static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
+ u32 live_status_mask)
+{
+ u32 valid_hpd_mask;
+
+ if (dig_port->tc_legacy_port)
+ valid_hpd_mask = BIT(TC_PORT_LEGACY);
+ else
+ valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
+ BIT(TC_PORT_TBT_ALT);
+
+ if (!(live_status_mask & ~valid_hpd_mask))
+ return;
+
+ /* If live status mismatches the VBT flag, trust the live status. */
+	DRM_ERROR("Port %s: live status %08x mismatches the legacy port flag, fixing flag\n",
+ dig_port->tc_port_name, live_status_mask);
+
+ dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
+}
+
+static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 mask = 0;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
+ dig_port->tc_port_name);
+ return mask;
+ }
+
+ if (val & TC_LIVE_STATE_TBT(tc_port))
+ mask |= BIT(TC_PORT_TBT_ALT);
+ if (val & TC_LIVE_STATE_TC(tc_port))
+ mask |= BIT(TC_PORT_DP_ALT);
+
+ if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port))
+ mask |= BIT(TC_PORT_LEGACY);
+
+ /* The sink can be connected only in a single mode. */
+ if (!WARN_ON(hweight32(mask) > 1))
+ tc_port_fixup_legacy_flag(dig_port, mask);
+
+ return mask;
+}
+
+static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
+ return false;
+ }
+
+ return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port);
+}
+
+static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
+ dig_port->tc_port_name,
+ enableddisabled(enable));
+
+ return false;
+ }
+
+ val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+ if (!enable)
+ val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
+
+ intel_uncore_write(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
+
+ if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
+ DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
+ dig_port->tc_port_name);
+
+ return true;
+}
+
+static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+
+ val = intel_uncore_read(uncore,
+ PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ if (val == 0xffffffff) {
+ DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
+ dig_port->tc_port_name);
+ return true;
+ }
+
+ return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port));
+}
+
+/*
+ * This function implements the first part of the Connect Flow described by our
+ * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
+ * lanes, EDID, etc) is done as needed in the typical places.
+ *
+ * Unlike the other ports, type-C ports are not available to use as soon as we
+ * get a hotplug. The type-C PHYs can be shared between multiple controllers:
+ * display, USB, etc. As a result, handshaking through FIA is required around
+ * connect and disconnect to cleanly transfer ownership with the controller and
+ * set the type-C power state.
+ */
+static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ int max_lanes;
+
+ if (!icl_tc_phy_status_complete(dig_port)) {
+ DRM_DEBUG_KMS("Port %s: PHY not ready\n",
+ dig_port->tc_port_name);
+ goto out_set_tbt_alt_mode;
+ }
+
+ if (!icl_tc_phy_set_safe_mode(dig_port, false) &&
+ !WARN_ON(dig_port->tc_legacy_port))
+ goto out_set_tbt_alt_mode;
+
+ max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
+ if (dig_port->tc_legacy_port) {
+ WARN_ON(max_lanes != 4);
+ dig_port->tc_mode = TC_PORT_LEGACY;
+
+ return;
+ }
+
+ /*
+ * Now we have to re-check the live state, in case the port recently
+ * became disconnected. Not necessary for legacy mode.
+ */
+ if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
+ DRM_DEBUG_KMS("Port %s: PHY sudden disconnect\n",
+ dig_port->tc_port_name);
+ goto out_set_safe_mode;
+ }
+
+ if (max_lanes < required_lanes) {
+ DRM_DEBUG_KMS("Port %s: PHY max lanes %d < required lanes %d\n",
+ dig_port->tc_port_name,
+ max_lanes, required_lanes);
+ goto out_set_safe_mode;
+ }
+
+ dig_port->tc_mode = TC_PORT_DP_ALT;
+
+ return;
+
+out_set_safe_mode:
+ icl_tc_phy_set_safe_mode(dig_port, true);
+out_set_tbt_alt_mode:
+ dig_port->tc_mode = TC_PORT_TBT_ALT;
+}
+
+/*
+ * See the comment at the connect function. This implements the Disconnect
+ * Flow.
+ */
+static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
+{
+ switch (dig_port->tc_mode) {
+ case TC_PORT_LEGACY:
+ /* Nothing to do, we never disconnect from legacy mode */
+ break;
+ case TC_PORT_DP_ALT:
+ icl_tc_phy_set_safe_mode(dig_port, true);
+ dig_port->tc_mode = TC_PORT_TBT_ALT;
+ break;
+ case TC_PORT_TBT_ALT:
+ /* Nothing to do, we stay in TBT-alt mode */
+ break;
+ default:
+ MISSING_CASE(dig_port->tc_mode);
+ }
+}
+
+static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
+{
+ if (!icl_tc_phy_status_complete(dig_port)) {
+ DRM_DEBUG_KMS("Port %s: PHY status not complete\n",
+ dig_port->tc_port_name);
+ return dig_port->tc_mode == TC_PORT_TBT_ALT;
+ }
+
+ if (icl_tc_phy_is_in_safe_mode(dig_port)) {
+ DRM_DEBUG_KMS("Port %s: PHY still in safe mode\n",
+ dig_port->tc_port_name);
+
+ return false;
+ }
+
+ return dig_port->tc_mode == TC_PORT_DP_ALT ||
+ dig_port->tc_mode == TC_PORT_LEGACY;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
+{
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+ bool in_safe_mode = icl_tc_phy_is_in_safe_mode(dig_port);
+ enum tc_port_mode mode;
+
+ if (in_safe_mode || WARN_ON(!icl_tc_phy_status_complete(dig_port)))
+ return TC_PORT_TBT_ALT;
+
+ mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
+ if (live_status_mask) {
+ enum tc_port_mode live_mode = fls(live_status_mask) - 1;
+
+ if (!WARN_ON(live_mode == TC_PORT_TBT_ALT))
+ mode = live_mode;
+ }
+
+ return mode;
+}
+
+static enum tc_port_mode
+intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
+{
+ u32 live_status_mask = tc_port_live_status_mask(dig_port);
+
+ if (live_status_mask)
+ return fls(live_status_mask) - 1;
+
+ return icl_tc_phy_status_complete(dig_port) &&
+ dig_port->tc_legacy_port ? TC_PORT_LEGACY :
+ TC_PORT_TBT_ALT;
+}
+
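Since the modes are numbered TBT-alt < DP-alt < legacy, matching the BIT() positions in the live status mask, fls(mask) - 1 resolves to the highest-priority live mode if more than one bit is somehow set. A standalone sketch with a hand-rolled fls() (plain userspace C, not driver code):

#include <stdio.h>

enum { TBT_ALT, DP_ALT, LEGACY };

/* Minimal stand-in for the kernel's fls(): 1-based index of the
 * highest set bit, 0 for an empty mask. */
static int my_fls(unsigned int x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int mask = (1u << DP_ALT) | (1u << LEGACY);

	/* Both DP-alt and legacy live: fls(mask) - 1 == 2 == LEGACY */
	printf("target mode = %d\n", my_fls(mask) - 1);
	return 0;
}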
+static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port_mode old_tc_mode = dig_port->tc_mode;
+
+ intel_display_power_flush_work(i915);
+ WARN_ON(intel_display_power_is_enabled(i915,
+ intel_aux_power_domain(dig_port)));
+
+ icl_tc_phy_disconnect(dig_port);
+ icl_tc_phy_connect(dig_port, required_lanes);
+
+ DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(old_tc_mode),
+ tc_port_mode_name(dig_port->tc_mode));
+}
+
+static void
+intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
+ int refcount)
+{
+ WARN_ON(dig_port->tc_link_refcount);
+ dig_port->tc_link_refcount = refcount;
+}
+
+void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
+{
+ struct intel_encoder *encoder = &dig_port->base;
+ int active_links = 0;
+
+ mutex_lock(&dig_port->tc_lock);
+
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+ if (dig_port->dp.is_mst)
+ active_links = intel_dp_mst_encoder_active_links(dig_port);
+ else if (encoder->base.crtc)
+ active_links = to_intel_crtc(encoder->base.crtc)->active;
+
+ if (active_links) {
+ if (!icl_tc_phy_is_connected(dig_port))
+ DRM_DEBUG_KMS("Port %s: PHY disconnected with %d active link(s)\n",
+ dig_port->tc_port_name, active_links);
+ intel_tc_port_link_init_refcount(dig_port, active_links);
+
+ goto out;
+ }
+
+ if (dig_port->tc_legacy_port)
+ icl_tc_phy_connect(dig_port, 1);
+
+out:
+ DRM_DEBUG_KMS("Port %s: sanitize mode (%s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(dig_port->tc_mode));
+
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
+}
+
+/*
+ * The type-C ports are different because even when they are connected, they may
+ * not be available/usable by the graphics driver: see the comment on
+ * icl_tc_phy_connect(). So in our driver instead of adding the additional
+ * concept of "usable" and making everything check for "connected and usable", we
+ * define a port as "connected" when it is not only connected, but also when it
+ * is usable by the rest of the driver. That maintains the old assumption that
+ * connected ports are usable, and avoids exposing to the users objects they
+ * can't really use.
+ */
+bool intel_tc_port_connected(struct intel_digital_port *dig_port)
+{
+ bool is_connected;
+
+ intel_tc_port_lock(dig_port);
+ is_connected = tc_port_live_status_mask(dig_port) &
+ BIT(dig_port->tc_mode);
+ intel_tc_port_unlock(dig_port);
+
+ return is_connected;
+}
+
+static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);
+
+ mutex_lock(&dig_port->tc_lock);
+
+ if (!dig_port->tc_link_refcount &&
+ intel_tc_port_needs_reset(dig_port))
+ intel_tc_port_reset_mode(dig_port, required_lanes);
+
+ WARN_ON(dig_port->tc_lock_wakeref);
+ dig_port->tc_lock_wakeref = wakeref;
+}
+
+void intel_tc_port_lock(struct intel_digital_port *dig_port)
+{
+ __intel_tc_port_lock(dig_port, 1);
+}
+
+void intel_tc_port_unlock(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);
+
+ mutex_unlock(&dig_port->tc_lock);
+
+ intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
+ wakeref);
+}
+
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+ int required_lanes)
+{
+ __intel_tc_port_lock(dig_port, required_lanes);
+ dig_port->tc_link_refcount++;
+ intel_tc_port_unlock(dig_port);
+}
+
+void intel_tc_port_put_link(struct intel_digital_port *dig_port)
+{
+ mutex_lock(&dig_port->tc_lock);
+ dig_port->tc_link_refcount--;
+ mutex_unlock(&dig_port->tc_lock);
+}
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+ if (WARN_ON(tc_port == PORT_TC_NONE))
+ return;
+
+ snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
+ "%c/TC#%d", port_name(port), tc_port + 1);
+
+ mutex_init(&dig_port->tc_lock);
+ dig_port->tc_legacy_port = is_legacy;
+ dig_port->tc_link_refcount = 0;
+ dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
new file mode 100644
index 000000000000..22fe922ac9cf
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_TC_H__
+#define __INTEL_TC_H__
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "intel_drv.h"
+
+bool intel_tc_port_connected(struct intel_digital_port *dig_port);
+u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
+int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port);
+void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
+ int required_lanes);
+
+void intel_tc_port_sanitize(struct intel_digital_port *dig_port);
+void intel_tc_port_lock(struct intel_digital_port *dig_port);
+void intel_tc_port_unlock(struct intel_digital_port *dig_port);
+void intel_tc_port_get_link(struct intel_digital_port *dig_port,
+ int required_lanes);
+void intel_tc_port_put_link(struct intel_digital_port *dig_port);
+
+static inline int intel_tc_port_ref_held(struct intel_digital_port *dig_port)
+{
+ return mutex_is_locked(&dig_port->tc_lock) ||
+ dig_port->tc_link_refcount;
+}
+
+void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);
+
+#endif /* __INTEL_TC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 5ddbe71ab423..09cd37fb0b1c 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -310,10 +310,13 @@ enum vbt_gmbus_ddi {
DDC_BUS_DDI_F,
ICL_DDC_BUS_DDI_A = 0x1,
ICL_DDC_BUS_DDI_B,
+ TGL_DDC_BUS_DDI_C,
ICL_DDC_BUS_PORT_1 = 0x4,
ICL_DDC_BUS_PORT_2,
ICL_DDC_BUS_PORT_3,
ICL_DDC_BUS_PORT_4,
+ TGL_DDC_BUS_PORT_5,
+ TGL_DDC_BUS_PORT_6,
MCC_DDC_BUS_DDI_A = 0x1,
MCC_DDC_BUS_DDI_B,
MCC_DDC_BUS_DDI_C = 0x4,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index ffec807b8960..4ab19c432ef5 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -459,17 +459,23 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->base.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
- * On ICL VDSC/joining for eDP transcoder uses a separate power well PW2
- * This requires POWER_DOMAIN_TRANSCODER_EDP_VDSC power domain.
+ * On ICL VDSC/joining for eDP transcoder uses a separate power well,
+ * PW2. This requires POWER_DOMAIN_TRANSCODER_VDSC_PW2 power domain.
* For any other transcoder, VDSC/joining uses the power well associated
* with the pipe/transcoder in use. Hence another reference on the
* transcoder power domain will suffice.
+ *
+ * On TGL we have the same mapping, but for transcoder A (the special
+ * TRANSCODER_EDP is gone).
*/
- if (cpu_transcoder == TRANSCODER_EDP)
- return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+ if (INTEL_GEN(i915) >= 12 && cpu_transcoder == TRANSCODER_A)
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
+ else if (cpu_transcoder == TRANSCODER_EDP)
+ return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
else
return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index e272d826210a..c8002ffd29e7 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1644,7 +1644,7 @@ vlv_dsi_get_panel_orientation(struct intel_connector *connector)
return intel_dsi_get_panel_orientation(connector);
}
-static void intel_dsi_add_properties(struct intel_connector *connector)
+static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1983,7 +1983,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
- intel_dsi_add_properties(intel_connector);
+ vlv_dsi_add_properties(intel_connector);
return;
diff --git a/drivers/gpu/drm/i915/gem/Makefile b/drivers/gpu/drm/i915/gem/Makefile
index 07e7b8b840ea..7e73aa587967 100644
--- a/drivers/gpu/drm/i915/gem/Makefile
+++ b/drivers/gpu/drm/i915/gem/Makefile
@@ -1 +1,5 @@
-include $(src)/Makefile.header-test # Extra header tests
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
+# Extra header tests
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gem/Makefile.header-test b/drivers/gpu/drm/i915/gem/Makefile.header-test
deleted file mode 100644
index 61e06cbb4b32..000000000000
--- a/drivers/gpu/drm/i915/gem/Makefile.header-test
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header_test := $(notdir $(wildcard $(src)/*.h))
-
-quiet_cmd_header_test = HDRTEST $@
- cmd_header_test = echo "\#include \"$(<F)\"" > $@
-
-header_test_%.c: %.h
- $(call cmd,header_test)
-
-extra-$(CONFIG_DRM_I915_WERROR) += \
- $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
-
-clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 1fdab0767a47..2312a0c6af89 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -72,7 +72,6 @@ static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
vma->ops = &proxy_vma_ops;
sleeve->vma = vma;
- sleeve->obj = i915_gem_object_get(obj);
sleeve->pages = pages;
sleeve->page_sizes = *page_sizes;
@@ -85,7 +84,6 @@ err_free:
static void destroy_sleeve(struct i915_sleeve *sleeve)
{
- i915_gem_object_put(sleeve->obj);
kfree(sleeve);
}
@@ -155,7 +153,7 @@ static void clear_pages_worker(struct work_struct *work)
{
struct clear_pages_work *w = container_of(work, typeof(*w), work);
struct drm_i915_private *i915 = w->ce->gem_context->i915;
- struct drm_i915_gem_object *obj = w->sleeve->obj;
+ struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
struct i915_vma *vma = w->sleeve->vma;
struct i915_request *rq;
int err = w->dma.error;
@@ -164,11 +162,12 @@ static void clear_pages_worker(struct work_struct *work)
goto out_signal;
if (obj->cache_dirty) {
- obj->write_domain = 0;
if (i915_gem_object_has_struct_page(obj))
drm_clflush_sg(w->sleeve->pages);
obj->cache_dirty = false;
}
+ obj->read_domains = I915_GEM_GPU_DOMAINS;
+ obj->write_domain = 0;
/* XXX: we need to kill this */
mutex_lock(&i915->drm.struct_mutex);
@@ -193,10 +192,12 @@ static void clear_pages_worker(struct work_struct *work)
goto out_request;
}
- /* XXX: more feverish nightmares await */
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ /*
+ * w->dma is already exported via (vma|obj)->resv we need only
+ * keep track of the GPU activity within this vma/request, and
+ * propagate the signal from the request to w->dma.
+ */
+ err = i915_active_ref(&vma->active, rq->fence.context, rq);
if (err)
goto out_request;
@@ -249,13 +250,11 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
u32 value)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_context *ctx = ce->gem_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct clear_pages_work *work;
struct i915_sleeve *sleeve;
int err;
- sleeve = create_sleeve(vm, obj, pages, page_sizes);
+ sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
if (IS_ERR(sleeve))
return PTR_ERR(sleeve);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0f2c22a3bcb6..b28c7ca681a8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -316,7 +316,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
- i915_timeline_put(ctx->timeline);
+ intel_timeline_put(ctx->timeline);
kfree(ctx->name);
put_pid(ctx->pid);
@@ -459,8 +459,7 @@ __create_context(struct drm_i915_private *i915)
i915_gem_context_set_recoverable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
- ctx->desc_template =
- default_desc_template(i915, &i915->mm.aliasing_ppgtt->vm);
+ ctx->desc_template = default_desc_template(i915, NULL);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -476,10 +475,18 @@ static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
struct i915_address_space *old = ctx->vm;
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
ctx->vm = i915_vm_get(vm);
ctx->desc_template = default_desc_template(ctx->i915, vm);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(vm);
+ }
+ i915_gem_context_unlock_engines(ctx);
+
return old;
}
@@ -528,9 +535,9 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
}
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
- timeline = i915_timeline_create(dev_priv, NULL);
+ timeline = intel_timeline_create(&dev_priv->gt, NULL);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
@@ -644,20 +651,13 @@ static void init_contexts(struct drm_i915_private *i915)
init_llist_head(&i915->contexts.free_list);
}
-static bool needs_preempt_context(struct drm_i915_private *i915)
-{
- return HAS_EXECLISTS(i915);
-}
-
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
- GEM_BUG_ON(dev_priv->preempt_context);
- intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@@ -677,15 +677,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
dev_priv->kernel_context = ctx;
- /* highest priority; preempting task */
- if (needs_preempt_context(dev_priv)) {
- ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
- if (!IS_ERR(ctx))
- dev_priv->preempt_context = ctx;
- else
- DRM_ERROR("Failed to create preempt context; disabling preemption\n");
- }
-
DRM_DEBUG_DRIVER("%s context support initialized\n",
DRIVER_CAPS(dev_priv)->has_logical_contexts ?
"logical" : "fake");
@@ -696,8 +687,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->drm.struct_mutex);
- if (i915->preempt_context)
- destroy_kernel_context(&i915->preempt_context);
destroy_kernel_context(&i915->kernel_context);
/* Must free all deferred contexts (via flush_workqueue) first */
@@ -923,8 +912,12 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (!cb)
return -ENOMEM;
- i915_active_init(i915, &cb->base, cb_retire);
- i915_active_acquire(&cb->base);
+ i915_active_init(i915, &cb->base, NULL, cb_retire);
+ err = i915_active_acquire(&cb->base);
+ if (err) {
+ kfree(cb);
+ return err;
+ }
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
struct i915_request *rq;
@@ -1019,7 +1012,7 @@ static void set_ppgtt_barrier(void *data)
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct i915_address_space *vm = rq->gem_context->vm;
+ struct i915_address_space *vm = rq->hw_context->vm;
struct intel_engine_cs *engine = rq->engine;
u32 base = engine->mmio_base;
u32 *cs;
@@ -1128,9 +1121,8 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
set_ppgtt_barrier,
old);
if (err) {
- ctx->vm = old;
- ctx->desc_template = default_desc_template(ctx->i915, old);
- i915_vm_put(vm);
+ i915_vm_put(__set_ppgtt(ctx, old));
+ i915_vm_put(old);
}
unlock:
@@ -1187,26 +1179,11 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
if (IS_ERR(rq))
return PTR_ERR(rq);
- /* Queue this switch after all other activity by this context. */
- ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
- if (ret)
- goto out_add;
-
- /*
- * Guarantee context image and the timeline remains pinned until the
- * modifying request is retired by setting the ce activity tracker.
- *
- * But we only need to take one pin on the account of it. Or in other
- * words transfer the pinned ce object to tracked active request.
- */
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
- ret = i915_active_ref(&ce->active, rq->fence.context, rq);
- if (ret)
- goto out_add;
-
- ret = gen8_emit_rpcs_config(rq, ce, sseu);
+ /* Serialise with the remote context */
+ ret = intel_context_prepare_remote_request(ce, rq);
+ if (ret == 0)
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
-out_add:
i915_request_add(rq);
return ret;
}
@@ -2015,8 +1992,8 @@ static int clone_timeline(struct i915_gem_context *dst,
GEM_BUG_ON(src->timeline == dst->timeline);
if (dst->timeline)
- i915_timeline_put(dst->timeline);
- dst->timeline = i915_timeline_get(src->timeline);
+ intel_timeline_put(dst->timeline);
+ dst->timeline = intel_timeline_get(src->timeline);
}
return 0;
@@ -2141,7 +2118,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
return -EINVAL;
- ret = i915_terminally_wedged(i915);
+ ret = intel_gt_terminally_wedged(&i915->gt);
if (ret)
return ret;
@@ -2287,8 +2264,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->size = 0;
if (ctx->vm)
args->value = ctx->vm->total;
- else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+ else if (to_i915(dev)->ggtt.alias)
+ args->value = to_i915(dev)->ggtt.alias->vm.total;
else
args->value = to_i915(dev)->ggtt.vm.total;
break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 9691dd062f72..106e2ccf7a4c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -198,12 +198,6 @@ i915_gem_context_unlock_engines(struct i915_gem_context *ctx)
}
static inline struct intel_context *
-i915_gem_context_lookup_engine(struct i915_gem_context *ctx, unsigned int idx)
-{
- return i915_gem_context_engines(ctx)->engines[idx];
-}
-
-static inline struct intel_context *
i915_gem_context_get_engine(struct i915_gem_context *ctx, unsigned int idx)
{
struct intel_context *ce = ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index cc513410eeef..0ee61482ef94 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -26,7 +26,7 @@ struct pid;
struct drm_i915_private;
struct drm_i915_file_private;
struct i915_address_space;
-struct i915_timeline;
+struct intel_timeline;
struct intel_ring;
struct i915_gem_engines {
@@ -77,7 +77,7 @@ struct i915_gem_context {
struct i915_gem_engines __rcu *engines;
struct mutex engines_mutex; /* guards writes to engines */
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
/**
* @vm: unique address space (GTT)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index cbf1701d3acc..570b20ad9e58 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -204,8 +204,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.end_cpu_access = i915_gem_end_cpu_access,
};
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags)
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -222,7 +221,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
return ERR_PTR(ret);
}
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 5fae0e50aad0..cbd7c6e3a1f8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -16,6 +16,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "i915_gem_ioctls.h"
@@ -222,7 +223,6 @@ struct i915_execbuffer {
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
- struct i915_address_space *vm; /** GTT and vma for the request */
struct i915_request *request; /** our request to build */
struct i915_vma *batch; /** identity of the batch obj/vma */
@@ -696,7 +696,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
case 1:
/* Too fragmented, unbind everything and retry */
- err = i915_gem_evict_vm(eb->vm);
+ err = i915_gem_evict_vm(eb->context->vm);
if (err)
return err;
break;
@@ -724,12 +724,8 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->gem_context = ctx;
- if (ctx->vm) {
- eb->vm = ctx->vm;
+ if (ctx->vm)
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
- } else {
- eb->vm = &eb->i915->ggtt.vm;
- }
eb->context_flags = 0;
if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
@@ -831,7 +827,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_vma;
}
- vma = i915_vma_instance(obj, eb->vm, NULL);
+ vma = i915_vma_instance(obj, eb->context->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -994,7 +990,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
__i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
i915_gem_object_unpin_map(cache->rq->batch->obj);
- i915_gem_chipset_flush(cache->rq->i915);
+ intel_gt_chipset_flush(cache->rq->engine->gt);
i915_request_add(cache->rq);
cache->rq = NULL;
@@ -1954,7 +1950,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
eb->exec = NULL;
/* Unconditionally flush any chipset caches (for streaming writes). */
- i915_gem_chipset_flush(eb->i915);
+ intel_gt_chipset_flush(eb->engine->gt);
return 0;
err_skip:
@@ -2129,7 +2125,7 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- err = i915_terminally_wedged(eb->i915);
+ err = intel_gt_terminally_wedged(ce->engine->gt);
if (err)
return err;
@@ -2436,7 +2432,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
- intel_gt_pm_get(eb.i915);
+ intel_gt_pm_get(&eb.i915->gt);
err = i915_mutex_lock_interruptible(dev);
if (err)
@@ -2606,7 +2602,7 @@ err_engine:
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_gt_pm_put(eb.i915);
+ intel_gt_pm_put(&eb.i915->gt);
i915_gem_context_put(eb.gem_context);
err_destroy:
eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 39a661927d8e..dfa525e37eb8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -7,6 +7,8 @@
#include <linux/mman.h>
#include <linux/sizes.h>
+#include "gt/intel_gt.h"
+
#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
@@ -246,7 +248,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
wakeref = intel_runtime_pm_get(rpm);
- srcu = i915_reset_trylock(i915);
+ srcu = intel_gt_reset_trylock(ggtt->vm.gt);
if (srcu < 0) {
ret = srcu;
goto err_rpm;
@@ -326,7 +328,7 @@ err_unpin:
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_reset:
- i915_reset_unlock(i915, srcu);
+ intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
intel_runtime_pm_put(rpm, wakeref);
i915_gem_object_unpin_pages(obj);
@@ -339,7 +341,7 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
- if (!i915_terminally_wedged(i915))
+ if (!intel_gt_is_wedged(ggtt->vm.gt))
return VM_FAULT_SIGBUS;
/* else, fall through */
case -EAGAIN:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index be6caccce0c5..d5197a2a106f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -23,7 +23,7 @@
*/
#include "display/intel_frontbuffer.h"
-
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
@@ -146,6 +146,19 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
}
}
+static void __i915_gem_free_object_rcu(struct rcu_head *head)
+{
+ struct drm_i915_gem_object *obj =
+ container_of(head, typeof(*obj), rcu);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ reservation_object_fini(&obj->base._resv);
+ i915_gem_object_free(obj);
+
+ GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
+ atomic_dec(&i915->mm.free_count);
+}
+
static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
@@ -160,7 +173,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
- GEM_BUG_ON(i915_gem_object_is_active(obj));
list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
GEM_BUG_ON(i915_vma_is_active(vma));
vma->flags &= ~I915_VMA_PIN_MASK;
@@ -169,22 +181,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(!list_empty(&obj->vma.list));
GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
- /*
- * This serializes freeing with the shrinker. Since the free
- * is delayed, first by RCU then by the workqueue, we want the
- * shrinker to be able to free pages of unreferenced objects,
- * or else we may oom whilst there are plenty of deferred
- * freed objects.
- */
- if (i915_gem_object_has_pages(obj) &&
- i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- list_del_init(&obj->mm.link);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- }
-
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(atomic_read(&obj->bind_count));
@@ -192,25 +188,21 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
- if (obj->ops->release)
- obj->ops->release(obj);
-
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
+ bitmap_free(obj->bit_17);
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
- drm_gem_object_release(&obj->base);
+ drm_gem_free_mmap_offset(&obj->base);
- bitmap_free(obj->bit_17);
- i915_gem_object_free(obj);
-
- GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
- atomic_dec(&i915->mm.free_count);
+ if (obj->ops->release)
+ obj->ops->release(obj);
- cond_resched();
+ /* But keep the pointer alive for RCU-protected lookups */
+ call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
@@ -261,18 +253,34 @@ static void __i915_gem_free_work(struct work_struct *work)
spin_unlock(&i915->mm.free_lock);
}
-static void __i915_gem_free_object_rcu(struct rcu_head *head)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
- struct drm_i915_gem_object *obj =
- container_of(head, typeof(*obj), rcu);
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
/*
- * We reuse obj->rcu for the freed list, so we had better not treat
- * it like a rcu_head from this point forwards. And we expect all
- * objects to be freed via this path.
+ * Before we free the object, make sure any pure RCU-only
+ * read-side critical sections are complete, e.g.
+ * i915_gem_busy_ioctl(). For the corresponding synchronized
+ * lookup see i915_gem_object_lookup_rcu().
*/
- destroy_rcu_head(&obj->rcu);
+ atomic_inc(&i915->mm.free_count);
+
+ /*
+ * This serializes freeing with the shrinker. Since the free
+ * is delayed, first by RCU then by the workqueue, we want the
+ * shrinker to be able to free pages of unreferenced objects,
+ * or else we may oom whilst there are plenty of deferred
+ * freed objects.
+ */
+ if (i915_gem_object_has_pages(obj) &&
+ i915_gem_object_is_shrinkable(obj)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ list_del_init(&obj->mm.link);
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ }
/*
* Since we require blocking on struct_mutex to unbind the freed
@@ -288,20 +296,6 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
queue_work(i915->wq, &i915->mm.free_work);
}
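The free path is inverted here: i915_gem_free_object() now only unlinks the object from the shrinker lists and queues the worker, while the RCU grace period moves to the very end of teardown, so RCU-only lookups can still dereference the object until __i915_gem_free_object_rcu() runs. A reduced kernel-style sketch of that tail, with hypothetical types:

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
        struct kref ref;
        struct rcu_head rcu;
};

static void obj_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct obj, rcu));
}

static void obj_release(struct kref *ref)
{
        struct obj *obj = container_of(ref, struct obj, ref);

        /* ... unbind vma, drop pages, run ops->release ... */

        /* Keep the memory alive for concurrent RCU-only lookups. */
        call_rcu(&obj->rcu, obj_free_rcu);
}

A lookup path pairs this with rcu_read_lock() and kref_get_unless_zero(&obj->ref), which remains safe to attempt until the grace period ends.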
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
- /*
- * Before we free the object, make sure any pure RCU-only
- * read-side critical sections are complete, e.g.
- * i915_gem_busy_ioctl(). For the corresponding synchronized
- * lookup see i915_gem_object_lookup_rcu().
- */
- atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
-}
-
static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
@@ -319,7 +313,6 @@ void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
unsigned int flush_domains)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_vma *vma;
assert_object_held(obj);
@@ -329,7 +322,8 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
- i915_gem_flush_ggtt_writes(dev_priv);
+ for_each_ggtt_vma(vma, obj)
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
intel_fb_obj_flush(obj,
fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
@@ -340,6 +334,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
i915_vma_unset_ggtt_write(vma);
}
+
break;
case I915_GEM_DOMAIN_WC:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index dfebd5706f16..67aea07ea019 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -81,7 +81,7 @@ i915_gem_object_lookup(struct drm_file *file, u32 handle)
}
__deprecated
-extern struct drm_gem_object *
+struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);
__attribute__((nonnull))
@@ -159,12 +159,6 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
}
static inline bool
-i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
-{
- return READ_ONCE(obj->active_count);
-}
-
-static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
return READ_ONCE(obj->framebuffer_references);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index cb42e3a312e2..685064af32d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -47,15 +47,11 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
struct intel_context *ce,
u32 value)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_gem_context *ctx = ce->gem_context;
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct i915_request *rq;
struct i915_vma *vma;
int err;
- /* XXX: ce->vm please */
- vma = i915_vma_instance(obj, vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 18bf4f8d6d80..34b51fad02de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -154,7 +154,6 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
- unsigned int active_count;
/** Count of how many global VMA are currently pinned for use by HW */
unsigned int pin_global;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 2deac933cf59..102fd7a23d3d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -13,6 +13,7 @@
#include <drm/drm_legacy.h> /* for drm_pci.h! */
#include <drm/drm_pci.h>
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
@@ -60,7 +61,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(to_i915(obj->base.dev));
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) {
@@ -132,16 +133,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
drm_pci_free(obj->base.dev, obj->phys_handle);
}
-static void
-i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
-{
- i915_gem_object_unpin_pages(obj);
-}
-
static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.get_pages = i915_gem_object_get_pages_phys,
.put_pages = i915_gem_object_put_pages_phys,
- .release = i915_gem_object_release_phys,
};
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
@@ -158,7 +152,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (obj->ops != &i915_gem_shmem_ops)
return -EINVAL;
- err = i915_gem_object_unbind(obj);
+ err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 914b5d4112bb..b5561cbdc5ea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -5,6 +5,7 @@
*/
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "i915_drv.h"
@@ -38,7 +39,7 @@ static void i915_gem_park(struct drm_i915_private *i915)
i915_gem_batch_pool_fini(&engine->batch_pool);
}
- i915_timelines_park(i915);
+ intel_timelines_park(i915);
i915_vma_parked(i915);
i915_globals_park();
@@ -54,7 +55,8 @@ static void idle_work_handler(struct work_struct *work)
mutex_lock(&i915->drm.struct_mutex);
intel_wakeref_lock(&i915->gt.wakeref);
- park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
+ park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
+ !work_pending(work));
intel_wakeref_unlock(&i915->gt.wakeref);
if (park)
i915_gem_park(i915);
@@ -105,18 +107,18 @@ static int pm_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
+static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
- bool result = !i915_terminally_wedged(i915);
+ bool result = !intel_gt_is_wedged(gt);
do {
- if (i915_gem_wait_for_idle(i915,
+ if (i915_gem_wait_for_idle(gt->i915,
I915_WAIT_LOCKED |
I915_WAIT_FOR_IDLE_BOOST,
I915_GEM_IDLE_TIMEOUT) == -ETIME) {
/* XXX hide warning from gem_eio */
if (i915_modparams.reset) {
- dev_err(i915->drm.dev,
+ dev_err(gt->i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
}
@@ -125,18 +127,18 @@ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915)
* Forcibly cancel outstanding work and leave
* the gpu quiet.
*/
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
result = false;
}
- } while (i915_retire_requests(i915) && result);
+ } while (i915_retire_requests(gt->i915) && result);
- GEM_BUG_ON(i915->gt.awake);
+ GEM_BUG_ON(gt->awake);
return result;
}
bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
- return switch_to_kernel_context_sync(i915);
+ return switch_to_kernel_context_sync(&i915->gt);
}
void i915_gem_suspend(struct drm_i915_private *i915)
@@ -157,7 +159,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- switch_to_kernel_context_sync(i915);
+ switch_to_kernel_context_sync(&i915->gt);
mutex_unlock(&i915->drm.struct_mutex);
@@ -168,11 +170,11 @@ void i915_gem_suspend(struct drm_i915_private *i915)
GEM_BUG_ON(i915->gt.awake);
flush_work(&i915->gem.idle_work);
- cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_gem_drain_freed_objects(i915);
- intel_uc_suspend(i915);
+ intel_uc_suspend(&i915->gt.uc);
}
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
@@ -237,7 +239,6 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
}
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- intel_uc_sanitize(i915);
i915_gem_sanitize(i915);
}
@@ -261,10 +262,10 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- if (intel_gt_resume(i915))
+ if (intel_gt_resume(&i915->gt))
goto err_wedged;
- intel_uc_resume(i915);
+ intel_uc_resume(&i915->gt.uc);
/* Always reload a context for powersaving. */
if (!i915_gem_load_power_context(i915))
@@ -276,10 +277,10 @@ out_unlock:
return;
err_wedged:
- if (!i915_reset_failed(i915)) {
+ if (!intel_gt_is_wedged(&i915->gt)) {
dev_err(i915->drm.dev,
"Failed to re-initialize GPU, declaring it wedged!\n");
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
}
goto out_unlock;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 19d9ecdb2894..d2a1158868e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -414,6 +414,11 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
+static void shmem_release(struct drm_i915_gem_object *obj)
+{
+ fput(obj->base.filp);
+}
+
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
@@ -424,6 +429,8 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
.writeback = shmem_writeback,
.pwrite = shmem_pwrite,
+
+ .release = shmem_release,
};
static int create_shmem(struct drm_i915_private *i915,
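shmem objects now free their backing file through the common ops->release hook instead of open-coding it in the generic free path. A runnable userspace miniature of that optional-release ops-table pattern (stand-in types; fclose() playing the role of fput()):

#include <stdio.h>
#include <stdlib.h>

struct obj;

struct obj_ops {
        void (*release)(struct obj *obj);       /* optional */
};

struct obj {
        const struct obj_ops *ops;
        FILE *filp;                             /* backing "shmem" file */
};

static void shmem_release(struct obj *obj)
{
        if (obj->filp)
                fclose(obj->filp);              /* plays the role of fput() */
}

static const struct obj_ops shmem_ops = {
        .release = shmem_release,
};

static void obj_free(struct obj *obj)
{
        /* Common teardown first, then the type-specific hook. */
        if (obj->ops->release)
                obj->ops->release(obj);
        free(obj);
}

int main(void)
{
        struct obj *obj = malloc(sizeof(*obj));

        if (!obj)
                return 1;

        obj->ops = &shmem_ops;
        obj->filp = tmpfile();
        obj_free(obj);
        return 0;
}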
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 3a926a8755c6..3f4c6bdcc3c3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -88,10 +88,18 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
-static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
+ unsigned long shrink)
{
- if (i915_gem_object_unbind(obj) == 0)
+ unsigned long flags;
+
+ flags = 0;
+ if (shrink & I915_SHRINK_ACTIVE)
+ flags = I915_GEM_OBJECT_UNBIND_ACTIVE;
+
+ if (i915_gem_object_unbind(obj, flags) == 0)
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+
return !i915_gem_object_has_pages(obj);
}
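unsafe_drop_pages() now forwards the shrinker's intent: only a scan that was explicitly asked to touch active objects (I915_SHRINK_ACTIVE) may request an unbind that waits on activity (I915_GEM_OBJECT_UNBIND_ACTIVE). A tiny sketch of that flag translation (bit values hypothetical):

#define I915_SHRINK_ACTIVE              (1UL << 0)      /* hypothetical value */
#define I915_GEM_OBJECT_UNBIND_ACTIVE   (1UL << 0)      /* hypothetical value */

static unsigned long shrink_to_unbind_flags(unsigned long shrink)
{
        /* Waiting on active vma is only permitted when explicitly asked. */
        return (shrink & I915_SHRINK_ACTIVE) ?
               I915_GEM_OBJECT_UNBIND_ACTIVE : 0;
}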
@@ -169,7 +177,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
*/
trace_i915_gem_shrink(i915, target, shrink);
- i915_retire_requests(i915);
/*
* Unbinding of objects will require HW access; Let us not wake the
@@ -230,8 +237,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
continue;
if (!(shrink & I915_SHRINK_ACTIVE) &&
- (i915_gem_object_is_active(obj) ||
- i915_gem_object_is_framebuffer(obj)))
+ i915_gem_object_is_framebuffer(obj))
continue;
if (!(shrink & I915_SHRINK_BOUND) &&
@@ -246,7 +252,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
- if (unsafe_drop_pages(obj)) {
+ if (unsafe_drop_pages(obj, shrink)) {
/* May arrive from get_pages on another bo */
mutex_lock_nested(&obj->mm.lock,
I915_MM_SHRINKER);
@@ -269,8 +275,6 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (shrink & I915_SHRINK_BOUND)
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- i915_retire_requests(i915);
-
shrinker_unlock(i915, unlock);
if (nr_scanned)
@@ -427,12 +431,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
if (!shrinker_lock(i915, 0, &unlock))
return NOTIFY_DONE;
- /* Force everything onto the inactive lists */
- if (i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT))
- goto out;
-
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
@@ -455,7 +453,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
}
mutex_unlock(&i915->ggtt.vm.mutex);
-out:
shrinker_unlock(i915, unlock);
*(unsigned long *)ptr += freed_pages;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index de1fab2058ec..639c852bad12 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -529,8 +529,6 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!stolen);
- __i915_gem_object_unpin_pages(obj);
-
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
index adb3074d9ce2..1e372420771b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c
@@ -41,7 +41,7 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
long ret;
/* ABI: return -EIO if already wedged */
- ret = i915_terminally_wedged(to_i915(dev));
+ ret = intel_gt_terminally_wedged(&to_i915(dev)->gt);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 2caa594322bc..b9d2bb15e4a6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -150,7 +150,8 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
}
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj,
+ I915_GEM_OBJECT_UNBIND_ACTIVE);
if (ret == 0)
ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
i915_gem_object_put(obj);
@@ -662,6 +663,14 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
__i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
+ /*
+ * We always mark objects as dirty when they are used by the GPU,
+ * just in case. However, if we set the vma as being read-only we know
+ * that the object will never have been written to.
+ */
+ if (i915_gem_object_is_readonly(obj))
+ obj->mm.dirty = false;
+
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 26ec6579b7cd..fa46a54bcbe7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -35,7 +35,6 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int flags,
long timeout)
{
- unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
bool prune_fences = false;
@@ -83,15 +82,12 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
/*
* Opportunistically prune the fences iff we know they have *all* been
- * signaled and that the reservation object has not been changed (i.e.
- * no new fences have been added).
+ * signaled.
*/
- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
- if (reservation_object_trylock(resv)) {
- if (!__read_seqcount_retry(&resv->seq, seq))
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
+ if (prune_fences && reservation_object_trylock(resv)) {
+ if (reservation_object_test_signaled_rcu(resv, true))
+ reservation_object_add_excl_fence(resv, NULL);
+ reservation_object_unlock(resv);
}
return timeout;
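Fence pruning no longer races the seqcount: instead of sampling resv->seq before the wait and retrying afterwards, the new code simply re-tests under the (try)lock whether every fence has signaled before replacing them with NULL. A kernel-style sketch of that scheme, assuming the 5.3-era reservation_object API:

#include <linux/reservation.h>

static void prune_fences(struct reservation_object *resv)
{
        if (!reservation_object_trylock(resv))
                return;

        /* Re-test under the lock: did everything signal meanwhile? */
        if (reservation_object_test_signaled_rcu(resv, true))
                reservation_object_add_excl_fence(resv, NULL);

        reservation_object_unlock(resv);
}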
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index b74729b6f353..6cbd4a668c9a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -10,6 +10,8 @@
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+
#include "igt_gem_utils.h"
#include "mock_context.h"
@@ -926,7 +928,7 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
}
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(vma->vm->gt);
i915_gem_object_unpin_map(obj);
@@ -1037,8 +1039,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u64 size, u64 offset,
u32 dword, u32 val)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
@@ -1421,6 +1422,9 @@ static int igt_ppgtt_pin_update(void *arg)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned int n;
int first, last;
int err;
@@ -1518,11 +1522,20 @@ static int igt_ppgtt_pin_update(void *arg)
* land in the now stale 2M page.
*/
- err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
- if (err)
- goto out_unpin;
+ n = 0;
+ for_each_engine(engine, dev_priv, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
- err = cpu_check(obj, 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+ }
+ while (n--) {
+ err = cpu_check(obj, n, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+ }
out_unpin:
i915_vma_unpin(vma);
@@ -1598,8 +1611,11 @@ static int igt_shrink_thp(void *arg)
struct drm_i915_private *i915 = ctx->i915;
struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
+ unsigned int n;
int err;
/*
@@ -1635,9 +1651,15 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_unpin;
- err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
- if (err)
- goto out_unpin;
+ n = 0;
+ for_each_engine(engine, i915, id) {
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+ }
i915_vma_unpin(vma);
@@ -1662,7 +1684,12 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_close;
- err = cpu_check(obj, 0, 0xdeadbeaf);
+ while (n--) {
+ err = cpu_check(obj, n, 0xdeadbeaf);
+ if (err)
+ goto out_unpin;
+ }
+
out_unpin:
i915_vma_unpin(vma);
@@ -1726,7 +1753,7 @@ out_unlock:
return err;
}
-int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
+int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_shrink_thp),
@@ -1741,22 +1768,22 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
intel_wakeref_t wakeref;
int err;
- if (!HAS_PPGTT(dev_priv)) {
+ if (!HAS_PPGTT(i915)) {
pr_info("PPGTT not supported, skipping live-selftests\n");
return 0;
}
- if (i915_terminally_wedged(dev_priv))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- file = mock_file(dev_priv);
+ file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- ctx = live_context(dev_priv, file);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@@ -1768,10 +1795,10 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
err = i915_subtests(tests, ctx);
out_unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(dev_priv, file);
+ mock_file_free(i915, file);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index f3a5eb807c1c..275c28926067 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -5,14 +5,16 @@
#include "i915_selftest.h"
+#include "gt/intel_gt.h"
+
#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "mock_context.h"
static int igt_client_fill(void *arg)
{
- struct intel_context *ce = arg;
- struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_private *i915 = arg;
+ struct intel_context *ce = i915->engine[BCS0]->kernel_context;
struct drm_i915_gem_object *obj;
struct rnd_state prng;
IGT_TIMEOUT(end);
@@ -63,17 +65,6 @@ static int igt_client_fill(void *arg)
if (err)
goto err_unpin;
- /*
- * XXX: For now do the wait without the object resv lock to
- * ensure we don't deadlock.
- */
- err = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- goto err_unpin;
-
i915_gem_object_lock(obj);
err = i915_gem_object_set_to_cpu_domain(obj, false);
i915_gem_object_unlock(obj);
@@ -100,11 +91,6 @@ err_unpin:
err_put:
i915_gem_object_put(obj);
err_flush:
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
if (err == -ENOMEM)
err = 0;
@@ -117,11 +103,11 @@ int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_client_fill),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
if (!HAS_ENGINE(i915, BCS0))
return 0;
- return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 8f22d3f18422..a1a4b53cdc4a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -6,6 +6,8 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_gt.h"
+
#include "i915_selftest.h"
#include "selftests/i915_random.h"
@@ -242,12 +244,15 @@ static bool always_valid(struct drm_i915_private *i915)
static bool needs_fence_registers(struct drm_i915_private *i915)
{
- return !i915_terminally_wedged(i915);
+ return !intel_gt_is_wedged(&i915->gt);
}
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
+ return false;
+
+ if (!HAS_ENGINE(i915, RCS0))
return false;
return intel_engine_can_store_dword(i915->engine[RCS0]);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index eaa2b16574c7..7f9f6701b32c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -7,6 +7,7 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"
@@ -31,7 +32,6 @@ static int live_nop_switch(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct drm_file *file;
unsigned long n;
@@ -53,7 +53,6 @@ static int live_nop_switch(void *arg)
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -85,7 +84,7 @@ static int live_nop_switch(void *arg)
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto out_unlock;
}
@@ -129,7 +128,7 @@ static int live_nop_switch(void *arg)
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Switching between %ld contexts timed out\n",
prime);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
break;
}
@@ -152,7 +151,6 @@ static int live_nop_switch(void *arg)
}
out_unlock:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
@@ -237,8 +235,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
struct intel_engine_cs *engine,
unsigned int dw)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
@@ -431,6 +428,9 @@ create_test_object(struct i915_gem_context *ctx,
u64 size;
int err;
+ /* Keep in GEM's good graces */
+ i915_retire_requests(ctx->i915);
+
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
@@ -507,7 +507,6 @@ static int igt_ctx_exec(void *arg)
dw = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
@@ -523,8 +522,7 @@ static int igt_ctx_exec(void *arg)
}
}
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -565,6 +563,8 @@ out_unlock:
mock_file_free(i915, file);
if (err)
return err;
+
+ i915_gem_drain_freed_objects(i915);
}
return 0;
@@ -623,7 +623,6 @@ static int igt_shared_ctx_exec(void *arg)
ncontexts = 0;
while (!time_after(jiffies, end_time)) {
struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@@ -642,9 +641,7 @@ static int igt_shared_ctx_exec(void *arg)
}
}
- err = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -678,6 +675,10 @@ static int igt_shared_ctx_exec(void *arg)
dw += rem;
}
+
+ mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(i915);
+ mutex_lock(&i915->drm.struct_mutex);
}
out_test:
if (igt_live_test_end(&t))
@@ -746,7 +747,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
- vma = i915_vma_instance(obj, ce->gem_context->vm, NULL);
+ vma = i915_vma_instance(obj, ce->vm, NULL);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -956,7 +957,7 @@ __sseu_finish(struct drm_i915_private *i915,
int ret = 0;
if (flags & TEST_RESET) {
- ret = i915_reset_engine(ce->engine, "sseu");
+ ret = intel_engine_reset(ce->engine, "sseu");
if (ret)
goto out;
}
@@ -1025,35 +1026,33 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
unsigned int flags)
{
struct intel_engine_cs *engine = i915->engine[RCS0];
- struct intel_sseu default_sseu = engine->sseu;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
struct intel_context *ce;
struct intel_sseu pg_sseu;
- intel_wakeref_t wakeref;
struct drm_file *file;
int ret;
- if (INTEL_GEN(i915) < 9)
+ if (INTEL_GEN(i915) < 9 || !engine)
return 0;
if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
return 0;
- if (hweight32(default_sseu.slice_mask) < 2)
+ if (hweight32(engine->sseu.slice_mask) < 2)
return 0;
/*
* Gen11 VME friendly power-gated configuration with half enabled
* sub-slices.
*/
- pg_sseu = default_sseu;
+ pg_sseu = engine->sseu;
pg_sseu.slice_mask = 1;
pg_sseu.subslice_mask =
- ~(~0 << (hweight32(default_sseu.subslice_mask) / 2));
+ ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
- name, flags, hweight32(default_sseu.slice_mask),
+ name, flags, hweight32(engine->sseu.slice_mask),
hweight32(pg_sseu.slice_mask));
file = mock_file(i915);
@@ -1061,7 +1060,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
return PTR_ERR(file);
if (flags & TEST_RESET)
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(&i915->gt);
mutex_lock(&i915->drm.struct_mutex);
@@ -1078,12 +1077,10 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_unlock;
}
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
ce = i915_gem_context_get_engine(ctx, RCS0);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
- goto out_rpm;
+ goto out_put;
}
ret = intel_context_pin(ce);
@@ -1091,7 +1088,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_context;
/* First set the default mask. */
- ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
if (ret)
goto out_fail;
@@ -1101,7 +1098,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_fail;
/* Back to defaults. */
- ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
+ ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
if (ret)
goto out_fail;
@@ -1117,15 +1114,14 @@ out_fail:
intel_context_unpin(ce);
out_context:
intel_context_put(ce);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+out_put:
i915_gem_object_put(obj);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
if (flags & TEST_RESET)
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(&i915->gt);
mock_file_free(i915, file);
@@ -1194,7 +1190,7 @@ static int igt_ctx_readonly(void *arg)
goto out_unlock;
}
- vm = ctx->vm ?: &i915->mm.aliasing_ppgtt->vm;
+ vm = ctx->vm ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
err = 0;
goto out_unlock;
@@ -1207,8 +1203,6 @@ static int igt_ctx_readonly(void *arg)
unsigned int id;
for_each_engine(engine, i915, id) {
- intel_wakeref_t wakeref;
-
if (!intel_engine_can_store_dword(engine))
continue;
@@ -1223,9 +1217,7 @@ static int igt_ctx_readonly(void *arg)
i915_gem_object_set_readonly(obj);
}
- err = 0;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -1488,7 +1480,6 @@ static int igt_vm_isolation(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
@@ -1535,8 +1526,6 @@ static int igt_vm_isolation(void *arg)
GEM_BUG_ON(ctx_b->vm->total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
count = 0;
for_each_engine(engine, i915, id) {
IGT_TIMEOUT(end_time);
@@ -1551,7 +1540,7 @@ static int igt_vm_isolation(void *arg)
div64_u64_rem(i915_prandom_u64_state(&prng),
vm_total, &offset);
- offset &= -sizeof(u32);
+ offset = round_down(offset, alignof_dword);
offset += I915_GTT_PAGE_SIZE;
err = write_to_scratch(ctx_a, engine,
@@ -1560,7 +1549,7 @@ static int igt_vm_isolation(void *arg)
err = read_from_scratch(ctx_b, engine,
offset, &value);
if (err)
- goto out_rpm;
+ goto out_unlock;
if (value) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
@@ -1569,7 +1558,7 @@ static int igt_vm_isolation(void *arg)
lower_32_bits(offset),
this);
err = -EINVAL;
- goto out_rpm;
+ goto out_unlock;
}
this++;
@@ -1579,8 +1568,6 @@ static int igt_vm_isolation(void *arg)
pr_info("Checked %lu scratch offsets across %d engines\n",
count, RUNTIME_INFO(i915)->num_engines);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out_unlock:
if (igt_live_test_end(&t))
err = -EIO;
@@ -1736,7 +1723,7 @@ int i915_gem_context_mock_selftests(void)
return err;
}
-int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
+int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_nop_switch),
@@ -1747,8 +1734,8 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_vm_isolation),
};
- if (i915_terminally_wedged(dev_priv))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, dev_priv);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index e3a64edef918..d85d1ce273ca 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -20,7 +20,7 @@ static int igt_dmabuf_export(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
i915_gem_object_put(obj);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
@@ -44,7 +44,7 @@ static int igt_dmabuf_import_self(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
@@ -219,7 +219,7 @@ static int igt_dmabuf_export_vmap(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
if (IS_ERR(dmabuf)) {
pr_err("i915_gem_prime_export failed with err=%d\n",
(int)PTR_ERR(dmabuf));
@@ -266,7 +266,7 @@ static int igt_dmabuf_export_kmap(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ dmabuf = i915_gem_prime_export(&obj->base, 0);
i915_gem_object_put(obj);
if (IS_ERR(dmabuf)) {
err = PTR_ERR(dmabuf);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 5c81f4b4813a..01857c12f12f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -6,6 +6,7 @@
#include <linux/prime_numbers.h>
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
@@ -143,7 +144,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
if (offset >= obj->base.size)
continue;
- i915_gem_flush_ggtt_writes(to_i915(obj->base.dev));
+ intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);
p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
cpu = kmap(p) + offset_in_page(offset);
@@ -327,7 +328,8 @@ out:
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_request *rq;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct i915_vma *vma;
int err;
@@ -339,17 +341,21 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_create(i915->engine[RCS0]->kernel_context);
- if (IS_ERR(rq)) {
- i915_vma_unpin(vma);
- return PTR_ERR(rq);
- }
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq)) {
+ i915_vma_unpin(vma);
+ return PTR_ERR(rq);
+ }
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ i915_vma_lock(vma);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
- i915_request_add(rq);
+ i915_request_add(rq);
+ }
i915_vma_unpin(vma);
i915_gem_object_put(obj); /* leave it only alive via its active ref */
@@ -378,7 +384,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_shrinker_unregister(i915);
- intel_gt_pm_get(i915);
+ intel_gt_pm_get(&i915->gt);
cancel_delayed_work_sync(&i915->gem.retire_work);
flush_work(&i915->gem.idle_work);
@@ -386,7 +392,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
static void restore_retire_worker(struct drm_i915_private *i915)
{
- intel_gt_pm_put(i915);
+ intel_gt_pm_put(&i915->gt);
mutex_lock(&i915->drm.struct_mutex);
igt_flush_test(i915, I915_WAIT_LOCKED);
@@ -395,6 +401,18 @@ static void restore_retire_worker(struct drm_i915_private *i915)
i915_gem_shrinker_register(i915);
}
+static void mmap_offset_lock(struct drm_i915_private *i915)
+ __acquires(&i915->drm.vma_offset_manager->vm_lock)
+{
+ write_lock(&i915->drm.vma_offset_manager->vm_lock);
+}
+
+static void mmap_offset_unlock(struct drm_i915_private *i915)
+ __releases(&i915->drm.vma_offset_manager->vm_lock)
+{
+ write_unlock(&i915->drm.vma_offset_manager->vm_lock);
+}
+
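The __acquires()/__releases() markers on these helpers are sparse annotations: they declare the lock-context change so static analysis ('make C=1') can check that every path through the helpers balances the rwlock. The same shape on a hypothetical struct:

#include <linux/spinlock.h>

struct foo {
        rwlock_t lock;
};

static void foo_lock(struct foo *f)
        __acquires(&f->lock)
{
        write_lock(&f->lock);   /* sparse now knows the lock is held */
}

static void foo_unlock(struct foo *f)
        __releases(&f->lock)
{
        write_unlock(&f->lock);
}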
static int igt_mmap_offset_exhaustion(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -413,7 +431,9 @@ static int igt_mmap_offset_exhaustion(void *arg)
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
resv.start = hole_start;
resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
+ mmap_offset_lock(i915);
err = drm_mm_reserve_node(mm, &resv);
+ mmap_offset_unlock(i915);
if (err) {
pr_err("Failed to trim VMA manager, err=%d\n", err);
goto out_park;
@@ -458,7 +478,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
break;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -474,19 +494,12 @@ static int igt_mmap_offset_exhaustion(void *arg)
pr_err("[loop %d] Failed to busy the object\n", loop);
goto err_obj;
}
-
- /* NB we rely on the _active_ reference to access obj now */
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- err = create_mmap_offset(obj);
- if (err) {
- pr_err("[loop %d] create_mmap_offset failed with err=%d\n",
- loop, err);
- goto out;
- }
}
out:
+ mmap_offset_lock(i915);
drm_mm_remove_node(&resv);
+ mmap_offset_unlock(i915);
out_park:
restore_retire_worker(i915);
return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index e23d8c9e9298..19843acc84d3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include "gt/intel_gt.h"
+
#include "i915_selftest.h"
#include "selftests/igt_flush_test.h"
@@ -11,8 +13,8 @@
static int igt_fill_blt(void *arg)
{
- struct intel_context *ce = arg;
- struct drm_i915_private *i915 = ce->gem_context->i915;
+ struct drm_i915_private *i915 = arg;
+ struct intel_context *ce = i915->engine[BCS0]->kernel_context;
struct drm_i915_gem_object *obj;
struct rnd_state prng;
IGT_TIMEOUT(end);
@@ -83,11 +85,6 @@ err_unpin:
err_put:
i915_gem_object_put(obj);
err_flush:
- mutex_lock(&i915->drm.struct_mutex);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
if (err == -ENOMEM)
err = 0;
@@ -100,11 +97,11 @@ int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_fill_blt),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
if (!HAS_ENGINE(i915, BCS0))
return 0;
- return i915_subtests(tests, i915->engine[BCS0]->kernel_context);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gt/Makefile b/drivers/gpu/drm/i915/gt/Makefile
index 1c75b5c9790c..7e73aa587967 100644
--- a/drivers/gpu/drm/i915/gt/Makefile
+++ b/drivers/gpu/drm/i915/gt/Makefile
@@ -1,2 +1,5 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
# Extra header tests
-include $(src)/Makefile.header-test
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/gt/Makefile.header-test b/drivers/gpu/drm/i915/gt/Makefile.header-test
deleted file mode 100644
index 61e06cbb4b32..000000000000
--- a/drivers/gpu/drm/i915/gt/Makefile.header-test
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Copyright © 2019 Intel Corporation
-
-# Test the headers are compilable as standalone units
-header_test := $(notdir $(wildcard $(src)/*.h))
-
-quiet_cmd_header_test = HDRTEST $@
- cmd_header_test = echo "\#include \"$(<F)\"" > $@
-
-header_test_%.c: %.h
- $(call cmd,header_test)
-
-extra-$(CONFIG_DRM_I915_WERROR) += \
- $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
-
-clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen6.c b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
index 11c8e7b3dd7c..11c8e7b3dd7c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen6.c
+++ b/drivers/gpu/drm/i915/gt/gen6_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen7.c b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
index 655180646152..655180646152 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen7.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
index 95288a34c15d..95288a34c15d 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/gt/gen8_renderstate.c
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
index 7d3ac02f0177..7d3ac02f0177 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen9.c
+++ b/drivers/gpu/drm/i915/gt/gen9_renderstate.c
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 23120901c55f..f30441a140f8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -59,6 +59,10 @@ int __intel_context_do_pin(struct intel_context *ce)
if (err)
goto err;
+ GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
+ ce->engine->name, ce->ring->timeline->fence_context,
+ ce->ring->head, ce->ring->tail);
+
i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
smp_mb__before_atomic(); /* flush pin before it is visible */
@@ -85,6 +89,9 @@ void intel_context_unpin(struct intel_context *ce)
mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
if (likely(atomic_dec_and_test(&ce->pin_count))) {
+ GEM_TRACE("%s context:%llx retire\n",
+ ce->engine->name, ce->ring->timeline->fence_context);
+
ce->ops->unpin(ce);
i915_gem_context_put(ce->gem_context);
@@ -95,11 +102,15 @@ void intel_context_unpin(struct intel_context *ce)
intel_context_put(ce);
}
-static int __context_pin_state(struct i915_vma *vma, unsigned long flags)
+static int __context_pin_state(struct i915_vma *vma)
{
+ u64 flags;
int err;
- err = i915_vma_pin(vma, 0, 0, flags | PIN_GLOBAL);
+ flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
+ flags |= PIN_HIGH | PIN_GLOBAL;
+
+ err = i915_vma_pin(vma, 0, 0, flags);
if (err)
return err;
@@ -119,10 +130,13 @@ static void __context_unpin_state(struct i915_vma *vma)
__i915_vma_unpin(vma);
}
-static void intel_context_retire(struct i915_active *active)
+static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
+ GEM_TRACE("%s context:%llx retire\n",
+ ce->engine->name, ce->ring->timeline->fence_context);
+
if (ce->state)
__context_unpin_state(ce->state);
@@ -130,35 +144,11 @@ static void intel_context_retire(struct i915_active *active)
intel_context_put(ce);
}
-void
-intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- GEM_BUG_ON(!engine->cops);
-
- kref_init(&ce->ref);
-
- ce->gem_context = ctx;
- ce->engine = engine;
- ce->ops = engine->cops;
- ce->sseu = engine->sseu;
-
- INIT_LIST_HEAD(&ce->signal_link);
- INIT_LIST_HEAD(&ce->signals);
-
- mutex_init(&ce->pin_mutex);
-
- i915_active_init(ctx->i915, &ce->active, intel_context_retire);
-}
-
-int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
+static int __intel_context_active(struct i915_active *active)
{
+ struct intel_context *ce = container_of(active, typeof(*ce), active);
int err;
- if (!i915_active_acquire(&ce->active))
- return 0;
-
intel_context_get(ce);
err = intel_ring_pin(ce->ring);
@@ -168,7 +158,7 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
if (!ce->state)
return 0;
- err = __context_pin_state(ce->state, flags);
+ err = __context_pin_state(ce->state);
if (err)
goto err_ring;
@@ -188,15 +178,40 @@ err_ring:
intel_ring_unpin(ce->ring);
err_put:
intel_context_put(ce);
- i915_active_cancel(&ce->active);
return err;
}
-void intel_context_active_release(struct intel_context *ce)
+void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!engine->cops);
+
+ kref_init(&ce->ref);
+
+ ce->gem_context = ctx;
+ ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+
+ ce->engine = engine;
+ ce->ops = engine->cops;
+ ce->sseu = engine->sseu;
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ mutex_init(&ce->pin_mutex);
+
+ i915_active_init(ctx->i915, &ce->active,
+ __intel_context_active, __intel_context_retire);
+}
+
+void intel_context_fini(struct intel_context *ce)
{
- /* Nodes preallocated in intel_context_active() */
- i915_active_acquire_barrier(&ce->active);
- i915_active_release(&ce->active);
+ i915_vm_put(ce->vm);
+
+ mutex_destroy(&ce->pin_mutex);
+ i915_active_fini(&ce->active);
}
static void i915_global_context_shrink(void)
@@ -234,6 +249,44 @@ void intel_context_exit_engine(struct intel_context *ce)
intel_engine_pm_put(ce->engine);
}
+int intel_context_prepare_remote_request(struct intel_context *ce,
+ struct i915_request *rq)
+{
+ struct intel_timeline *tl = ce->ring->timeline;
+ int err;
+
+ /* Only suitable for use in remotely modifying this context */
+ GEM_BUG_ON(rq->hw_context == ce);
+
+ if (rq->timeline != tl) { /* beware timeline sharing */
+ err = mutex_lock_interruptible_nested(&tl->mutex,
+ SINGLE_DEPTH_NESTING);
+ if (err)
+ return err;
+
+ /* Queue this switch after current activity by this context. */
+ err = i915_active_request_set(&tl->last_request, rq);
+ if (err)
+ goto unlock;
+ }
+ lockdep_assert_held(&tl->mutex);
+
+ /*
+ * Guarantee that the context image and the timeline remain pinned
+ * until the modifying request is retired, by setting the ce activity
+ * tracker.
+ *
+ * But we only need to take one pin on its account; in other words,
+ * we transfer the pinned ce to the tracked active request.
+ */
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ err = i915_active_ref(&ce->active, rq->fence.context, rq);
+
+unlock:
+ if (rq->timeline != tl)
+ mutex_unlock(&tl->mutex);
+ return err;
+}
+
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
struct i915_request *rq;
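intel_context_prepare_remote_request() packages the ordering rules for poking one context from another: serialize behind the target's timeline and hold the target's active reference until the modifying request retires. A hedged sketch of a caller, where emit_context_update() is a hypothetical emitter standing in for, e.g., an sseu reconfiguration:

#include <linux/err.h>

#include "gt/intel_context.h"
#include "i915_request.h"

/* Hypothetical caller: modify 'ce' from the engine's kernel context. */
static int remote_modify(struct intel_context *ce)
{
        struct i915_request *rq;
        int err;

        rq = i915_request_create(ce->engine->kernel_context);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* Order after ce's current activity and pin ce until we retire. */
        err = intel_context_prepare_remote_request(ce, rq);
        if (err == 0)
                err = emit_context_update(rq, ce);      /* hypothetical */

        i915_request_add(rq);
        return err;
}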
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index a47275bc4f01..23c7e4c0ce7c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -9,12 +9,14 @@
#include <linux/lockdep.h>
+#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine);
+void intel_context_fini(struct intel_context *ce);
struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
@@ -102,8 +104,17 @@ static inline void intel_context_exit(struct intel_context *ce)
ce->ops->exit(ce);
}
-int intel_context_active_acquire(struct intel_context *ce, unsigned long flags);
-void intel_context_active_release(struct intel_context *ce);
+static inline int intel_context_active_acquire(struct intel_context *ce)
+{
+ return i915_active_acquire(&ce->active);
+}
+
+static inline void intel_context_active_release(struct intel_context *ce)
+{
+ /* Nodes preallocated in intel_context_active() */
+ i915_active_acquire_barrier(&ce->active);
+ i915_active_release(&ce->active);
+}
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
@@ -129,6 +140,9 @@ static inline void intel_context_timeline_unlock(struct intel_context *ce)
mutex_unlock(&ce->ring->timeline->mutex);
}
+int intel_context_prepare_remote_request(struct intel_context *ce,
+ struct i915_request *rq);
+
struct i915_request *intel_context_create_request(struct intel_context *ce);
#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 08049ee91cee..68a7e979b1a9 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include "i915_active_types.h"
+#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
@@ -35,9 +36,15 @@ struct intel_context_ops {
struct intel_context {
struct kref ref;
- struct i915_gem_context *gem_context;
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
+#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
+#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
+#define intel_context_inflight_inc(ce) ptr_count_inc(&(ce)->inflight)
+#define intel_context_inflight_dec(ce) ptr_count_dec(&(ce)->inflight)
+
+ struct i915_address_space *vm;
+ struct i915_gem_context *gem_context;
struct list_head signal_link;
struct list_head signals;
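The inflight pointer above doubles as a counter: assuming engine pointers are at least 4-byte aligned, the two low bits are free to hold how many times the context sits in the ELSP, which is what the ptr_mask_bits()/ptr_unmask_bits() macros rely on. A runnable userspace illustration of the same tagging trick (names hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TAG_BITS 2
#define TAG_MASK ((1UL << TAG_BITS) - 1)

static void *ptr_pack(void *ptr, unsigned long count)
{
        /* The pointer must be aligned enough that the low bits are 0. */
        assert(((uintptr_t)ptr & TAG_MASK) == 0 && count <= TAG_MASK);
        return (void *)((uintptr_t)ptr | count);
}

static void *ptr_mask(void *packed)
{
        return (void *)((uintptr_t)packed & ~TAG_MASK);
}

static unsigned long ptr_unmask(void *packed)
{
        return (uintptr_t)packed & TAG_MASK;
}

int main(void)
{
        static int engine;      /* stands in for an intel_engine_cs */
        void *inflight = ptr_pack(&engine, 2);

        printf("engine=%p count=%lu\n",
               ptr_mask(inflight), ptr_unmask(inflight));
        return 0;
}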
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 2f1c6871ee95..db5c73ce86ee 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -14,7 +14,7 @@
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
@@ -51,7 +51,7 @@ struct drm_printer;
#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
-#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read_fw, __VA_ARGS__)
#define ENGINE_POSTING_READ16(...) __ENGINE_READ_OP(posting_read16, __VA_ARGS__)
#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
@@ -125,71 +125,26 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
-static inline void
-execlists_set_active(struct intel_engine_execlists *execlists,
- unsigned int bit)
-{
- __set_bit(bit, (unsigned long *)&execlists->active);
-}
-
-static inline bool
-execlists_set_active_once(struct intel_engine_execlists *execlists,
- unsigned int bit)
-{
- return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
-}
-
-static inline void
-execlists_clear_active(struct intel_engine_execlists *execlists,
- unsigned int bit)
-{
- __clear_bit(bit, (unsigned long *)&execlists->active);
-}
-
-static inline void
-execlists_clear_all_active(struct intel_engine_execlists *execlists)
+static inline unsigned int
+execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
- execlists->active = 0;
+ return execlists->port_mask + 1;
}
-static inline bool
-execlists_is_active(const struct intel_engine_execlists *execlists,
- unsigned int bit)
+static inline struct i915_request *
+execlists_active(const struct intel_engine_execlists *execlists)
{
- return test_bit(bit, (unsigned long *)&execlists->active);
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
+ return READ_ONCE(*execlists->active);
}
-void execlists_user_begin(struct intel_engine_execlists *execlists,
- const struct execlist_port *port);
-void execlists_user_end(struct intel_engine_execlists *execlists);
-
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
-static inline unsigned int
-execlists_num_ports(const struct intel_engine_execlists * const execlists)
-{
- return execlists->port_mask + 1;
-}
-
-static inline struct execlist_port *
-execlists_port_complete(struct intel_engine_execlists * const execlists,
- struct execlist_port * const port)
-{
- const unsigned int m = execlists->port_mask;
-
- GEM_BUG_ON(port_index(port, execlists) != 0);
- GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
-
- memmove(port, port + 1, m * sizeof(struct execlist_port));
- memset(port + m, 0, sizeof(struct execlist_port));
-
- return port;
-}
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
@@ -245,7 +200,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
- struct i915_timeline *timeline,
+ struct intel_timeline *timeline,
int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
@@ -456,8 +411,8 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
return cs;
}
-static inline void intel_engine_reset(struct intel_engine_cs *engine,
- bool stalled)
+static inline void __intel_engine_reset(struct intel_engine_cs *engine,
+ bool stalled)
{
if (engine->reset.reset)
engine->reset.reset(engine, stalled);
@@ -465,9 +420,9 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
}
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
+bool intel_engines_are_idle(struct intel_gt *gt);
-void intel_engines_reset_default_submission(struct drm_i915_private *i915);
+void intel_engines_reset_default_submission(struct intel_gt *gt);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f25632c9b292..65cbf1d9118d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -28,6 +28,8 @@
#include "i915_drv.h"
+#include "gt/intel_gt.h"
+
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_context.h"
@@ -314,6 +316,7 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
engine->id = id;
engine->mask = BIT(id);
engine->i915 = dev_priv;
+ engine->gt = &dev_priv->gt;
engine->uncore = &dev_priv->uncore;
__sprint_engine_name(engine->name, info);
engine->hw_id = engine->guc_id = info->hw_id;
@@ -423,7 +426,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
WARN_ON(engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure())
return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
@@ -445,15 +448,9 @@ int intel_engines_init_mmio(struct drm_i915_private *i915)
if (WARN_ON(mask != engine_mask))
device_info->engine_mask = mask;
- /* We always presume we have at least RCS available for later probing */
- if (WARN_ON(!HAS_ENGINE(i915, RCS0))) {
- err = -ENODEV;
- goto cleanup;
- }
-
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
- i915_check_and_clear_faults(i915);
+ intel_gt_check_and_clear_faults(&i915->gt);
intel_setup_engine_capabilities(i915);
@@ -508,6 +505,10 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine)
GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
+ memset(execlists->pending, 0, sizeof(execlists->pending));
+ execlists->active =
+ memset(execlists->inflight, 0, sizeof(execlists->inflight));
+
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
}
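
The assignment above works because memset() returns its destination pointer,
letting execlists->active start out pointing at the freshly zeroed inflight[]
array in a single statement. The idiom in isolation:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            void *slots[3];
            void **active;

            /* memset() returns its first argument, so the cursor can be
             * initialised by the same statement that clears the array. */
            active = memset(slots, 0, sizeof(slots));

            printf("active == slots: %d, *active == NULL: %d\n",
                   active == slots, *active == NULL);
            return 0;
    }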
@@ -577,7 +578,7 @@ static int init_status_page(struct intel_engine_cs *engine)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -629,6 +630,10 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
engine->sseu =
intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
+ intel_engine_init_workarounds(engine);
+ intel_engine_init_whitelist(engine);
+ intel_engine_init_ctx_wa(engine);
+
return 0;
}
@@ -681,9 +686,10 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
u8 engine;
u8 sched;
} map[] = {
-#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
- MAP(PREEMPTION, PREEMPTION),
- MAP(SEMAPHORES, SEMAPHORES),
+#define MAP(x, y) { ilog2(I915_ENGINE_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+ MAP(HAS_PREEMPTION, PREEMPTION),
+ MAP(HAS_SEMAPHORES, SEMAPHORES),
+ MAP(SUPPORTS_STATS, ENGINE_BUSY_STATS),
#undef MAP
};
struct intel_engine_cs *engine;
@@ -717,7 +723,7 @@ void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
struct measure_breadcrumb {
struct i915_request rq;
- struct i915_timeline timeline;
+ struct intel_timeline timeline;
struct intel_ring ring;
u32 cs[1024];
};
@@ -727,15 +733,15 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
struct measure_breadcrumb *frame;
int dw = -ENOMEM;
- GEM_BUG_ON(!engine->i915->gt.scratch);
+ GEM_BUG_ON(!engine->gt->scratch);
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
if (!frame)
return -ENOMEM;
- if (i915_timeline_init(engine->i915,
- &frame->timeline,
- engine->status_page.vma))
+ if (intel_timeline_init(&frame->timeline,
+ engine->gt,
+ engine->status_page.vma))
goto out_frame;
INIT_LIST_HEAD(&frame->ring.request_list);
@@ -750,17 +756,17 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
frame->rq.ring = &frame->ring;
frame->rq.timeline = &frame->timeline;
- dw = i915_timeline_pin(&frame->timeline);
+ dw = intel_timeline_pin(&frame->timeline);
if (dw < 0)
goto out_timeline;
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
- i915_timeline_unpin(&frame->timeline);
+ intel_timeline_unpin(&frame->timeline);
out_timeline:
- i915_timeline_fini(&frame->timeline);
+ intel_timeline_fini(&frame->timeline);
out_frame:
kfree(frame);
return dw;
@@ -823,6 +829,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
int ret;
+ engine->set_default_submission(engine);
+
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -835,28 +843,15 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
if (ret)
return ret;
- /*
- * Similarly the preempt context must always be available so that
- * we can interrupt the engine at any time. However, as preemption
- * is optional, we allow it to fail.
- */
- if (i915->preempt_context)
- pin_context(i915->preempt_context, engine,
- &engine->preempt_context);
-
ret = measure_breadcrumb_dw(engine);
if (ret < 0)
goto err_unpin;
engine->emit_fini_breadcrumb_dw = ret;
- engine->set_default_submission(engine);
-
return 0;
err_unpin:
- if (engine->preempt_context)
- intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
return ret;
}
@@ -881,8 +876,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (engine->preempt_context)
- intel_context_unpin(engine->preempt_context);
intel_context_unpin(engine->kernel_context);
GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
@@ -966,57 +959,23 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
}
}
-u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
-{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
- unsigned int slice = fls(sseu->slice_mask) - 1;
- unsigned int subslice;
- u32 mcr_s_ss_select;
-
- GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
- subslice = fls(sseu->subslice_mask[slice]);
- GEM_BUG_ON(!subslice);
- subslice--;
-
- if (IS_GEN(dev_priv, 10))
- mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
- GEN8_MCR_SUBSLICE(subslice);
- else if (INTEL_GEN(dev_priv) >= 11)
- mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
- GEN11_MCR_SUBSLICE(subslice);
- else
- mcr_s_ss_select = 0;
-
- return mcr_s_ss_select;
-}
-
static u32
read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
i915_reg_t reg)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_uncore *uncore = engine->uncore;
- u32 mcr_slice_subslice_mask;
- u32 mcr_slice_subslice_select;
- u32 default_mcr_s_ss_select;
- u32 mcr;
- u32 ret;
+ u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
enum forcewake_domains fw_domains;
if (INTEL_GEN(i915) >= 11) {
- mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
- GEN11_MCR_SUBSLICE_MASK;
- mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
- GEN11_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
} else {
- mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
- GEN8_MCR_SUBSLICE_MASK;
- mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
- GEN8_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+ mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
}
- default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(i915);
-
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(uncore,
@@ -1026,26 +985,23 @@ read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
spin_lock_irq(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
- mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
-
- WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
- default_mcr_s_ss_select);
+ old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= mcr_slice_subslice_select;
+ mcr &= ~mcr_mask;
+ mcr |= mcr_ss;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- ret = intel_uncore_read_fw(uncore, reg);
+ val = intel_uncore_read_fw(uncore, reg);
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= default_mcr_s_ss_select;
+ mcr &= ~mcr_mask;
+ mcr |= old_mcr & mcr_mask;
intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irq(&uncore->lock);
- return ret;
+ return val;
}
/* NB: please notice the memset */
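
The reworked read_subslice_reg() saves the incoming MCR selector, steers it at
the requested slice/subslice, reads the target register, and then restores
only the steering bits it changed (old_mcr & mcr_mask) rather than assuming a
default selector. The mask-and-restore arithmetic on a plain integer, with a
made-up field layout:

    #include <stdio.h>
    #include <stdint.h>

    /* hypothetical selector layout: slice in bits 7:4, subslice in 3:0 */
    #define MCR_SLICE_MASK          0xf0u
    #define MCR_SUBSLICE_MASK       0x0fu
    #define MCR_SLICE(s)            ((uint32_t)(s) << 4)
    #define MCR_SUBSLICE(ss)        ((uint32_t)(ss) << 0)

    int main(void)
    {
            const uint32_t mcr_mask = MCR_SLICE_MASK | MCR_SUBSLICE_MASK;
            const uint32_t mcr_ss = MCR_SLICE(1) | MCR_SUBSLICE(2);
            uint32_t mcr, old_mcr;

            old_mcr = mcr = 0x5a3; /* whatever was programmed before us */

            mcr &= ~mcr_mask;       /* steer at the requested unit */
            mcr |= mcr_ss;
            printf("steered:  %#x\n", (unsigned)mcr);
            /* ... the register read happens here ... */

            mcr &= ~mcr_mask;       /* put back only the bits we touched */
            mcr |= old_mcr & mcr_mask;
            printf("restored: %#x\n", (unsigned)mcr); /* 0x5a3 again */
            return 0;
    }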
@@ -1150,17 +1106,17 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
/* More white lies, if wedged, hw state is inconsistent */
- if (i915_reset_failed(engine->i915))
+ if (intel_gt_is_wedged(engine->gt))
return true;
- if (!intel_wakeref_active(&engine->wakeref))
+ if (!intel_engine_pm_is_awake(engine))
return true;
/* Waiting to drain ELSP? */
- if (READ_ONCE(engine->execlists.active)) {
+ if (execlists_active(&engine->execlists)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
- synchronize_hardirq(engine->i915->drm.irq);
+ synchronize_hardirq(engine->i915->drm.pdev->irq);
local_bh_disable();
if (tasklet_trylock(t)) {
@@ -1174,7 +1130,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
/* Otherwise flush the tasklet if it was on another cpu */
tasklet_unlock_wait(t);
- if (READ_ONCE(engine->execlists.active))
+ if (execlists_active(&engine->execlists))
return false;
}
@@ -1186,7 +1142,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return ring_is_idle(engine);
}
-bool intel_engines_are_idle(struct drm_i915_private *i915)
+bool intel_engines_are_idle(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1195,14 +1151,14 @@ bool intel_engines_are_idle(struct drm_i915_private *i915)
* If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return true;
/* Already parked (and passed an idleness test); must still be idle */
- if (!READ_ONCE(i915->gt.awake))
+ if (!READ_ONCE(gt->awake))
return true;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@@ -1210,12 +1166,12 @@ bool intel_engines_are_idle(struct drm_i915_private *i915)
return true;
}
-void intel_engines_reset_default_submission(struct drm_i915_private *i915)
+void intel_engines_reset_default_submission(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt->i915, id)
engine->set_default_submission(engine);
}
@@ -1372,6 +1328,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
if (HAS_EXECLISTS(dev_priv)) {
+ struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
const u8 num_entries = execlists->csb_size;
@@ -1404,27 +1361,33 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
spin_lock_irqsave(&engine->active.lock, flags);
- for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
- struct i915_request *rq;
- unsigned int count;
+ for (port = execlists->active; (rq = *port); port++) {
+ char hdr[80];
+ int len;
+
+ len = snprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d: ",
+ (int)(port - execlists->active));
+ if (!i915_request_signaled(rq))
+ len += snprintf(hdr + len, sizeof(hdr) - len,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->timeline->hwsp_offset,
+ hwsp_seqno(rq));
+ snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ print_request(m, rq, hdr);
+ }
+ for (port = execlists->pending; (rq = *port); port++) {
char hdr[80];
- rq = port_unpack(&execlists->port[idx], &count);
- if (!rq) {
- drm_printf(m, "\t\tELSP[%d] idle\n", idx);
- } else if (!i915_request_signaled(rq)) {
- snprintf(hdr, sizeof(hdr),
- "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
- idx, count,
- i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset,
- hwsp_seqno(rq));
- print_request(m, rq, hdr);
- } else {
- print_request(m, rq, "\t\tELSP[%d] rq: ");
- }
+ snprintf(hdr, sizeof(hdr),
+ "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
+ (int)(port - execlists->pending),
+ i915_ggtt_offset(rq->ring->vma),
+ rq->timeline->hwsp_offset,
+ hwsp_seqno(rq));
+ print_request(m, rq, hdr);
}
- drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
spin_unlock_irqrestore(&engine->active.lock, flags);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
@@ -1486,7 +1449,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
va_end(ap);
}
- if (i915_reset_failed(engine->i915))
+ if (intel_gt_is_wedged(engine->gt))
drm_printf(m, "*** WEDGED ***\n");
drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
@@ -1587,15 +1550,19 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
}
if (engine->stats.enabled++ == 0) {
- const struct execlist_port *port = execlists->port;
- unsigned int num_ports = execlists_num_ports(execlists);
+ struct i915_request * const *port;
+ struct i915_request *rq;
engine->stats.enabled_at = ktime_get();
/* XXX submission method oblivious? */
- while (num_ports-- && port_isset(port)) {
+ for (port = execlists->active; (rq = *port); port++)
engine->stats.active++;
- port++;
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ /* Exclude any contexts already counted in active */
+ if (intel_context_inflight_count(rq->hw_context) == 1)
+ engine->stats.active++;
}
if (engine->stats.active)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index ae5b6baf6dff..e74fbf04a68d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -8,6 +8,7 @@
#include "intel_engine.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
static int __engine_unpark(struct intel_wakeref *wf)
@@ -18,7 +19,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
GEM_TRACE("%s\n", engine->name);
- intel_gt_pm_get(engine->i915);
+ intel_gt_pm_get(engine->gt);
/* Pin the default state for fast resets from atomic context. */
map = NULL;
@@ -66,7 +67,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
return true;
/* GPU is pointing to the void, as good as in the kernel context. */
- if (i915_reset_failed(engine->i915))
+ if (intel_gt_is_wedged(engine->gt))
return true;
/*
@@ -129,7 +130,7 @@ static int __engine_park(struct intel_wakeref *wf)
engine->execlists.no_priolist = false;
- intel_gt_pm_put(engine->i915);
+ intel_gt_pm_put(engine->gt);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index a11c893f64c6..015ac72d7ad0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -16,6 +16,12 @@ void intel_engine_pm_get(struct intel_engine_cs *engine);
void intel_engine_pm_put(struct intel_engine_cs *engine);
static inline bool
+intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
+{
+ return intel_wakeref_is_active(&engine->wakeref);
+}
+
+static inline bool
intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
return intel_wakeref_get_if_active(&engine->wakeref);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 43e975a26016..da61dd329210 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -12,6 +12,7 @@
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/timer.h>
#include <linux/types.h>
#include "i915_gem.h"
@@ -19,7 +20,7 @@
#include "i915_pmu.h"
#include "i915_priolist_types.h"
#include "i915_selftest.h"
-#include "i915_timeline_types.h"
+#include "gt/intel_timeline_types.h"
#include "intel_sseu.h"
#include "intel_wakeref.h"
#include "intel_workarounds_types.h"
@@ -35,6 +36,7 @@ struct drm_i915_reg_table;
struct i915_gem_context;
struct i915_request;
struct i915_sched_attr;
+struct intel_gt;
struct intel_uncore;
typedef u8 intel_engine_mask_t;
@@ -66,7 +68,7 @@ struct intel_ring {
struct i915_vma *vma;
void *vaddr;
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
struct list_head request_list;
struct list_head active_link;
@@ -150,6 +152,11 @@ struct intel_engine_execlists {
struct tasklet_struct tasklet;
/**
+ * @timer: kick the current context if its timeslice expires
+ */
+ struct timer_list timer;
+
+ /**
* @default_priolist: priority list for I915_PRIORITY_NORMAL
*/
struct i915_priolist default_priolist;
@@ -172,51 +179,28 @@ struct intel_engine_execlists {
*/
u32 __iomem *ctrl_reg;
+#define EXECLIST_MAX_PORTS 2
/**
- * @port: execlist port states
+ * @active: the currently known context executing on HW
+ */
+ struct i915_request * const *active;
+ /**
+ * @inflight: the set of contexts submitted and acknowledged by HW
*
- * For each hardware ELSP (ExecList Submission Port) we keep
- * track of the last request and the number of times we submitted
- * that port to hw. We then count the number of times the hw reports
- * a context completion or preemption. As only one context can
- * be active on hw, we limit resubmission of context to port[0]. This
- * is called Lite Restore, of the context.
+ * The set of inflight contexts is managed by reading CS events
+ * from the HW. On a context-switch event (not preemption), we
+ * know the HW has transitioned from port0 to port1, and we
+ * advance our inflight/active tracking accordingly.
*/
- struct execlist_port {
- /**
- * @request_count: combined request and submission count
- */
- struct i915_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, execlists) ((p) - (execlists)->port)
-
- /**
- * @context_id: context ID for port
- */
- GEM_DEBUG_DECL(u32 context_id);
-
-#define EXECLIST_MAX_PORTS 2
- } port[EXECLIST_MAX_PORTS];
-
+ struct i915_request *inflight[EXECLIST_MAX_PORTS + 1 /* sentinel */];
/**
- * @active: is the HW active? We consider the HW as active after
- * submitting any context for execution and until we have seen the
- * last context completion event. After that, we do not expect any
- * more events until we submit, and so can park the HW.
+ * @pending: the next set of contexts submitted to ELSP
*
- * As we have a small number of different sources from which we feed
- * the HW, we track the state of each inside a single bitfield.
+ * We store the array of contexts that we submit to HW (via ELSP) and
+ * promote them to the inflight array once HW has signaled the
+ * preemption or idle-to-active event.
*/
- unsigned int active;
-#define EXECLISTS_ACTIVE_USER 0
-#define EXECLISTS_ACTIVE_PREEMPT 1
-#define EXECLISTS_ACTIVE_HWACK 2
+ struct i915_request *pending[EXECLIST_MAX_PORTS + 1];
/**
* @port_mask: number of execlist ports - 1
@@ -258,11 +242,6 @@ struct intel_engine_execlists {
u32 *csb_status;
/**
- * @preempt_complete_status: expected CSB upon completing preemption
- */
- u32 preempt_complete_status;
-
- /**
* @csb_size: context status buffer FIFO size
*/
u8 csb_size;
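
Together, @pending and @inflight replace the old packed execlist_port scheme:
requests are written into pending[] when the ELSP is loaded and promoted into
inflight[] once a CSB event acknowledges them. A toy model of that promotion
step (all types and the event trigger are invented for illustration):

    #include <stdio.h>
    #include <string.h>

    #define MAX_PORTS 2

    struct request { const char *name; };

    struct execlists {
            struct request *inflight[MAX_PORTS + 1]; /* + NULL sentinel */
            struct request *pending[MAX_PORTS + 1];
            struct request **active;
    };

    /* CSB reported idle-to-active (or preemption done): pending now runs */
    static void promote(struct execlists *el)
    {
            memcpy(el->inflight, el->pending, sizeof(el->pending));
            el->active = el->inflight;
            memset(el->pending, 0, sizeof(el->pending));
    }

    int main(void)
    {
            struct request rq0 = { "rq0" }, rq1 = { "rq1" };
            struct execlists el = { .pending = { &rq0, &rq1, NULL } };

            promote(&el);
            for (struct request **port = el.active; *port; port++)
                    printf("inflight: %s\n", (*port)->name);
            return 0;
    }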
@@ -279,6 +258,7 @@ struct intel_engine_execlists {
struct intel_engine_cs {
struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct intel_uncore *uncore;
char name[INTEL_ENGINE_CS_MAX_NAME];
@@ -308,7 +288,6 @@ struct intel_engine_cs {
struct llist_head barrier_tasks;
struct intel_context *kernel_context; /* pinned */
- struct intel_context *preempt_context; /* pinned; optional */
intel_engine_mask_t saturated; /* submitting semaphores too late? */
@@ -404,7 +383,6 @@ struct intel_engine_cs {
const struct intel_context_ops *cops;
int (*request_alloc)(struct i915_request *rq);
- int (*init_context)(struct i915_request *rq);
int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE BIT(0)
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index eec31e36aca7..69f34737325f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -8,6 +8,13 @@
#define _INTEL_GPU_COMMANDS_H_
/*
+ * Target address alignments required for GPU access e.g.
+ * MI_STORE_DWORD_IMM.
+ */
+#define alignof_dword 4
+#define alignof_qword 8
+
+/*
* Instruction field definitions used by the command parser
*/
#define INSTR_CLIENT_SHIFT 29
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
new file mode 100644
index 000000000000..f7e69db4019d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include "intel_gt.h"
+#include "intel_gt_pm.h"
+#include "intel_uncore.h"
+
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+{
+ gt->i915 = i915;
+ gt->uncore = &i915->uncore;
+
+ INIT_LIST_HEAD(&gt->active_rings);
+ INIT_LIST_HEAD(&gt->closed_vma);
+
+ spin_lock_init(&gt->closed_lock);
+
+ intel_gt_init_hangcheck(gt);
+ intel_gt_init_reset(gt);
+ intel_gt_pm_init_early(gt);
+}
+
+void intel_gt_init_hw(struct drm_i915_private *i915)
+{
+ i915->gt.ggtt = &i915->ggtt;
+}
+
+static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw(uncore, reg, 0, set);
+}
+
+static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw(uncore, reg, clr, 0);
+}
+
+static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ intel_uncore_rmw(uncore, reg, 0, 0);
+}
+
+static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
+{
+ GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
+ GEN6_RING_FAULT_REG_POSTING_READ(engine);
+}
+
+void
+intel_gt_clear_error_registers(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 eir;
+
+ if (!IS_GEN(i915, 2))
+ clear_register(uncore, PGTBL_ER);
+
+ if (INTEL_GEN(i915) < 4)
+ clear_register(uncore, IPEIR(RENDER_RING_BASE));
+ else
+ clear_register(uncore, IPEIR_I965);
+
+ clear_register(uncore, EIR);
+ eir = intel_uncore_read(uncore, EIR);
+ if (eir) {
+ /*
+ * Some errors might have become stuck; mask them.
+ */
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
+ rmw_set(uncore, EMR, eir);
+ intel_uncore_write(uncore, GEN2_IIR,
+ I915_MASTER_ERROR_INTERRUPT);
+ }
+
+ if (INTEL_GEN(i915) >= 8) {
+ rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
+ } else if (INTEL_GEN(i915) >= 6) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine_masked(engine, i915, engine_mask, id)
+ gen8_clear_engine_error_register(engine);
+ }
+}
+
+static void gen6_check_faults(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 fault;
+
+ for_each_engine(engine, gt->i915, id) {
+ fault = GEN6_RING_FAULT_REG_READ(engine);
+ if (fault & RING_FAULT_VALID) {
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+ }
+}
+
+static void gen8_check_faults(struct intel_gt *gt)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u32 fault = intel_uncore_read(uncore, GEN8_RING_FAULT_REG);
+
+ if (fault & RING_FAULT_VALID) {
+ u32 fault_data0, fault_data1;
+ u64 fault_addr;
+
+ fault_data0 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA0);
+ fault_data1 = intel_uncore_read(uncore, GEN8_FAULT_TLB_DATA1);
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
+ DRM_DEBUG_DRIVER("Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr),
+ lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
+ }
+}
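+
+The 64-bit fault address is reassembled from two 32-bit registers: DATA0
+carries the VA bits from 12 up and DATA1 contributes the bits above 44. The
+shift-and-or arithmetic stands alone below; the register values and the
+exact width of FAULT_VA_HIGH_BITS are made up for the example:
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    #define FAULT_VA_HIGH_BITS 0xfu /* assumed: low nibble of DATA1 */
+
+    int main(void)
+    {
+            uint32_t fault_data0 = 0x00abcdef; /* VA >> 12 */
+            uint32_t fault_data1 = 0x00000003; /* VA[47:44] */
+            uint64_t fault_addr;
+
+            fault_addr = ((uint64_t)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+                         ((uint64_t)fault_data0 << 12);
+
+            printf("fault addr: 0x%08x_%08x\n",
+                   (unsigned)(fault_addr >> 32), (unsigned)fault_addr);
+            return 0;
+    }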
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+
+ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
+ if (INTEL_GEN(i915) >= 8)
+ gen8_check_faults(gt);
+ else if (INTEL_GEN(i915) >= 6)
+ gen6_check_faults(gt);
+ else
+ return;
+
+ intel_gt_clear_error_registers(gt, ALL_ENGINES);
+}
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ intel_wakeref_t wakeref;
+
+ /*
+ * No actual flushing is required for the GTT write domain for reads
+ * from the GTT domain. Writes to it "immediately" go to main memory
+ * as far as we know, so there's no chipset flush. It also doesn't
+ * land in the GPU render cache.
+ *
+ * However, we do have to enforce the order so that all writes through
+ * the GTT land before any writes to the device, such as updates to
+ * the GATT itself.
+ *
+ * We also have to wait a bit for the writes to land from the GTT.
+ * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+ * timing. This issue has only been observed when switching quickly
+ * between GTT writes and CPU reads from inside the kernel on recent hw,
+ * and it appears to only affect discrete GTT blocks (i.e. on LLC
+ * system agents we cannot reproduce this behaviour, until Cannonlake
+ * that was!).
+ */
+
+ wmb();
+
+ if (INTEL_INFO(i915)->has_coherent_ggtt)
+ return;
+
+ intel_gt_chipset_flush(gt);
+
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ struct intel_uncore *uncore = gt->uncore;
+
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_posting_read_fw(uncore,
+ RING_HEAD(RENDER_RING_BASE));
+ spin_unlock_irq(&uncore->lock);
+ }
+}
+
+void intel_gt_chipset_flush(struct intel_gt *gt)
+{
+ wmb();
+ if (INTEL_GEN(gt->i915) < 6)
+ intel_gtt_chipset_flush();
+}
+
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (!obj)
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ gt->scratch = vma;
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
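+
+intel_gt_init_scratch() tries stolen memory first, falls back to an internal
+allocation, and unwinds through a single error label. The same
+try/fallback/unwind shape as a self-contained sketch with simulated
+allocators (alloc_stolen, alloc_internal and pin_page are stand-ins):
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    static void *alloc_stolen(size_t size) { (void)size; return NULL; }
+    static void *alloc_internal(size_t size) { return malloc(size); }
+    static int pin_page(void *obj) { (void)obj; return 0; }
+
+    static int init_scratch(void **scratch, size_t size)
+    {
+            void *obj;
+            int ret;
+
+            obj = alloc_stolen(size);       /* preferred source */
+            if (!obj)
+                    obj = alloc_internal(size); /* fallback */
+            if (!obj) {
+                    fprintf(stderr, "Failed to allocate scratch page\n");
+                    return -1;
+            }
+
+            ret = pin_page(obj); /* models i915_vma_pin() */
+            if (ret)
+                    goto err_unref;
+
+            *scratch = obj;
+            return 0;
+
+    err_unref:
+            free(obj);
+            return ret;
+    }
+
+    int main(void)
+    {
+            void *scratch;
+
+            if (!init_scratch(&scratch, 4096)) {
+                    printf("scratch at %p\n", scratch);
+                    free(scratch);
+            }
+            return 0;
+    }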
+
+void intel_gt_fini_scratch(struct intel_gt *gt)
+{
+ i915_vma_unpin_and_release(&gt->scratch, 0);
+}
+
+void intel_gt_cleanup_early(struct intel_gt *gt)
+{
+ intel_gt_fini_reset(gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
new file mode 100644
index 000000000000..640bb0531f5b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT__
+#define __INTEL_GT__
+
+#include "intel_engine_types.h"
+#include "intel_gt_types.h"
+#include "intel_reset.h"
+
+struct drm_i915_private;
+
+static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
+{
+ return container_of(uc, struct intel_gt, uc);
+}
+
+static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
+{
+ return container_of(guc, struct intel_gt, uc.guc);
+}
+
+static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
+{
+ return container_of(huc, struct intel_gt, uc.huc);
+}
+
+void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915);
+void intel_gt_init_hw(struct drm_i915_private *i915);
+
+void intel_gt_cleanup_early(struct intel_gt *gt);
+
+void intel_gt_check_and_clear_faults(struct intel_gt *gt);
+void intel_gt_clear_error_registers(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask);
+
+void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
+void intel_gt_chipset_flush(struct intel_gt *gt);
+
+void intel_gt_init_hangcheck(struct intel_gt *gt);
+
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
+void intel_gt_fini_scratch(struct intel_gt *gt);
+
+static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
+ enum intel_gt_scratch_field field)
+{
+ return i915_ggtt_offset(gt->scratch) + field;
+}
+
+static inline bool intel_gt_is_wedged(struct intel_gt *gt)
+{
+ return __intel_reset_failed(&gt->reset);
+}
+
+void intel_gt_queue_hangcheck(struct intel_gt *gt);
+
+#endif /* __INTEL_GT__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 9f8f7f54191f..65c0d0c9d543 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -5,7 +5,9 @@
*/
#include "i915_drv.h"
+#include "i915_params.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"
@@ -17,8 +19,8 @@ static void pm_notify(struct drm_i915_private *i915, int state)
static int intel_gt_unpark(struct intel_wakeref *wf)
{
- struct drm_i915_private *i915 =
- container_of(wf, typeof(*i915), gt.wakeref);
+ struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
+ struct drm_i915_private *i915 = gt->i915;
GEM_TRACE("\n");
@@ -33,8 +35,8 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
- i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
- GEM_BUG_ON(!i915->gt.awake);
+ gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ GEM_BUG_ON(!gt->awake);
intel_enable_gt_powersave(i915);
@@ -44,16 +46,18 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
i915_pmu_gt_unparked(i915);
- i915_queue_hangcheck(i915);
+ intel_gt_queue_hangcheck(gt);
pm_notify(i915, INTEL_GT_UNPARK);
return 0;
}
-void intel_gt_pm_get(struct drm_i915_private *i915)
+void intel_gt_pm_get(struct intel_gt *gt)
{
- intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
+ struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+ intel_wakeref_get(rpm, &gt->wakeref, intel_gt_unpark);
}
static int intel_gt_park(struct intel_wakeref *wf)
@@ -76,28 +80,30 @@ static int intel_gt_park(struct intel_wakeref *wf)
return 0;
}
-void intel_gt_pm_put(struct drm_i915_private *i915)
+void intel_gt_pm_put(struct intel_gt *gt)
{
- intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
+ struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+ intel_wakeref_put(rpm, &gt->wakeref, intel_gt_park);
}
-void intel_gt_pm_init(struct drm_i915_private *i915)
+void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&i915->gt.wakeref);
- BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications);
+ intel_wakeref_init(&gt->wakeref);
+ BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}
-static bool reset_engines(struct drm_i915_private *i915)
+static bool reset_engines(struct intel_gt *gt)
{
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
return false;
- return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+ return __intel_gt_reset(gt, ALL_ENGINES) == 0;
}
/**
* intel_gt_sanitize: called after the GPU has lost power
- * @i915: the i915 device
+ * @gt: the i915 GT container
* @force: ignore a failed reset and sanitize engine state anyway
*
* Anytime we reset the GPU, either with an explicit GPU reset or through a
@@ -105,21 +111,23 @@ static bool reset_engines(struct drm_i915_private *i915)
* to match. Note that calling intel_gt_sanitize() if the GPU has not
* been reset results in much confusion!
*/
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("\n");
- if (!reset_engines(i915) && !force)
+ intel_uc_sanitize(&gt->uc);
+
+ if (!reset_engines(gt) && !force)
return;
- for_each_engine(engine, i915, id)
- intel_engine_reset(engine, false);
+ for_each_engine(engine, gt->i915, id)
+ __intel_engine_reset(engine, false);
}
-int intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -131,8 +139,8 @@ int intel_gt_resume(struct drm_i915_private *i915)
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
- intel_gt_pm_get(i915);
- for_each_engine(engine, i915, id) {
+ intel_gt_pm_get(gt);
+ for_each_engine(engine, gt->i915, id) {
struct intel_context *ce;
intel_engine_pm_get(engine);
@@ -141,22 +149,18 @@ int intel_gt_resume(struct drm_i915_private *i915)
if (ce)
ce->ops->reset(ce);
- ce = engine->preempt_context;
- if (ce)
- ce->ops->reset(ce);
-
engine->serial++; /* kernel context lost */
err = engine->resume(engine);
intel_engine_pm_put(engine);
if (err) {
- dev_err(i915->drm.dev,
+ dev_err(gt->i915->drm.dev,
"Failed to restart %s (%d)\n",
engine->name, err);
break;
}
}
- intel_gt_pm_put(i915);
+ intel_gt_pm_put(gt);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 53f342b20181..ba960e1fc209 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -9,19 +9,19 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct intel_gt;
enum {
INTEL_GT_UNPARK,
INTEL_GT_PARK,
};
-void intel_gt_pm_get(struct drm_i915_private *i915);
-void intel_gt_pm_put(struct drm_i915_private *i915);
+void intel_gt_pm_get(struct intel_gt *gt);
+void intel_gt_pm_put(struct intel_gt *gt);
-void intel_gt_pm_init(struct drm_i915_private *i915);
+void intel_gt_pm_init_early(struct intel_gt *gt);
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-int intel_gt_resume(struct drm_i915_private *i915);
+void intel_gt_sanitize(struct intel_gt *gt, bool force);
+int intel_gt_resume(struct intel_gt *gt);
#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
new file mode 100644
index 000000000000..34d4a868e4f1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_TYPES__
+#define __INTEL_GT_TYPES__
+
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "uc/intel_uc.h"
+
+#include "i915_vma.h"
+#include "intel_reset_types.h"
+#include "intel_wakeref.h"
+
+struct drm_i915_private;
+struct i915_ggtt;
+struct intel_uncore;
+
+struct intel_hangcheck {
+ /* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+
+ struct delayed_work work;
+};
+
+struct intel_gt {
+ struct drm_i915_private *i915;
+ struct intel_uncore *uncore;
+ struct i915_ggtt *ggtt;
+
+ struct intel_uc uc;
+
+ struct intel_gt_timelines {
+ struct mutex mutex; /* protects list */
+ struct list_head active_list;
+
+ /* Pack multiple timelines' seqnos into the same page */
+ spinlock_t hwsp_lock;
+ struct list_head hwsp_free_list;
+ } timelines;
+
+ struct list_head active_rings;
+
+ struct intel_wakeref wakeref;
+
+ struct list_head closed_vma;
+ spinlock_t closed_lock; /* guards the list of closed_vma */
+
+ struct intel_hangcheck hangcheck;
+ struct intel_reset reset;
+
+ /**
+ * Is the GPU currently considered idle, or busy executing
+ * userspace requests? Whilst idle, we allow runtime power
+ * management to power down the hardware and display clocks.
+ * In order to reduce the effect on performance, there
+ * is a slight delay before we do so.
+ */
+ intel_wakeref_t awake;
+
+ struct blocking_notifier_head pm_notifications;
+
+ ktime_t last_init_time;
+
+ struct i915_vma *scratch;
+
+ u32 pm_imr;
+ u32 pm_ier;
+
+ u32 pm_guc_events;
+};
+
+enum intel_gt_scratch_field {
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,
+
+ /* 8 bytes */
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
+};
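+
+Each enumerator doubles as a byte offset into the single scratch page, so
+intel_gt_scratch_offset() is just base-plus-field (CLEAR_SLM_WA and
+RENDER_FLUSH appear to share slot 128, presumably because they are never
+needed at the same time). A sketch of the addressing with an invented GGTT
+base:
+
+    #include <stdio.h>
+    #include <stdint.h>
+
+    enum scratch_field {
+            SCRATCH_FIELD_DEFAULT = 0,
+            SCRATCH_FIELD_CLEAR_SLM_WA = 128,
+            SCRATCH_FIELD_RENDER_FLUSH = 128, /* shares the slot */
+            SCRATCH_FIELD_COHERENTL3_WA = 256,
+    };
+
+    static uint32_t scratch_offset(uint32_t ggtt_base, enum scratch_field f)
+    {
+            return ggtt_base + f; /* field is a byte offset into the page */
+    }
+
+    int main(void)
+    {
+            const uint32_t base = 0x100000; /* hypothetical GGTT offset */
+
+            printf("default:      %#x\n",
+                   (unsigned)scratch_offset(base, SCRATCH_FIELD_DEFAULT));
+            printf("render flush: %#x\n",
+                   (unsigned)scratch_offset(base, SCRATCH_FIELD_RENDER_FLUSH));
+            return 0;
+    }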
+
+#endif /* __INTEL_GT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
index 6bcfa6456c45..05d042cdefe2 100644
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
@@ -22,8 +22,10 @@
*
*/
-#include "intel_reset.h"
#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_gt.h"
+#include "intel_reset.h"
struct hangcheck {
u64 acthd;
@@ -57,9 +59,6 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
int slice;
int subslice;
- if (engine->id != RCS0)
- return true;
-
intel_engine_get_instdone(engine, &instdone);
/* There might be unstable subunit states even when
@@ -103,7 +102,6 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
static enum intel_engine_hangcheck_action
engine_stuck(struct intel_engine_cs *engine, u64 acthd)
{
- struct drm_i915_private *dev_priv = engine->i915;
enum intel_engine_hangcheck_action ha;
u32 tmp;
@@ -111,7 +109,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
if (ha != ENGINE_DEAD)
return ha;
- if (IS_GEN(dev_priv, 2))
+ if (IS_GEN(engine->i915, 2))
return ENGINE_DEAD;
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -121,8 +119,8 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
*/
tmp = ENGINE_READ(engine, RING_CTL);
if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, engine->mask, 0,
- "stuck wait on %s", engine->name);
+ intel_gt_handle_error(engine->gt, engine->mask, 0,
+ "stuck wait on %s", engine->name);
ENGINE_WRITE(engine, RING_CTL, tmp);
return ENGINE_WAIT_KICK;
}
@@ -222,7 +220,7 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
I915_ENGINE_WEDGED_TIMEOUT);
}
-static void hangcheck_declare_hang(struct drm_i915_private *i915,
+static void hangcheck_declare_hang(struct intel_gt *gt,
intel_engine_mask_t hung,
intel_engine_mask_t stuck)
{
@@ -238,12 +236,12 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
hung &= ~stuck;
len = scnprintf(msg, sizeof(msg),
"%s on ", stuck == hung ? "no progress" : "hang");
- for_each_engine_masked(engine, i915, hung, tmp)
+ for_each_engine_masked(engine, gt->i915, hung, tmp)
len += scnprintf(msg + len, sizeof(msg) - len,
"%s, ", engine->name);
msg[len-2] = '\0';
- return i915_handle_error(i915, hung, I915_ERROR_CAPTURE, "%s", msg);
+ return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg);
}
/*
@@ -254,11 +252,10 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
* we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip.
*/
-static void i915_hangcheck_elapsed(struct work_struct *work)
+static void hangcheck_elapsed(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv),
- gpu_error.hangcheck_work.work);
+ struct intel_gt *gt =
+ container_of(work, typeof(*gt), hangcheck.work.work);
intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -267,13 +264,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!i915_modparams.enable_hangcheck)
return;
- if (!READ_ONCE(dev_priv->gt.awake))
+ if (!READ_ONCE(gt->awake))
return;
- if (i915_terminally_wedged(dev_priv))
+ if (intel_gt_is_wedged(gt))
return;
- wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get_if_in_use(&gt->i915->runtime_pm);
if (!wakeref)
return;
@@ -281,9 +278,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
- intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
+ intel_uncore_arm_unclaimed_mmio_detection(gt->uncore);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, gt->i915, id) {
struct hangcheck hc;
intel_engine_signal_breadcrumbs(engine);
@@ -305,7 +302,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (GEM_SHOW_DEBUG() && (hung | stuck)) {
struct drm_printer p = drm_debug_printer("hangcheck");
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, gt->i915, id) {
if (intel_engine_is_idle(engine))
continue;
@@ -314,20 +311,37 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
}
if (wedged) {
- dev_err(dev_priv->drm.dev,
+ dev_err(gt->i915->drm.dev,
"GPU recovery timed out,"
" cancelling all in-flight rendering.\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(gt);
}
if (hung)
- hangcheck_declare_hang(dev_priv, hung, stuck);
+ hangcheck_declare_hang(gt, hung, stuck);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
/* Reset timer in case GPU hangs without another request being added */
- i915_queue_hangcheck(dev_priv);
+ intel_gt_queue_hangcheck(gt);
+}
+
+void intel_gt_queue_hangcheck(struct intel_gt *gt)
+{
+ unsigned long delay;
+
+ if (unlikely(!i915_modparams.enable_hangcheck))
+ return;
+
+ /*
+ * Don't continually defer the hangcheck so that it is always run at
+ * least once after work has been scheduled on any ring. Otherwise,
+ * we will ignore a hung ring if a second ring is kept busy.
+ */
+
+ delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
+ queue_delayed_work(system_long_wq, &gt->hangcheck.work, delay);
}
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
@@ -336,10 +350,9 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
engine->hangcheck.action_timestamp = jiffies;
}
-void intel_hangcheck_init(struct drm_i915_private *i915)
+void intel_gt_init_hangcheck(struct intel_gt *gt)
{
- INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
- i915_hangcheck_elapsed);
+ INIT_DELAYED_WORK(&gt->hangcheck.work, hangcheck_elapsed);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 82b7ace62d97..d9061d9348cb 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -136,9 +136,9 @@
#include "gem/i915_gem_context.h"
#include "i915_drv.h"
-#include "i915_gem_render_state.h"
#include "i915_vgpu.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
@@ -161,6 +161,8 @@
#define GEN8_CTX_STATUS_COMPLETED_MASK \
(GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
@@ -221,6 +223,26 @@ static void execlists_init_reg_state(u32 *reg_state,
struct intel_engine_cs *engine,
struct intel_ring *ring);
+static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
+{
+ return (i915_ggtt_offset(engine->status_page.vma) +
+ I915_GEM_HWS_PREEMPT_ADDR);
+}
+
+static inline void
+ring_set_paused(const struct intel_engine_cs *engine, int state)
+{
+ /*
+ * We inspect HWS_PREEMPT with a semaphore inside
+ * engine->emit_fini_breadcrumb. If the dword is true,
+ * the ring is paused as the semaphore will busywait
+ * until the dword is false.
+ */
+ engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state;
+ if (state)
+ wmb();
+}
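+
+The pause mechanism is just a dword in the status page: the fini-breadcrumb
+emits a semaphore that busywaits while the dword is non-zero, so writing 1
+parks the ring and writing 0 releases it. A userspace model of that flag,
+using a C11 atomic in place of the HWSP dword and wmb():
+
+    #include <stdatomic.h>
+    #include <stdio.h>
+
+    static atomic_int hws_preempt; /* models the HWS_PREEMPT dword */
+
+    static void ring_set_paused(int state)
+    {
+            /* the release store stands in for the kernel's wmb() */
+            atomic_store_explicit(&hws_preempt, state, memory_order_release);
+    }
+
+    /* models MI_SEMAPHORE_WAIT in the breadcrumb: spin while non-zero */
+    static void semaphore_busywait(void)
+    {
+            while (atomic_load_explicit(&hws_preempt, memory_order_acquire))
+                    ; /* ring is paused */
+    }
+
+    int main(void)
+    {
+            ring_set_paused(1);
+            printf("paused: %d\n", atomic_load(&hws_preempt));
+
+            ring_set_paused(0);
+            semaphore_busywait(); /* returns immediately once released */
+            printf("released\n");
+            return 0;
+    }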
+
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -236,6 +258,17 @@ static int effective_prio(const struct i915_request *rq)
int prio = rq_prio(rq);
/*
+ * If this request is special and must not be interrupted at any
+ * cost, so be it. Note we are only checking the most recent request
+ * in the context and so may be masking an earlier vip request. It
+ * is hoped that under the conditions where nopreempt is used, this
+ * will not matter (i.e. all requests to that context will be
+ * nopreempt for as long as desired).
+ */
+ if (i915_request_has_nopreempt(rq))
+ prio = I915_PRIORITY_UNPREEMPTABLE;
+
+ /*
* On unwinding the active request, we give it a priority bump
* if it has completed waiting on any semaphore. If we know that
* the request has already started, we can prevent an unwanted
@@ -245,6 +278,7 @@ static int effective_prio(const struct i915_request *rq)
prio |= I915_PRIORITY_NOSEMAPHORE;
/* Restrict mere WAIT boosts from triggering preemption */
+ BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
return prio | __NO_PREEMPTION;
}
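
The effective priority is a composition: internal boost bits sit below the
user-visible priority mask, and a nopreempt request is simply pinned to the
maximum so no queue hint can ever exceed it. Distilled, with an invented bit
layout:

    #include <stdio.h>
    #include <limits.h>

    #define NO_PREEMPTION      (1 << 0) /* internal bit below the prio mask */
    #define PRIO_UNPREEMPTABLE INT_MAX

    static int effective_prio(int prio, int has_nopreempt)
    {
            if (has_nopreempt)
                    prio = PRIO_UNPREEMPTABLE; /* outranks any queue hint */

            /* mere WAIT boosts must not be able to trigger preemption */
            return prio | NO_PREEMPTION;
    }

    int main(void)
    {
            printf("%d\n", effective_prio(1024, 0)); /* 1025 */
            printf("%d\n", effective_prio(1024, 1) == INT_MAX); /* 1 */
            return 0;
    }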
@@ -271,10 +305,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
{
int last_prio;
- if (!engine->preempt_context)
- return false;
-
- if (i915_request_completed(rq))
+ if (!intel_engine_has_semaphores(engine))
return false;
/*
@@ -338,9 +369,6 @@ __maybe_unused static inline bool
assert_priority_queue(const struct i915_request *prev,
const struct i915_request *next)
{
- const struct intel_engine_execlists *execlists =
- &prev->engine->execlists;
-
/*
* Without preemption, the prev may refer to the still active element
* which we refuse to let go.
@@ -348,7 +376,7 @@ assert_priority_queue(const struct i915_request *prev,
* Even with preemption, there are times when we think it is better not
* to preempt and leave an ostensibly lower priority request in flight.
*/
- if (port_request(execlists->port) == prev)
+ if (i915_request_is_active(prev))
return true;
return rq_prio(prev) >= rq_prio(next);
@@ -442,13 +470,11 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
struct intel_engine_cs *owner;
if (i915_request_completed(rq))
- break;
+ continue; /* XXX */
__i915_request_unsubmit(rq);
unwind_wa_tail(rq);
- GEM_BUG_ON(rq->hw_context->inflight);
-
/*
* Push the request back into the queue for later resubmission.
* If this request is not native to this physical engine (i.e.
@@ -468,6 +494,19 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move(&rq->sched.link, pl);
active = rq;
} else {
+ /*
+ * Decouple the virtual breadcrumb before moving it
+ * back to the virtual engine -- we don't want the
+ * request to complete in the background and try
+ * and cancel the breadcrumb on the virtual engine
+ * (instead of the old engine where it is linked)!
+ */
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &rq->fence.flags)) {
+ spin_lock(&rq->lock);
+ i915_request_cancel_breadcrumb(rq);
+ spin_unlock(&rq->lock);
+ }
rq->engine = owner;
owner->submit_request(rq);
active = NULL;
@@ -500,32 +539,32 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
status, rq);
}
-inline void
-execlists_user_begin(struct intel_engine_execlists *execlists,
- const struct execlist_port *port)
+static inline struct i915_request *
+execlists_schedule_in(struct i915_request *rq, int idx)
{
- execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
-}
+ struct intel_context *ce = rq->hw_context;
+ int count;
-inline void
-execlists_user_end(struct intel_engine_execlists *execlists)
-{
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
-}
+ trace_i915_request_in(rq, idx);
-static inline void
-execlists_context_schedule_in(struct i915_request *rq)
-{
- GEM_BUG_ON(rq->hw_context->inflight);
+ count = intel_context_inflight_count(ce);
+ if (!count) {
+ intel_context_get(ce);
+ ce->inflight = rq->engine;
+
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
+ intel_engine_context_in(ce->inflight);
+ }
+
+ intel_context_inflight_inc(ce);
+ GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
- execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
- intel_engine_context_in(rq->engine);
- rq->hw_context->inflight = rq->engine;
+ return i915_request_get(rq);
}
-static void kick_siblings(struct i915_request *rq)
+static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
{
- struct virtual_engine *ve = to_virtual_engine(rq->hw_context->engine);
+ struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
struct i915_request *next = READ_ONCE(ve->request);
if (next && next->execution_mask & ~rq->execution_mask)
@@ -533,29 +572,42 @@ static void kick_siblings(struct i915_request *rq)
}
static inline void
-execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
+execlists_schedule_out(struct i915_request *rq)
{
- rq->hw_context->inflight = NULL;
- intel_engine_context_out(rq->engine);
- execlists_context_status_change(rq, status);
+ struct intel_context *ce = rq->hw_context;
+
+ GEM_BUG_ON(!intel_context_inflight_count(ce));
+
trace_i915_request_out(rq);
- /*
- * If this is part of a virtual engine, its next request may have
- * been blocked waiting for access to the active context. We have
- * to kick all the siblings again in case we need to switch (e.g.
- * the next request is not runnable on this engine). Hopefully,
- * we will already have submitted the next request before the
- * tasklet runs and do not need to rebuild each virtual tree
- * and kick everyone again.
- */
- if (rq->engine != rq->hw_context->engine)
- kick_siblings(rq);
+ intel_context_inflight_dec(ce);
+ if (!intel_context_inflight_count(ce)) {
+ intel_engine_context_out(ce->inflight);
+ execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+
+ /*
+ * If this is part of a virtual engine, its next request may
+ * have been blocked waiting for access to the active context.
+ * We have to kick all the siblings again in case we need to
+ * switch (e.g. the next request is not runnable on this
+ * engine). Hopefully, we will already have submitted the next
+ * request before the tasklet runs and do not need to rebuild
+ * each virtual tree and kick everyone again.
+ */
+ ce->inflight = NULL;
+ if (rq->engine != ce->engine)
+ kick_siblings(rq, ce);
+
+ intel_context_put(ce);
+ }
+
+ i915_request_put(rq);
}
-static u64 execlists_update_context(struct i915_request *rq)
+static u64 execlists_update_context(const struct i915_request *rq)
{
struct intel_context *ce = rq->hw_context;
+ u64 desc;
ce->lrc_reg_state[CTX_RING_TAIL + 1] =
intel_ring_set_tail(rq->ring, rq->tail);
@@ -576,7 +628,11 @@ static u64 execlists_update_context(struct i915_request *rq)
* wmb).
*/
mb();
- return ce->lrc_desc;
+
+ desc = ce->lrc_desc;
+ ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE;
+
+ return desc;
}
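
Returning the descriptor and then clearing CTX_DESC_FORCE_RESTORE makes the
force-restore a one-shot: it applies to the submission being built and is
gone for the next. The consume-and-clear pattern in isolation:

    #include <stdio.h>
    #include <stdint.h>

    #define CTX_DESC_FORCE_RESTORE (1ull << 2)

    static uint64_t update_context(uint64_t *lrc_desc)
    {
            uint64_t desc = *lrc_desc;

            *lrc_desc &= ~CTX_DESC_FORCE_RESTORE; /* this submit only */
            return desc;
    }

    int main(void)
    {
            uint64_t lrc_desc = 0x1000 | CTX_DESC_FORCE_RESTORE;

            printf("first:  %#llx\n",
                   (unsigned long long)update_context(&lrc_desc)); /* 0x1004 */
            printf("second: %#llx\n",
                   (unsigned long long)update_context(&lrc_desc)); /* 0x1000 */
            return 0;
    }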
static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
@@ -590,12 +646,62 @@ static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc
}
}
+static __maybe_unused void
+trace_ports(const struct intel_engine_execlists *execlists,
+ const char *msg,
+ struct i915_request * const *ports)
+{
+ const struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n",
+ engine->name, msg,
+ ports[0]->fence.context,
+ ports[0]->fence.seqno,
+ i915_request_completed(ports[0]) ? "!" :
+ i915_request_started(ports[0]) ? "*" :
+ "",
+ ports[1] ? ports[1]->fence.context : 0,
+ ports[1] ? ports[1]->fence.seqno : 0);
+}
+
+static __maybe_unused bool
+assert_pending_valid(const struct intel_engine_execlists *execlists,
+ const char *msg)
+{
+ struct i915_request * const *port, *rq;
+ struct intel_context *ce = NULL;
+
+ trace_ports(execlists, msg, execlists->pending);
+
+ if (execlists->pending[execlists_num_ports(execlists)])
+ return false;
+
+ for (port = execlists->pending; (rq = *port); port++) {
+ if (ce == rq->hw_context)
+ return false;
+
+ ce = rq->hw_context;
+ if (i915_request_completed(rq))
+ continue;
+
+ if (i915_active_is_idle(&ce->active))
+ return false;
+
+ if (!i915_vma_is_pinned(ce->state))
+ return false;
+ }
+
+ return ce;
+}
+
static void execlists_submit_ports(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
unsigned int n;
+ GEM_BUG_ON(!assert_pending_valid(execlists, "submit"));
+
/*
* We can skip acquiring intel_runtime_pm_get() here as it was taken
* on our behalf by the request (see i915_gem_mark_busy()) and it will
@@ -604,7 +710,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* that all ELSP are drained i.e. we have processed the CSB,
* before allowing ourselves to idle and calling intel_runtime_pm_put().
*/
- GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref));
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
/*
* ELSQ note: the submit queue is not cleared after being submitted
@@ -613,38 +719,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
* of elsq entries, keep this in mind before changing the loop below.
*/
for (n = execlists_num_ports(execlists); n--; ) {
- struct i915_request *rq;
- unsigned int count;
- u64 desc;
+ struct i915_request *rq = execlists->pending[n];
- rq = port_unpack(&port[n], &count);
- if (rq) {
- GEM_BUG_ON(count > !n);
- if (!count++)
- execlists_context_schedule_in(rq);
- port_set(&port[n], port_pack(rq, count));
- desc = execlists_update_context(rq);
- GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
-
- GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
- engine->name, n,
- port[n].context_id, count,
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq),
- rq_prio(rq));
- } else {
- GEM_BUG_ON(!n);
- desc = 0;
- }
-
- write_desc(execlists, desc, n);
+ write_desc(execlists,
+ rq ? execlists_update_context(rq) : 0,
+ n);
}
/* we need to manually load the submit queue */
if (execlists->ctrl_reg)
writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
}
static bool ctx_single_port_submission(const struct intel_context *ce)
@@ -668,6 +752,7 @@ static bool can_merge_ctx(const struct intel_context *prev,
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
+ GEM_BUG_ON(prev == next);
GEM_BUG_ON(!assert_priority_queue(prev, next));
if (!can_merge_ctx(prev->hw_context, next->hw_context))
@@ -676,58 +761,6 @@ static bool can_merge_rq(const struct i915_request *prev,
return true;
}
-static void port_assign(struct execlist_port *port, struct i915_request *rq)
-{
- GEM_BUG_ON(rq == port_request(port));
-
- if (port_isset(port))
- i915_request_put(port_request(port));
-
- port_set(port, port_pack(i915_request_get(rq), port_count(port)));
-}
-
-static void inject_preempt_context(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists *execlists = &engine->execlists;
- struct intel_context *ce = engine->preempt_context;
- unsigned int n;
-
- GEM_BUG_ON(execlists->preempt_complete_status !=
- upper_32_bits(ce->lrc_desc));
-
- /*
- * Switch to our empty preempt context so
- * the state of the GPU is known (idle).
- */
- GEM_TRACE("%s\n", engine->name);
- for (n = execlists_num_ports(execlists); --n; )
- write_desc(execlists, 0, n);
-
- write_desc(execlists, ce->lrc_desc, n);
-
- /* we need to manually load the submit queue */
- if (execlists->ctrl_reg)
- writel(EL_CTRL_LOAD, execlists->ctrl_reg);
-
- execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
- execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
-
- (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
-}
-
-static void complete_preempt_context(struct intel_engine_execlists *execlists)
-{
- GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
-
- if (inject_preempt_hang(execlists))
- return;
-
- execlists_cancel_port_requests(execlists);
- __unwind_incomplete_requests(container_of(execlists,
- struct intel_engine_cs,
- execlists));
-}
-
static void virtual_update_register_offsets(u32 *regs,
struct intel_engine_cs *engine)
{
@@ -792,7 +825,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
* we reuse the register offsets). This is a very small
 * hysteresis on the greedy selection algorithm.
*/
- inflight = READ_ONCE(ve->context.inflight);
+ inflight = intel_context_inflight(&ve->context);
if (inflight && inflight != engine)
return false;
@@ -815,13 +848,108 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_unlock(&old->breadcrumbs.irq_lock);
}
+static struct i915_request *
+last_active(const struct intel_engine_execlists *execlists)
+{
+ struct i915_request * const *last = execlists->active;
+
+ while (*last && i915_request_completed(*last))
+ last++;
+
+ return *last;
+}
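/*
 * Illustrative note (not part of the patch): execlists->active walks a
 * NULL-terminated array of inflight requests, so the scan above yields
 * the oldest request still executing, or NULL once every port has
 * retired. E.g. with active pointing at { completed, running, NULL },
 * last_active() skips the completed slot and returns the running one.
 */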
+
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
+{
+ LIST_HEAD(list);
+
+ /*
+ * We want to move the interrupted request to the back of
+ * the round-robin list (i.e. its priority level), but
+ * in doing so, we must then move all requests that were in
+ * flight and were waiting for the interrupted request to
+ * be run after it again.
+ */
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(i915_request_is_active(rq));
+ list_move_tail(&rq->sched.link, pl);
+
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ /* No waiter should start before its signaler */
+ GEM_BUG_ON(i915_request_started(w) &&
+ !i915_request_completed(rq));
+
+ GEM_BUG_ON(i915_request_is_active(w));
+ if (list_empty(&w->sched.link))
+ continue; /* Not yet submitted; unready */
+
+ if (rq_prio(w) < rq_prio(rq))
+ continue;
+
+ GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
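/*
 * A minimal sketch of the deferral walk above, using plain list_head
 * plumbing from <linux/list.h> rather than the i915 scheduler types
 * (toy_task/toy_waiter are hypothetical, and every toy_task is assumed
 * to already sit on some list). The shape is the same: move the
 * interrupted node to the tail of its priority list, then queue its
 * waiters so each one is re-sorted after its signaler.
 */
#include <linux/list.h>

struct toy_task {
	struct list_head link;    /* position in the priority list */
	struct list_head waiters; /* list of struct toy_waiter */
};

struct toy_waiter {
	struct list_head wait_link; /* entry in the signaler's waiters */
	struct toy_task *task;
};

static void toy_defer(struct toy_task *t, struct list_head *pl)
{
	LIST_HEAD(pending);

	do {
		struct toy_waiter *w;

		/* rotate the signaler to the back of its level */
		list_move_tail(&t->link, pl);

		/* collect waiters; they must run after the signaler */
		list_for_each_entry(w, &t->waiters, wait_link)
			list_move_tail(&w->task->link, &pending);

		t = list_first_entry_or_null(&pending,
					     struct toy_task, link);
	} while (t);
}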
+
+static void defer_active(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ return;
+
+ defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+}
+
+static bool
+need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+ int hint;
+
+ if (!intel_engine_has_semaphores(engine))
+ return false;
+
+ if (list_is_last(&rq->sched.link, &engine->active.requests))
+ return false;
+
+ hint = max(rq_prio(list_next_entry(rq, sched.link)),
+ engine->execlists.queue_priority_hint);
+
+ return hint >= effective_prio(rq);
+}
+
+static bool
+enable_timeslice(struct intel_engine_cs *engine)
+{
+ struct i915_request *last = last_active(&engine->execlists);
+
+ return last && need_timeslice(engine, last);
+}
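/*
 * Worked example for the timeslice test above (values hypothetical):
 * with requests A then B on engine->active.requests and
 * prio(A) = prio(B) = 0, queue_priority_hint = INT_MIN,
 *
 *	hint = max(rq_prio(B), INT_MIN) = 0 >= effective_prio(A)
 *
 * so need_timeslice() is true and CSB promotion arms the one-jiffy
 * timer; when it fires, the tasklet defers A behind B at the same
 * priority level via defer_active().
 */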
+
+static void record_preemption(struct intel_engine_execlists *execlists)
+{
+ (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
+}
+
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
- struct i915_request *last = port_request(port);
+ struct i915_request **port = execlists->pending;
+ struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request *last;
struct rb_node *rb;
bool submit = false;
@@ -867,65 +995,100 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
break;
}
+ /*
+ * If the queue is higher priority than the last
+ * request in the currently active context, submit afresh.
+ * We will resubmit again afterwards in case we need to split
+ * the active context to interject the preemption request,
+ * i.e. we will retrigger preemption following the ack in case
+ * of trouble.
+ */
+ last = last_active(execlists);
if (last) {
- /*
- * Don't resubmit or switch until all outstanding
- * preemptions (lite-restore) are seen. Then we
- * know the next preemption status we see corresponds
- * to this ELSP update.
- */
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_USER));
- GEM_BUG_ON(!port_count(&port[0]));
+ if (need_preempt(engine, last, rb)) {
+ GEM_TRACE("%s: preempting last=%llx:%lld, prio=%d, hint=%d\n",
+ engine->name,
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
+ record_preemption(execlists);
- /*
- * If we write to ELSP a second time before the HW has had
- * a chance to respond to the previous write, we can confuse
- * the HW and hit "undefined behaviour". After writing to ELSP,
- * we must then wait until we see a context-switch event from
- * the HW to indicate that it has had a chance to respond.
- */
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
- return;
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
- if (need_preempt(engine, last, rb)) {
- inject_preempt_context(engine);
- return;
- }
+ /*
+ * Note that we have not stopped the GPU at this point,
+ * so we are unwinding the incomplete requests as they
+ * remain inflight and so by the time we do complete
+ * the preemption, some of the unwound requests may
+ * complete!
+ */
+ __unwind_incomplete_requests(engine);
- /*
- * In theory, we could coalesce more requests onto
- * the second port (the first port is active, with
- * no preemptions pending). However, that means we
- * then have to deal with the possible lite-restore
- * of the second port (as we submit the ELSP, there
- * may be a context-switch) but also we may complete
- * the resubmission before the context-switch. Ergo,
- * coalescing onto the second port will cause a
- * preemption event, but we cannot predict whether
- * that will affect port[0] or port[1].
- *
- * If the second port is already active, we can wait
- * until the next context-switch before contemplating
- * new requests. The GPU will be busy and we should be
- * able to resubmit the new ELSP before it idles,
- * avoiding pipeline bubbles (momentary pauses where
- * the driver is unable to keep up the supply of new
- * work). However, we have to double check that the
- * priorities of the ports haven't been switched.
- */
- if (port_count(&port[1]))
- return;
+ /*
+ * If we need to return to the preempted context, we
+ * need to skip the lite-restore and force it to
+ * reload the RING_TAIL. Otherwise, the HW has a
+ * tendency to ignore us rewinding the TAIL to the
+ * end of an earlier request.
+ */
+ last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+ last = NULL;
+ } else if (need_timeslice(engine, last) &&
+ !timer_pending(&engine->execlists.timer)) {
+ GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n",
+ engine->name,
+ last->fence.context,
+ last->fence.seqno,
+ last->sched.attr.priority,
+ execlists->queue_priority_hint);
- /*
- * WaIdleLiteRestore:bdw,skl
- * Apply the wa NOOPs to prevent
- * ring:HEAD == rq:TAIL as we resubmit the
- * request. See gen8_emit_fini_breadcrumb() for
- * where we prepare the padding after the
- * end of the request.
- */
- last->tail = last->wa_tail;
+ ring_set_paused(engine, 1);
+ defer_active(engine);
+
+ /*
+ * Unlike for preemption, if we rewind and continue
+ * executing the same context as previously active,
+ * the order of execution will remain the same and
+ * the tail will only advance. We do not need to
+ * force a full context restore, as a lite-restore
+ * is sufficient to resample the monotonic TAIL.
+ *
+ * If we switch to any other context, similarly we
+ * will not rewind TAIL of current context, and
+ * normal save/restore will preserve state and allow
+ * us to later continue executing the same request.
+ */
+ last = NULL;
+ } else {
+ /*
+ * Otherwise if we already have a request pending
+ * for execution after the current one, we can
+ * just wait until the next CS event before
+ * queuing more. In either case we will force a
+ * lite-restore preemption event, but if we wait
+ * we hopefully coalesce several updates into a single
+ * submission.
+ */
+ if (!list_is_last(&last->sched.link,
+ &engine->active.requests))
+ return;
+
+ /*
+ * WaIdleLiteRestore:bdw,skl
+ * Apply the wa NOOPs to prevent
+ * ring:HEAD == rq:TAIL as we resubmit the
+ * request. See gen8_emit_fini_breadcrumb() for
+ * where we prepare the padding after the
+ * end of the request.
+ */
+ last->tail = last->wa_tail;
+ }
}
while (rb) { /* XXX virtual is always taking precedence */
@@ -955,9 +1118,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
continue;
}
+ if (i915_request_completed(rq)) {
+ ve->request = NULL;
+ ve->base.execlists.queue_priority_hint = INT_MIN;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
+
+ rq->engine = engine;
+ __i915_request_submit(rq);
+
+ spin_unlock(&ve->base.active.lock);
+
+ rb = rb_first_cached(&execlists->virtual);
+ continue;
+ }
+
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
- return; /* leave this rq for another engine */
+ return; /* leave this for another engine */
}
GEM_TRACE("%s: virtual rq=%llx:%lld%s, new engine? %s\n",
@@ -1006,9 +1184,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
__i915_request_submit(rq);
- trace_i915_request_in(rq, port_index(port, execlists));
- submit = true;
- last = rq;
+ if (!i915_request_completed(rq)) {
+ submit = true;
+ last = rq;
+ }
}
spin_unlock(&ve->base.active.lock);
@@ -1021,6 +1200,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
+ if (i915_request_completed(rq))
+ goto skip;
+
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -1060,19 +1242,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
ctx_single_port_submission(rq->hw_context))
goto done;
-
- if (submit)
- port_assign(port, last);
+ *port = execlists_schedule_in(last, port - execlists->pending);
port++;
-
- GEM_BUG_ON(port_isset(port));
}
- __i915_request_submit(rq);
- trace_i915_request_in(rq, port_index(port, execlists));
-
last = rq;
submit = true;
+skip:
+ __i915_request_submit(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -1097,54 +1274,32 @@ done:
* interrupt for secondary ports).
*/
execlists->queue_priority_hint = queue_prio(execlists);
+ GEM_TRACE("%s: queue_priority_hint:%d, submit:%s\n",
+ engine->name, execlists->queue_priority_hint,
+ yesno(submit));
if (submit) {
- port_assign(port, last);
+ *port = execlists_schedule_in(last, port - execlists->pending);
+ memset(port + 1, 0, (last_port - port) * sizeof(*port));
execlists_submit_ports(engine);
+ } else {
+ ring_set_paused(engine, 0);
}
-
- /* We must always keep the beast fed if we have work piled up */
- GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
- !port_isset(execlists->port));
-
- /* Re-evaluate the executing context setup after each preemptive kick */
- if (last)
- execlists_user_begin(execlists, execlists->port);
-
- /* If the engine is now idle, so should be the flag; and vice versa. */
- GEM_BUG_ON(execlists_is_active(&engine->execlists,
- EXECLISTS_ACTIVE_USER) ==
- !port_isset(engine->execlists.port));
}
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
{
- struct execlist_port *port = execlists->port;
- unsigned int num_ports = execlists_num_ports(execlists);
+ struct i915_request * const *port, *rq;
- while (num_ports-- && port_isset(port)) {
- struct i915_request *rq = port_request(port);
+ for (port = execlists->pending; (rq = *port); port++)
+ execlists_schedule_out(rq);
+ memset(execlists->pending, 0, sizeof(execlists->pending));
- GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
- rq->engine->name,
- (unsigned int)(port - execlists->port),
- rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq));
-
- GEM_BUG_ON(!execlists->active);
- execlists_context_schedule_out(rq,
- i915_request_completed(rq) ?
- INTEL_CONTEXT_SCHEDULE_OUT :
- INTEL_CONTEXT_SCHEDULE_PREEMPTED);
-
- i915_request_put(rq);
-
- memset(port, 0, sizeof(*port));
- port++;
- }
-
- execlists_clear_all_active(execlists);
+ for (port = execlists->active; (rq = *port); port++)
+ execlists_schedule_out(rq);
+ execlists->active =
+ memset(execlists->inflight, 0, sizeof(execlists->inflight));
}
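/*
 * Toy model (assumed layout, not the real struct) of the two
 * NULL-terminated request arrays the code above drains: "pending" is
 * the ELSP write awaiting its ack, "inflight" is the copy the hardware
 * is executing, with "active" cursoring through it. Promotion in
 * process_csb() is then just a memcpy plus resetting the sentinel:
 */
#include <linux/string.h>

#define TOY_PORTS 2

struct toy_execlists {
	void *inflight[TOY_PORTS + 1]; /* NULL-terminated by sentinel */
	void *pending[TOY_PORTS + 1];
	void **active;
};

static void toy_promote(struct toy_execlists *el)
{
	/* pending (including its NULL sentinel) becomes inflight */
	el->active = memcpy(el->inflight, el->pending,
			    sizeof(el->pending));
	el->pending[0] = NULL; /* ready for the next ELSP write */
}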
static inline void
@@ -1160,10 +1315,33 @@ reset_in_progress(const struct intel_engine_execlists *execlists)
return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
}
+enum csb_step {
+ CSB_NOP,
+ CSB_PROMOTE,
+ CSB_PREEMPT,
+ CSB_COMPLETE,
+};
+
+static inline enum csb_step
+csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb)
+{
+ unsigned int status = *csb;
+
+ if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
+ return CSB_PROMOTE;
+
+ if (status & GEN8_CTX_STATUS_PREEMPTED)
+ return CSB_PREEMPT;
+
+ if (*execlists->active)
+ return CSB_COMPLETE;
+
+ return CSB_NOP;
+}
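/*
 * Decode summary for csb_parse() above (CSB dword 0, GEN8_CTX_STATUS_*
 * bits as named in i915_reg.h):
 *
 *	IDLE_ACTIVE                  -> CSB_PROMOTE  pending -> inflight
 *	PREEMPTED                    -> CSB_PREEMPT  drop inflight, then
 *	                                             promote (fallthrough)
 *	neither, but *active != NULL -> CSB_COMPLETE retire port0
 *	neither, nothing inflight    -> CSB_NOP      ignore the event
 */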
+
static void process_csb(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
const u32 * const buf = execlists->csb_status;
const u8 num_entries = execlists->csb_size;
u8 head, tail;
@@ -1198,10 +1376,6 @@ static void process_csb(struct intel_engine_cs *engine)
rmb();
do {
- struct i915_request *rq;
- unsigned int status;
- unsigned int count;
-
if (++head == num_entries)
head = 0;
@@ -1223,68 +1397,39 @@ static void process_csb(struct intel_engine_cs *engine)
* status notifier.
*/
- GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
+ GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x\n",
engine->name, head,
- buf[2 * head + 0], buf[2 * head + 1],
- execlists->active);
-
- status = buf[2 * head];
- if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
- GEN8_CTX_STATUS_PREEMPTED))
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
- if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
- execlists_clear_active(execlists,
- EXECLISTS_ACTIVE_HWACK);
-
- if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
- continue;
+ buf[2 * head + 0], buf[2 * head + 1]);
- /* We should never get a COMPLETED | IDLE_ACTIVE! */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
+ switch (csb_parse(execlists, buf + 2 * head)) {
+ case CSB_PREEMPT: /* cancel old inflight, prepare for switch */
+ trace_ports(execlists, "preempted", execlists->active);
- if (status & GEN8_CTX_STATUS_COMPLETE &&
- buf[2*head + 1] == execlists->preempt_complete_status) {
- GEM_TRACE("%s preempt-idle\n", engine->name);
- complete_preempt_context(execlists);
- continue;
- }
+ while (*execlists->active)
+ execlists_schedule_out(*execlists->active++);
- if (status & GEN8_CTX_STATUS_PREEMPTED &&
- execlists_is_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT))
- continue;
+ /* fallthrough */
+ case CSB_PROMOTE: /* switch pending to inflight */
+ GEM_BUG_ON(*execlists->active);
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+ execlists->active =
+ memcpy(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists) *
+ sizeof(*execlists->pending));
+ execlists->pending[0] = NULL;
- GEM_BUG_ON(!execlists_is_active(execlists,
- EXECLISTS_ACTIVE_USER));
+ trace_ports(execlists, "promoted", execlists->active);
- rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
- engine->name,
- port->context_id, count,
- rq ? rq->fence.context : 0,
- rq ? rq->fence.seqno : 0,
- rq ? hwsp_seqno(rq) : 0,
- rq ? rq_prio(rq) : 0);
+ if (enable_timeslice(engine))
+ mod_timer(&execlists->timer, jiffies + 1);
- /* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
+ if (!inject_preempt_hang(execlists))
+ ring_set_paused(engine, 0);
+ break;
- GEM_BUG_ON(count == 0);
- if (--count == 0) {
- /*
- * On the final event corresponding to the
- * submission of this context, we expect either
- * an element-switch event or a completion
- * event (and on completion, the active-idle
- * marker). No more preemptions, lite-restore
- * or otherwise.
- */
- GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
- GEM_BUG_ON(port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
- GEM_BUG_ON(!port_isset(&port[1]) &&
- !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
+ case CSB_COMPLETE: /* port0 completed, advanced to port1 */
+ trace_ports(execlists, "completed", execlists->active);
/*
* We rely on the hardware being strongly
@@ -1292,22 +1437,16 @@ static void process_csb(struct intel_engine_cs *engine)
* coherent (visible from the CPU) before the
* user interrupt and CSB is processed.
*/
- GEM_BUG_ON(!i915_request_completed(rq));
+ GEM_BUG_ON(!i915_request_completed(*execlists->active) &&
+ !reset_in_progress(execlists));
+ execlists_schedule_out(*execlists->active++);
- execlists_context_schedule_out(rq,
- INTEL_CONTEXT_SCHEDULE_OUT);
- i915_request_put(rq);
-
- GEM_TRACE("%s completed ctx=%d\n",
- engine->name, port->context_id);
+ GEM_BUG_ON(execlists->active - execlists->inflight >
+ execlists_num_ports(execlists));
+ break;
- port = execlists_port_complete(execlists, port);
- if (port_isset(port))
- execlists_user_begin(execlists, port);
- else
- execlists_user_end(execlists);
- } else {
- port_set(port, port_pack(rq, count));
+ case CSB_NOP:
+ break;
}
} while (head != tail);
@@ -1332,7 +1471,7 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
lockdep_assert_held(&engine->active.lock);
process_csb(engine);
- if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
+ if (!engine->execlists.pending[0])
execlists_dequeue(engine);
}
@@ -1345,16 +1484,20 @@ static void execlists_submission_tasklet(unsigned long data)
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
unsigned long flags;
- GEM_TRACE("%s awake?=%d, active=%x\n",
- engine->name,
- !!intel_wakeref_active(&engine->wakeref),
- engine->execlists.active);
-
spin_lock_irqsave(&engine->active.lock, flags);
__execlists_submission_tasklet(engine);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
+static void execlists_submission_timer(struct timer_list *timer)
+{
+ struct intel_engine_cs *engine =
+ from_timer(engine, timer, execlists.timer);
+
+ /* Kick the tasklet for some interrupt coalescing and reset handling */
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
@@ -1376,12 +1519,16 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
tasklet_hi_schedule(&execlists->tasklet);
}
-static void submit_queue(struct intel_engine_cs *engine, int prio)
+static void submit_queue(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
- if (prio > engine->execlists.queue_priority_hint) {
- engine->execlists.queue_priority_hint = prio;
- __submit_queue_imm(engine);
- }
+ struct intel_engine_execlists *execlists = &engine->execlists;
+
+ if (rq_prio(rq) <= execlists->queue_priority_hint)
+ return;
+
+ execlists->queue_priority_hint = rq_prio(rq);
+ __submit_queue_imm(engine);
}
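/*
 * Example of the hint gating above (illustrative numbers): with
 * queue_priority_hint == 2, submitting prio-1 work only queues it for
 * the next natural dequeue, while prio-3 work raises the hint and
 * kicks the tasklet immediately via __submit_queue_imm().
 */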
static void execlists_submit_request(struct i915_request *request)
@@ -1397,7 +1544,7 @@ static void execlists_submit_request(struct i915_request *request)
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
GEM_BUG_ON(list_empty(&request->sched.link));
- submit_queue(engine, rq_prio(request));
+ submit_queue(engine, request);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -1405,9 +1552,7 @@ static void execlists_submit_request(struct i915_request *request)
static void __execlists_context_fini(struct intel_context *ce)
{
intel_ring_put(ce->ring);
-
- GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
- i915_gem_object_put(ce->state->obj);
+ i915_vma_put(ce->state);
}
static void execlists_context_destroy(struct kref *kref)
@@ -1420,6 +1565,7 @@ static void execlists_context_destroy(struct kref *kref)
if (ce->state)
__execlists_context_fini(ce);
+ intel_context_fini(ce);
intel_context_free(ce);
}
@@ -1444,9 +1590,12 @@ __execlists_update_reg_state(struct intel_context *ce,
regs[CTX_RING_TAIL + 1] = ring->tail;
/* RPCS */
- if (engine->class == RENDER_CLASS)
+ if (engine->class == RENDER_CLASS) {
regs[CTX_R_PWR_CLK_STATE + 1] =
intel_sseu_make_rpcs(engine->i915, &ce->sseu);
+
+ i915_oa_init_reg_state(engine, ce, regs);
+ }
}
static int
@@ -1456,19 +1605,15 @@ __execlists_context_pin(struct intel_context *ce,
void *vaddr;
int ret;
- GEM_BUG_ON(!ce->gem_context->vm);
-
ret = execlists_context_deferred_alloc(ce, engine);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
- ret = intel_context_active_acquire(ce,
- engine->i915->ggtt.pin_bias |
- PIN_OFFSET_BIAS |
- PIN_HIGH);
+ ret = intel_context_active_acquire(ce);
if (ret)
goto err;
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
vaddr = i915_gem_object_pin_map(ce->state->obj,
i915_coherent_map_type(engine->i915) |
@@ -1569,8 +1714,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
static int emit_pdps(struct i915_request *rq)
{
const struct intel_engine_cs * const engine = rq->engine;
- struct i915_ppgtt * const ppgtt =
- i915_vm_to_ppgtt(rq->gem_context->vm);
+ struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm);
int err, i;
u32 *cs;
@@ -1643,7 +1787,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_4lvl(request->gem_context->vm))
+ if (i915_vm_is_4lvl(request->hw_context->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
@@ -1676,7 +1820,8 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
/* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_scratch_offset(engine->i915) + 256;
+ *batch++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1690,12 +1835,19 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_scratch_offset(engine->i915) + 256;
+ *batch++ = intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
*batch++ = 0;
return batch;
}
+static u32 slm_offset(struct intel_engine_cs *engine)
+{
+ return intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA);
+}
+
/*
* Typically we only have one indirect_ctx and per_ctx batch buffer which are
* initialized at the beginning and shared across all contexts but this field
@@ -1727,8 +1879,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_scratch_offset(engine->i915) +
- 2 * CACHELINE_BYTES);
+ slm_offset(engine));
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1874,7 +2025,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -1970,22 +2121,23 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
static void enable_execlists(struct intel_engine_cs *engine)
{
+ u32 mode;
+
+ assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
if (INTEL_GEN(engine->i915) >= 11)
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
else
- ENGINE_WRITE(engine,
- RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
+ mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
- ENGINE_WRITE(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
- ENGINE_WRITE(engine,
- RING_HWS_PGA,
- i915_ggtt_offset(engine->status_page.vma));
+ ENGINE_WRITE_FW(engine,
+ RING_HWS_PGA,
+ i915_ggtt_offset(engine->status_page.vma));
ENGINE_POSTING_READ(engine, RING_HWS_PGA);
}
@@ -1993,7 +2145,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
{
bool unexpected = false;
- if (ENGINE_READ(engine, RING_MI_MODE) & STOP_RING) {
+ if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
unexpected = true;
}
@@ -2041,34 +2193,32 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
GEM_BUG_ON(!reset_in_progress(execlists));
- intel_engine_stop_cs(engine);
-
/* And flush any current direct submission. */
spin_lock_irqsave(&engine->active.lock, flags);
spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static bool lrc_regs_ok(const struct i915_request *rq)
-{
- const struct intel_ring *ring = rq->ring;
- const u32 *regs = rq->hw_context->lrc_reg_state;
-
- /* Quick spot check for the common signs of context corruption */
- if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
- (RING_CTL_SIZE(ring->size) | RING_VALID))
- return false;
-
- if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
- return false;
-
- return true;
+ /*
+ * We stop engines, otherwise we might get a failed reset and a
+ * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
+ * from system hang if batchbuffer is progressing when
+ * the reset is issued, regardless of READY_TO_RESET ack.
+ * Thus assume it is best to stop engines on all gens
+ * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ intel_engine_stop_cs(engine);
}
-static void reset_csb_pointers(struct intel_engine_execlists *execlists)
+static void reset_csb_pointers(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
const unsigned int reset_value = execlists->csb_size - 1;
+ ring_set_paused(engine, 0);
+
/*
* After a reset, the HW starts writing into CSB entry [0]. We
* therefore have to set our HEAD pointer back one entry so that
@@ -2115,18 +2265,21 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
process_csb(engine); /* drain preemption events */
/* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(&engine->execlists);
+ reset_csb_pointers(engine);
/*
* Save the currently executing context, even if we completed
* its request, it was still running at the time of the
* reset and will have been clobbered.
*/
- if (!port_isset(execlists->port))
- goto out_clear;
+ rq = execlists_active(execlists);
+ if (!rq)
+ goto unwind;
- rq = port_request(execlists->port);
ce = rq->hw_context;
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+ rq = active_request(rq);
/*
* Catch up with any missed context-switch interrupts.
@@ -2139,9 +2292,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
*/
execlists_cancel_port_requests(execlists);
- rq = active_request(rq);
- if (!rq)
+ if (!rq) {
+ ce->ring->head = ce->ring->tail;
goto out_replay;
+ }
+
+ ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
/*
* If this request hasn't started yet, e.g. it is waiting on a
@@ -2155,7 +2311,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* Otherwise, if we have not started yet, the request should replay
* perfectly and we do not need to flag the result as being erroneous.
*/
- if (!i915_request_started(rq) && lrc_regs_ok(rq))
+ if (!i915_request_started(rq))
goto out_replay;
/*
@@ -2169,8 +2325,8 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
* and have to at least restore the RING register in the context
* image back to the expected values to skip over the guilty request.
*/
- i915_reset_request(rq, stalled);
- if (!stalled && lrc_regs_ok(rq))
+ __i915_request_reset(rq, stalled);
+ if (!stalled)
goto out_replay;
/*
@@ -2190,17 +2346,14 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
execlists_init_reg_state(regs, ce, engine, ce->ring);
out_replay:
- /* Rerun the request; its payload has been neutered (if guilty). */
- ce->ring->head =
- rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
+ GEM_TRACE("%s replay {head:%04x, tail:%04x\n",
+ engine->name, ce->ring->head, ce->ring->tail);
intel_ring_update_space(ce->ring);
__execlists_update_reg_state(ce, engine);
+unwind:
/* Push back any incomplete requests for replay after the reset. */
__unwind_incomplete_requests(engine);
-
-out_clear:
- execlists_clear_all_active(execlists);
}
static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
@@ -2296,7 +2449,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(port_isset(execlists->port));
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
execlists->tasklet.func = nop_submission_tasklet;
@@ -2434,7 +2586,8 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
- i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
+ intel_gt_scratch_offset(engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2514,15 +2667,30 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
return cs;
}
+static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
+{
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = intel_hws_preempt_address(request->engine);
+ *cs++ = 0;
+
+ return cs;
+}
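/*
 * Sketch of how the busywait above pairs with ring_set_paused()
 * (assuming, as elsewhere in this patch, that both touch the same
 * intel_hws_preempt_address() dword). MI_SEMAPHORE_SAD_EQ_SDD with a
 * semaphore data value of 0 means the CS spins while the CPU holds the
 * dword at 1, so RING_HEAD cannot advance past a completed breadcrumb
 * during the rewind:
 *
 *	ring_set_paused(engine, 1);          // breadcrumbs now spin
 *	__unwind_incomplete_requests(engine);
 *	... rebuild and resubmit the ELSP ...
 *	ring_set_paused(engine, 0);          // release the poll
 */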
+
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
request->timeline->hwsp_offset,
0);
-
*cs++ = MI_USER_INTERRUPT;
+
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(request->engine))
+ cs = emit_preempt_busywait(request, cs);
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
@@ -2543,9 +2711,11 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
PIPE_CONTROL_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL,
0);
-
*cs++ = MI_USER_INTERRUPT;
+
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ if (intel_engine_has_semaphores(request->engine))
+ cs = emit_preempt_busywait(request, cs);
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
@@ -2553,27 +2723,9 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
return gen8_emit_wa_tail(request, cs);
}
-static int gen8_init_rcs_context(struct i915_request *rq)
-{
- int ret;
-
- ret = intel_engine_emit_ctx_wa(rq);
- if (ret)
- return ret;
-
- ret = intel_rcs_context_init_mocs(rq);
- /*
- * Failing to program the MOCS is non-fatal. The system will not
- * run at peak performance. So generate an error and carry on.
- */
- if (ret)
- DRM_ERROR("MOCS failed to program: expect performance issues.\n");
-
- return i915_gem_render_state_emit(rq);
-}
-
static void execlists_park(struct intel_engine_cs *engine)
{
+ del_timer_sync(&engine->execlists.timer);
intel_engine_park(engine);
}
@@ -2592,11 +2744,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (!intel_vgpu_active(engine->i915))
+ if (!intel_vgpu_active(engine->i915)) {
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
- if (engine->preempt_context &&
- HAS_LOGICAL_RING_PREEMPTION(engine->i915))
- engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+ }
}
static void execlists_destroy(struct intel_engine_cs *engine)
@@ -2672,12 +2824,12 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
tasklet_init(&engine->execlists.tasklet,
execlists_submission_tasklet, (unsigned long)engine);
+ timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
if (engine->class == RENDER_CLASS) {
- engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
}
@@ -2697,9 +2849,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_engine_init_workarounds(engine);
- intel_engine_init_whitelist(engine);
-
if (intel_init_workaround_bb(engine))
/*
* We continue even if we fail to initialize WA batch
@@ -2718,11 +2867,6 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
i915_mmio_reg_offset(RING_ELSP(base));
}
- execlists->preempt_complete_status = ~0u;
- if (engine->preempt_context)
- execlists->preempt_complete_status =
- upper_32_bits(engine->preempt_context->lrc_desc);
-
execlists->csb_status =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2734,7 +2878,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine)
else
execlists->csb_size = GEN11_CSB_ENTRIES;
- reset_csb_pointers(execlists);
+ reset_csb_pointers(engine);
return 0;
}
@@ -2773,7 +2917,7 @@ static void execlists_init_reg_state(u32 *regs,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->gem_context->vm);
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
bool rcs = engine->class == RENDER_CLASS;
u32 base = engine->mmio_base;
@@ -2864,8 +3008,6 @@ static void execlists_init_reg_state(u32 *regs,
if (rcs) {
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
-
- i915_oa_init_reg_state(engine, ce, regs);
}
regs[CTX_END] = MI_BATCH_BUFFER_END;
@@ -2917,11 +3059,6 @@ populate_lr_context(struct intel_context *ce,
if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (ce->gem_context == engine->i915->preempt_context &&
- INTEL_GEN(engine->i915) < 11)
- regs[CTX_CONTEXT_CONTROL + 1] |=
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
ret = 0;
err_unpin_ctx:
@@ -2932,12 +3069,13 @@ err_unpin_ctx:
return ret;
}
-static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
+static struct intel_timeline *
+get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt)
{
if (ctx->timeline)
- return i915_timeline_get(ctx->timeline);
+ return intel_timeline_get(ctx->timeline);
else
- return i915_timeline_create(ctx->i915, NULL);
+ return intel_timeline_create(gt, NULL);
}
static int execlists_context_deferred_alloc(struct intel_context *ce,
@@ -2947,7 +3085,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
struct i915_vma *vma;
u32 context_size;
struct intel_ring *ring;
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
int ret;
if (ce->state)
@@ -2965,13 +3103,13 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
}
- timeline = get_timeline(ce->gem_context);
+ timeline = get_timeline(ce->gem_context, engine->gt);
if (IS_ERR(timeline)) {
ret = PTR_ERR(timeline);
goto error_deref_obj;
@@ -2980,7 +3118,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
ring = intel_engine_create_ring(engine,
timeline,
ce->gem_context->ring_size);
- i915_timeline_put(timeline);
+ intel_timeline_put(timeline);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
@@ -3038,6 +3176,7 @@ static void virtual_context_destroy(struct kref *kref)
if (ve->context.state)
__execlists_context_fini(&ve->context);
+ intel_context_fini(&ve->context);
kfree(ve->bonds);
kfree(ve);
@@ -3290,11 +3429,11 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
return ERR_PTR(-ENOMEM);
ve->base.i915 = ctx->i915;
+ ve->base.gt = siblings[0]->gt;
ve->base.id = -1;
ve->base.class = OTHER_CLASS;
ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
- ve->base.flags = I915_ENGINE_IS_VIRTUAL;
/*
* The decision on whether to submit a request using semaphores
@@ -3391,8 +3530,12 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb;
ve->base.emit_fini_breadcrumb_dw =
sibling->emit_fini_breadcrumb_dw;
+
+ ve->base.flags = sibling->flags;
}
+ ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+
return &ve->context;
err_put:
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 1f9db50b1869..e082b25d2db1 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -23,6 +23,7 @@
#include "i915_drv.h"
#include "intel_engine.h"
+#include "intel_gt.h"
#include "intel_mocs.h"
#include "intel_lrc.h"
@@ -247,7 +248,7 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
/**
* get_mocs_settings()
- * @dev_priv: i915 device.
+ * @gt: gt device
* @table: Output table that will be made to point at appropriate
* MOCS values for the device.
*
@@ -257,33 +258,34 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = {
*
* Return: true if there are applicable MOCS settings for the device.
*/
-static bool get_mocs_settings(struct drm_i915_private *dev_priv,
+static bool get_mocs_settings(struct intel_gt *gt,
struct drm_i915_mocs_table *table)
{
+ struct drm_i915_private *i915 = gt->i915;
bool result = false;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 11) {
table->size = ARRAY_SIZE(icelake_mocs_table);
table->table = icelake_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
result = true;
- } else if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv)) {
+ } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = skylake_mocs_table;
result = true;
- } else if (IS_GEN9_LP(dev_priv)) {
+ } else if (IS_GEN9_LP(i915)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
result = true;
} else {
- WARN_ONCE(INTEL_GEN(dev_priv) >= 9,
+ WARN_ONCE(INTEL_GEN(i915) >= 9,
"Platform that should have a MOCS table does not.\n");
}
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
- if (IS_GEN(dev_priv, 9)) {
+ if (IS_GEN(i915, 9)) {
int i;
for (i = 0; i < table->size; i++)
@@ -338,12 +340,16 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table,
*/
void intel_mocs_init_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_gt *gt = engine->gt;
+ struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
unsigned int index;
u32 unused_value;
- if (!get_mocs_settings(dev_priv, &table))
+ /* Called under a blanket forcewake */
+ assert_forcewakes_active(uncore, FORCEWAKE_ALL);
+
+ if (!get_mocs_settings(gt, &table))
return;
/* Set unused values to PTE */
@@ -352,12 +358,16 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
for (index = 0; index < table.size; index++) {
u32 value = get_entry_control(&table, index);
- I915_WRITE(mocs_register(engine->id, index), value);
+ intel_uncore_write_fw(uncore,
+ mocs_register(engine->id, index),
+ value);
}
/* All remaining entries are also unused */
for (; index < table.n_entries; index++)
- I915_WRITE(mocs_register(engine->id, index), unused_value);
+ intel_uncore_write_fw(uncore,
+ mocs_register(engine->id, index),
+ unused_value);
}
/**
@@ -490,7 +500,7 @@ static int emit_mocs_l3cc_table(struct i915_request *rq,
/**
* intel_mocs_init_l3cc_table() - program the mocs control table
- * @dev_priv: i915 device private
+ * @gt: the intel_gt container
*
* This function simply programs the mocs registers for the given table
* starting at the given address. This register set is programmed in pairs.
@@ -502,13 +512,14 @@ static int emit_mocs_l3cc_table(struct i915_request *rq,
*
* Return: Nothing.
*/
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
+void intel_mocs_init_l3cc_table(struct intel_gt *gt)
{
+ struct intel_uncore *uncore = gt->uncore;
struct drm_i915_mocs_table table;
unsigned int i;
u16 unused_value;
- if (!get_mocs_settings(dev_priv, &table))
+ if (!get_mocs_settings(gt, &table))
return;
/* Set unused values to PTE */
@@ -518,23 +529,27 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
u16 low = get_entry_l3cc(&table, 2 * i);
u16 high = get_entry_l3cc(&table, 2 * i + 1);
- I915_WRITE(GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, high));
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(&table, low, high));
}
/* Odd table size - 1 left over */
if (table.size & 0x01) {
u16 low = get_entry_l3cc(&table, 2 * i);
- I915_WRITE(GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, low, unused_value));
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(&table, low, unused_value));
i++;
}
/* All remaining entries are also unused */
for (; i < table.n_entries / 2; i++)
- I915_WRITE(GEN9_LNCFCMOCS(i),
- l3cc_combine(&table, unused_value, unused_value));
+ intel_uncore_write(uncore,
+ GEN9_LNCFCMOCS(i),
+ l3cc_combine(&table, unused_value,
+ unused_value));
}
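/*
 * Worked example for the register packing above (entry values
 * hypothetical): two 16-bit l3cc entries share one GEN9_LNCFCMOCS
 * register, low in bits 15:0 and high in bits 31:16, so with
 * low = 0x0010 and high = 0x0030 the write is expected to be
 *
 *	l3cc_combine(&table, 0x0010, 0x0030) == 0x00300010
 */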
/**
@@ -553,12 +568,15 @@ void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv)
*
* Return: 0 on success, otherwise the error status.
*/
-int intel_rcs_context_init_mocs(struct i915_request *rq)
+int intel_mocs_emit(struct i915_request *rq)
{
struct drm_i915_mocs_table t;
int ret;
- if (get_mocs_settings(rq->i915, &t)) {
+ if (rq->engine->class != RENDER_CLASS)
+ return 0;
+
+ if (get_mocs_settings(rq->engine->gt, &t)) {
/* Program the RCS control registers */
ret = emit_mocs_control_table(rq, &t);
if (ret)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index 0913704a1af2..a334db2d6d6b 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -52,9 +52,11 @@
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
-int intel_rcs_context_init_mocs(struct i915_request *rq);
-void intel_mocs_init_l3cc_table(struct drm_i915_private *dev_priv);
+void intel_mocs_init_l3cc_table(struct intel_gt *gt);
void intel_mocs_init_engine(struct intel_engine_cs *engine);
+int intel_mocs_emit(struct i915_request *rq);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 4ee032072d4f..be37d4501c67 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -26,10 +26,9 @@
*/
#include "i915_drv.h"
-#include "i915_gem_render_state.h"
#include "intel_renderstate.h"
-struct intel_render_state {
+struct intel_renderstate {
const struct intel_renderstate_rodata *rodata;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -42,7 +41,7 @@ struct intel_render_state {
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
- if (engine->id != RCS0)
+ if (engine->class != RENDER_CLASS)
return NULL;
switch (INTEL_GEN(engine->i915)) {
@@ -75,7 +74,7 @@ render_state_get_rodata(const struct intel_engine_cs *engine)
(batch)[(i)++] = (val); \
} while(0)
-static int render_state_setup(struct intel_render_state *so,
+static int render_state_setup(struct intel_renderstate *so,
struct drm_i915_private *i915)
{
const struct intel_renderstate_rodata *rodata = so->rodata;
@@ -177,10 +176,10 @@ err:
#undef OUT_BATCH
-int i915_gem_render_state_emit(struct i915_request *rq)
+int intel_renderstate_emit(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct intel_render_state so = {}; /* keep the compiler happy */
+ struct intel_renderstate so = {}; /* keep the compiler happy */
int err;
so.rodata = render_state_get_rodata(engine);
@@ -194,7 +193,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
- so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
+ so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/gt/intel_renderstate.h
index 08f6fea05a2c..8d5079145054 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.h
@@ -21,11 +21,13 @@
* DEALINGS IN THE SOFTWARE.
*/
-#ifndef _INTEL_RENDERSTATE_H
-#define _INTEL_RENDERSTATE_H
+#ifndef _INTEL_RENDERSTATE_H_
+#define _INTEL_RENDERSTATE_H_
#include <linux/types.h>
+struct i915_request;
+
struct intel_renderstate_rodata {
const u32 *reloc;
const u32 *batch;
@@ -44,4 +46,6 @@ extern const struct intel_renderstate_rodata gen7_null_state;
extern const struct intel_renderstate_rodata gen8_null_state;
extern const struct intel_renderstate_rodata gen9_null_state;
-#endif /* INTEL_RENDERSTATE_H */
+int intel_renderstate_emit(struct i915_request *rq);
+
+#endif /* _INTEL_RENDERSTATE_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3f907701ef4d..98c071fe532b 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -15,26 +15,17 @@
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_engine_pm.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_reset.h"
-#include "intel_guc.h"
+#include "uc/intel_guc.h"
#define RESET_MAX_RETRIES 3
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
-static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
- intel_uncore_rmw(uncore, reg, 0, set);
-}
-
-static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
- intel_uncore_rmw(uncore, reg, clr, 0);
-}
-
static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
intel_uncore_rmw_fw(uncore, reg, 0, set);
@@ -123,7 +114,7 @@ static void context_mark_innocent(struct i915_gem_context *ctx)
atomic_inc(&ctx->active_count);
}
-void i915_reset_request(struct i915_request *rq, bool guilty)
+void __i915_request_reset(struct i915_request *rq, bool guilty)
{
GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
rq->engine->name,
@@ -144,48 +135,6 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
}
}
-static void gen3_stop_engine(struct intel_engine_cs *engine)
-{
- struct intel_uncore *uncore = engine->uncore;
- const u32 base = engine->mmio_base;
-
- GEM_TRACE("%s\n", engine->name);
-
- if (intel_engine_stop_cs(engine))
- GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
-
- intel_uncore_write_fw(uncore,
- RING_HEAD(base),
- intel_uncore_read_fw(uncore, RING_TAIL(base)));
- intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
-
- intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
- intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
- intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
-
- /* The ring must be empty before it is disabled */
- intel_uncore_write_fw(uncore, RING_CTL(base), 0);
-
- /* Check acts as a post */
- if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
- GEM_TRACE("%s: ring head [%x] not parked\n",
- engine->name,
- intel_uncore_read_fw(uncore, RING_HEAD(base)));
-}
-
-static void i915_stop_engines(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
-
- if (INTEL_GEN(i915) < 3)
- return;
-
- for_each_engine_masked(engine, i915, engine_mask, tmp)
- gen3_stop_engine(engine);
-}
-
static bool i915_in_reset(struct pci_dev *pdev)
{
u8 gdrst;
@@ -194,11 +143,11 @@ static bool i915_in_reset(struct pci_dev *pdev)
return gdrst & GRDOM_RESET_STATUS;
}
-static int i915_do_reset(struct drm_i915_private *i915,
+static int i915_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = gt->i915->drm.pdev;
int err;
/* Assert reset for at least 20 usec, and wait for acknowledgement. */
@@ -223,22 +172,22 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
return (gdrst & GRDOM_RESET_ENABLE) == 0;
}
-static int g33_do_reset(struct drm_i915_private *i915,
+static int g33_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = gt->i915->drm.pdev;
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
-static int g4x_do_reset(struct drm_i915_private *i915,
+static int g4x_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = i915->drm.pdev;
- struct intel_uncore *uncore = &i915->uncore;
+ struct pci_dev *pdev = gt->i915->drm.pdev;
+ struct intel_uncore *uncore = gt->uncore;
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
@@ -270,11 +219,11 @@ out:
return ret;
}
-static int ironlake_do_reset(struct drm_i915_private *i915,
+static int ironlake_do_reset(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
int ret;
intel_uncore_write_fw(uncore, ILK_GDSR,
@@ -306,10 +255,9 @@ out:
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
-static int gen6_hw_domain_reset(struct drm_i915_private *i915,
- u32 hw_domain_mask)
+static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = gt->uncore;
int err;
/*
@@ -331,7 +279,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *i915,
return err;
}
-static int gen6_reset_engines(struct drm_i915_private *i915,
+static int gen6_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
@@ -351,13 +299,13 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
intel_engine_mask_t tmp;
hw_mask = 0;
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
}
}
- return gen6_hw_domain_reset(i915, hw_mask);
+ return gen6_hw_domain_reset(gt, hw_mask);
}
static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
@@ -455,7 +403,7 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}
-static int gen11_reset_engines(struct drm_i915_private *i915,
+static int gen11_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
@@ -478,17 +426,17 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
hw_mask |= gen11_lock_sfc(engine);
}
}
- ret = gen6_hw_domain_reset(i915, hw_mask);
+ ret = gen6_hw_domain_reset(gt, hw_mask);
if (engine_mask != ALL_ENGINES)
- for_each_engine_masked(engine, i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
gen11_unlock_sfc(engine);
return ret;
@@ -538,7 +486,7 @@ static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
-static int gen8_reset_engines(struct drm_i915_private *i915,
+static int gen8_reset_engines(struct intel_gt *gt,
intel_engine_mask_t engine_mask,
unsigned int retry)
{
@@ -547,7 +495,7 @@ static int gen8_reset_engines(struct drm_i915_private *i915,
intel_engine_mask_t tmp;
int ret;
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
ret = gen8_engine_reset_prepare(engine);
if (ret && !reset_non_ready)
goto skip_reset;
@@ -563,23 +511,23 @@ static int gen8_reset_engines(struct drm_i915_private *i915,
* We rather take context corruption instead of
* failed reset with a wedged driver/gpu. And
* active bb execution case should be covered by
- * i915_stop_engines we have before the reset.
+ * stop_engines() we have before the reset.
*/
}
- if (INTEL_GEN(i915) >= 11)
- ret = gen11_reset_engines(i915, engine_mask, retry);
+ if (INTEL_GEN(gt->i915) >= 11)
+ ret = gen11_reset_engines(gt, engine_mask, retry);
else
- ret = gen6_reset_engines(i915, engine_mask, retry);
+ ret = gen6_reset_engines(gt, engine_mask, retry);
skip_reset:
- for_each_engine_masked(engine, i915, engine_mask, tmp)
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp)
gen8_engine_reset_cancel(engine);
return ret;
}
-typedef int (*reset_func)(struct drm_i915_private *,
+typedef int (*reset_func)(struct intel_gt *,
intel_engine_mask_t engine_mask,
unsigned int retry);
@@ -601,15 +549,14 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
return NULL;
}
-int intel_gpu_reset(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask)
+int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
int ret = -ETIMEDOUT;
int retry;
- reset = intel_get_gpu_reset(i915);
+ reset = intel_get_gpu_reset(gt->i915);
if (!reset)
return -ENODEV;
@@ -617,31 +564,14 @@ int intel_gpu_reset(struct drm_i915_private *i915,
* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
- /*
- * We stop engines, otherwise we might get failed reset and a
- * dead gpu (on elk). Also as modern gpu as kbl can suffer
- * from system hang if batchbuffer is progressing when
- * the reset is issued, regardless of READY_TO_RESET ack.
- * Thus assume it is best to stop engines on all gens
- * where we have a gpu reset.
- *
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
- *
- * WaMediaResetMainRingCleanup:ctg,elk (presumably)
- *
- * FIXME: Wa for more modern gens needs to be validated
- */
- if (retry)
- i915_stop_engines(i915, engine_mask);
-
GEM_TRACE("engine_mask=%x\n", engine_mask);
preempt_disable();
- ret = reset(i915, engine_mask, retry);
+ ret = reset(gt, engine_mask, retry);
preempt_enable();
}
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return ret;
}
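/*
 * Usage sketch (hypothetical call sites): engine_mask selects both the
 * reset domain and the retry policy of __intel_gt_reset() above.
 *
 *	err = __intel_gt_reset(gt, ALL_ENGINES);  // up to RESET_MAX_RETRIES
 *	err = __intel_gt_reset(gt, engine->mask); // single attempt
 */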
@@ -659,17 +589,17 @@ bool intel_has_reset_engine(struct drm_i915_private *i915)
return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
}
-int intel_reset_guc(struct drm_i915_private *i915)
+int intel_reset_guc(struct intel_gt *gt)
{
u32 guc_domain =
- INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+ INTEL_GEN(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
int ret;
- GEM_BUG_ON(!HAS_GUC(i915));
+ GEM_BUG_ON(!HAS_GT_UC(gt->i915));
- intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
- ret = gen6_hw_domain_reset(i915, guc_domain);
- intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
+ ret = gen6_hw_domain_reset(gt, guc_domain);
+ intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -691,56 +621,55 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
engine->reset.prepare(engine);
}
-static void revoke_mmaps(struct drm_i915_private *i915)
+static void revoke_mmaps(struct intel_gt *gt)
{
int i;
- for (i = 0; i < i915->ggtt.num_fences; i++) {
+ for (i = 0; i < gt->ggtt->num_fences; i++) {
struct drm_vma_offset_node *node;
struct i915_vma *vma;
u64 vma_offset;
- vma = READ_ONCE(i915->ggtt.fence_regs[i].vma);
+ vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
if (!vma)
continue;
if (!i915_vma_has_userfault(vma))
continue;
- GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
+ GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
node = &vma->obj->base.vma_node;
vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
- unmap_mapping_range(i915->drm.anon_inode->i_mapping,
+ unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
drm_vma_node_offset_addr(node) + vma_offset,
vma->size,
1);
}
}
-static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
if (intel_engine_pm_get_if_awake(engine))
awake |= engine->mask;
reset_prepare_engine(engine);
}
- intel_uc_reset_prepare(i915);
+ intel_uc_reset_prepare(&gt->uc);
return awake;
}
-static void gt_revoke(struct drm_i915_private *i915)
+static void gt_revoke(struct intel_gt *gt)
{
- revoke_mmaps(i915);
+ revoke_mmaps(gt);
}
-static int gt_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask)
+static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -750,14 +679,14 @@ static int gt_reset(struct drm_i915_private *i915,
* Everything depends on having the GTT running, so we need to start
* there.
*/
- err = i915_ggtt_enable_hw(i915);
+ err = i915_ggtt_enable_hw(gt->i915);
if (err)
return err;
- for_each_engine(engine, i915, id)
- intel_engine_reset(engine, stalled_mask & engine->mask);
+ for_each_engine(engine, gt->i915, id)
+ __intel_engine_reset(engine, stalled_mask & engine->mask);
- i915_gem_restore_fences(i915);
+ i915_gem_restore_fences(gt->i915);
return err;
}
@@ -770,13 +699,12 @@ static void reset_finish_engine(struct intel_engine_cs *engine)
intel_engine_signal_breadcrumbs(engine);
}
-static void reset_finish(struct drm_i915_private *i915,
- intel_engine_mask_t awake)
+static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
reset_finish_engine(engine);
if (awake & engine->mask)
intel_engine_pm_put(engine);
@@ -800,20 +728,19 @@ static void nop_submit_request(struct i915_request *request)
intel_engine_queue_breadcrumbs(engine);
}
-static void __i915_gem_set_wedged(struct drm_i915_private *i915)
+static void __intel_gt_set_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_engine_mask_t awake;
enum intel_engine_id id;
- if (test_bit(I915_WEDGED, &error->flags))
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
return;
- if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
+ if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
struct drm_printer p = drm_debug_printer(__func__);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt->i915, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
}
@@ -824,17 +751,17 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
/* Even if the GPU reset fails, it should still stop the engines */
- if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
- intel_gpu_reset(i915, ALL_ENGINES);
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(gt, ALL_ENGINES);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
engine->submit_request = nop_submit_request;
engine->schedule = NULL;
}
- i915->caps.scheduler = 0;
+ gt->i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
@@ -842,37 +769,36 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* in nop_submit_request.
*/
synchronize_rcu_expedited();
- set_bit(I915_WEDGED, &error->flags);
+ set_bit(I915_WEDGED, &gt->reset.flags);
/* Mark all executing requests as skipped */
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, gt->i915, id)
engine->cancel_requests(engine);
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
GEM_TRACE("end\n");
}
-void i915_gem_set_wedged(struct drm_i915_private *i915)
+void intel_gt_set_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
intel_wakeref_t wakeref;
- mutex_lock(&error->wedge_mutex);
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- __i915_gem_set_wedged(i915);
- mutex_unlock(&error->wedge_mutex);
+ mutex_lock(&gt->reset.mutex);
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ __intel_gt_set_wedged(gt);
+ mutex_unlock(&gt->reset.mutex);
}
-static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
+static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
- struct i915_timeline *tl;
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl;
- if (!test_bit(I915_WEDGED, &error->flags))
+ if (!test_bit(I915_WEDGED, &gt->reset.flags))
return true;
- if (!i915->gt.scratch) /* Never full initialised, recovery impossible */
+ if (!gt->scratch) /* Never fully initialised, recovery impossible */
return false;
GEM_TRACE("start\n");
@@ -887,8 +813,8 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
*
* No more can be submitted until we reset the wedged bit.
*/
- mutex_lock(&i915->gt.timelines.mutex);
- list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
+ mutex_lock(&timelines->mutex);
+ list_for_each_entry(tl, &timelines->active_list, link) {
struct i915_request *rq;
rq = i915_active_request_get_unlocked(&tl->last_request);
@@ -905,9 +831,9 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
}
- mutex_unlock(&i915->gt.timelines.mutex);
+ mutex_unlock(&timelines->mutex);
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(gt, false);
/*
* Undo nop_submit_request. We prevent all new i915 requests from
@@ -918,53 +844,51 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
* the nop_submit_request on reset, we can do this from normal
* context and do not require stop_machine().
*/
- intel_engines_reset_default_submission(i915);
+ intel_engines_reset_default_submission(gt);
GEM_TRACE("end\n");
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
- clear_bit(I915_WEDGED, &i915->gpu_error.flags);
+ clear_bit(I915_WEDGED, &gt->reset.flags);
return true;
}
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+bool intel_gt_unset_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
bool result;
- mutex_lock(&error->wedge_mutex);
- result = __i915_gem_unset_wedged(i915);
- mutex_unlock(&error->wedge_mutex);
+ mutex_lock(&gt->reset.mutex);
+ result = __intel_gt_unset_wedged(gt);
+ mutex_unlock(&gt->reset.mutex);
return result;
}
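Taken together, the pair gives the recovery flow sketched below (illustrative, not in-tree code); both entry points serialise on gt->reset.mutex:

	static void example_recover(struct intel_gt *gt)
	{
		/* park submission; new execbuf returns -EIO while wedged */
		intel_gt_set_wedged(gt);

		/* ... wait out or flush whatever caused the wedge ... */

		if (!intel_gt_unset_wedged(gt))
			dev_err(gt->i915->drm.dev, "recovery impossible\n");
	}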
-static int do_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask)
+static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
int err, i;
- gt_revoke(i915);
+ gt_revoke(gt);
- err = intel_gpu_reset(i915, ALL_ENGINES);
+ err = __intel_gt_reset(gt, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
msleep(10 * (i + 1));
- err = intel_gpu_reset(i915, ALL_ENGINES);
+ err = __intel_gt_reset(gt, ALL_ENGINES);
}
if (err)
return err;
- return gt_reset(i915, stalled_mask);
+ return gt_reset(gt, stalled_mask);
}
-static int resume(struct drm_i915_private *i915)
+static int resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
ret = engine->resume(engine);
if (ret)
return ret;
@@ -974,8 +898,8 @@ static int resume(struct drm_i915_private *i915)
}
/**
- * i915_reset - reset chip after a hang
- * @i915: #drm_i915_private to reset
+ * intel_gt_reset - reset chip after a hang
+ * @gt: #intel_gt to reset
* @stalled_mask: mask of the stalled engines with the guilty requests
* @reason: user error message for why we are resetting
*
@@ -990,50 +914,50 @@ static int resume(struct drm_i915_private *i915)
* - re-init interrupt state
* - re-init display
*/
-void i915_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask,
- const char *reason)
+void intel_gt_reset(struct intel_gt *gt,
+ intel_engine_mask_t stalled_mask,
+ const char *reason)
{
- struct i915_gpu_error *error = &i915->gpu_error;
intel_engine_mask_t awake;
int ret;
- GEM_TRACE("flags=%lx\n", error->flags);
+ GEM_TRACE("flags=%lx\n", gt->reset.flags);
might_sleep();
- GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
- mutex_lock(&error->wedge_mutex);
+ GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+ mutex_lock(&gt->reset.mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
- if (!__i915_gem_unset_wedged(i915))
+ if (!__intel_gt_unset_wedged(gt))
goto unlock;
if (reason)
- dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
- error->reset_count++;
+ dev_notice(gt->i915->drm.dev,
+ "Resetting chip for %s\n", reason);
+ atomic_inc(&gt->i915->gpu_error.reset_count);
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
- if (!intel_has_gpu_reset(i915)) {
+ if (!intel_has_gpu_reset(gt->i915)) {
if (i915_modparams.reset)
- dev_err(i915->drm.dev, "GPU reset not supported\n");
+ dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
else
DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
- intel_runtime_pm_disable_interrupts(i915);
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_disable_interrupts(gt->i915);
- if (do_reset(i915, stalled_mask)) {
- dev_err(i915->drm.dev, "Failed to reset chip\n");
+ if (do_reset(gt, stalled_mask)) {
+ dev_err(gt->i915->drm.dev, "Failed to reset chip\n");
goto taint;
}
- if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
- intel_runtime_pm_enable_interrupts(i915);
+ if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_enable_interrupts(gt->i915);
- intel_overlay_reset(i915);
+ intel_overlay_reset(gt->i915);
/*
* Next we need to restore the context, but we don't use those
@@ -1043,23 +967,23 @@ void i915_reset(struct drm_i915_private *i915,
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
- ret = i915_gem_init_hw(i915);
+ ret = i915_gem_init_hw(gt->i915);
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
goto taint;
}
- ret = resume(i915);
+ ret = resume(gt);
if (ret)
goto taint;
- i915_queue_hangcheck(i915);
+ intel_gt_queue_hangcheck(gt);
finish:
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
unlock:
- mutex_unlock(&error->wedge_mutex);
+ mutex_unlock(&gt->reset.mutex);
return;
taint:
@@ -1077,18 +1001,17 @@ taint:
*/
add_taint_for_CI(TAINT_WARN);
error:
- __i915_gem_set_wedged(i915);
+ __intel_gt_set_wedged(gt);
goto finish;
}
-static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
- struct intel_engine_cs *engine)
+static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
- return intel_gpu_reset(i915, engine->mask);
+ return __intel_gt_reset(engine->gt, engine->mask);
}
/**
- * i915_reset_engine - reset GPU engine to recover from a hang
+ * intel_engine_reset - reset GPU engine to recover from a hang
* @engine: engine to reset
* @msg: reason for GPU reset; or NULL for no dev_notice()
*
@@ -1100,13 +1023,13 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
* - reset engine (which will force the engine to idle)
* - re-init/configure engine
*/
-int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
- struct i915_gpu_error *error = &engine->i915->gpu_error;
+ struct intel_gt *gt = engine->gt;
int ret;
- GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
- GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+ GEM_TRACE("%s flags=%lx\n", engine->name, gt->reset.flags);
+ GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
if (!intel_engine_pm_get_if_awake(engine))
return 0;
@@ -1116,16 +1039,16 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
if (msg)
dev_notice(engine->i915->drm.dev,
"Resetting %s for %s\n", engine->name, msg);
- error->reset_engine_count[engine->id]++;
+ atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
- if (!engine->i915->guc.execbuf_client)
- ret = intel_gt_reset_engine(engine->i915, engine);
+ if (!engine->gt->uc.guc.execbuf_client)
+ ret = intel_gt_reset_engine(engine);
else
- ret = intel_guc_reset_engine(&engine->i915->guc, engine);
+ ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- engine->i915->guc.execbuf_client ? "GuC " : "",
+ engine->gt->uc.guc.execbuf_client ? "GuC " : "",
engine->name, ret);
goto out;
}
@@ -1135,7 +1058,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* active request and can drop it, adjust head to skip the offending
* request to resume executing remaining requests in the queue.
*/
- intel_engine_reset(engine, true);
+ __intel_engine_reset(engine, true);
/*
* The engine and its registers (and workarounds in case of render)
@@ -1151,16 +1074,15 @@ out:
return ret;
}
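A hypothetical caller, following the same pattern intel_gt_handle_error() uses below: the per-engine I915_RESET_ENGINE bit must be won before resetting that engine and released with a wake-up afterwards:

	static int example_reset_one(struct intel_engine_cs *engine)
	{
		struct intel_gt *gt = engine->gt;
		int err = -EBUSY;

		if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
				      &gt->reset.flags)) {
			err = intel_engine_reset(engine, "example");
			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
		return err;
	}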
-static void i915_reset_device(struct drm_i915_private *i915,
- u32 engine_mask,
- const char *reason)
+static void intel_gt_reset_global(struct intel_gt *gt,
+ u32 engine_mask,
+ const char *reason)
{
- struct i915_gpu_error *error = &i915->gpu_error;
- struct kobject *kobj = &i915->drm.primary->kdev->kobj;
+ struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
- struct i915_wedge_me w;
+ struct intel_wedge_me w;
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
@@ -1168,137 +1090,24 @@ static void i915_reset_device(struct drm_i915_private *i915,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
- i915_wedge_on_timeout(&w, i915, 5 * HZ) {
- intel_prepare_reset(i915);
+ intel_wedge_on_timeout(&w, gt, 5 * HZ) {
+ intel_prepare_reset(gt->i915);
/* Flush everyone using a resource about to be clobbered */
- synchronize_srcu_expedited(&error->reset_backoff_srcu);
+ synchronize_srcu_expedited(&gt->reset.backoff_srcu);
- i915_reset(i915, engine_mask, reason);
+ intel_gt_reset(gt, engine_mask, reason);
- intel_finish_reset(i915);
+ intel_finish_reset(gt->i915);
}
- if (!test_bit(I915_WEDGED, &error->flags))
+ if (!test_bit(I915_WEDGED, &gt->reset.flags))
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
-static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
-{
- intel_uncore_rmw(uncore, reg, 0, 0);
-}
-
-static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
-{
- GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
- GEN6_RING_FAULT_REG_POSTING_READ(engine);
-}
-
-static void clear_error_registers(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u32 eir;
-
- if (!IS_GEN(i915, 2))
- clear_register(uncore, PGTBL_ER);
-
- if (INTEL_GEN(i915) < 4)
- clear_register(uncore, IPEIR(RENDER_RING_BASE));
- else
- clear_register(uncore, IPEIR_I965);
-
- clear_register(uncore, EIR);
- eir = intel_uncore_read(uncore, EIR);
- if (eir) {
- /*
- * some errors might have become stuck,
- * mask them.
- */
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
- rmw_set(uncore, EMR, eir);
- intel_uncore_write(uncore, GEN2_IIR,
- I915_MASTER_ERROR_INTERRUPT);
- }
-
- if (INTEL_GEN(i915) >= 8) {
- rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
- intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
- } else if (INTEL_GEN(i915) >= 6) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine_masked(engine, i915, engine_mask, id)
- gen8_clear_engine_error_register(engine);
- }
-}
-
-static void gen6_check_faults(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- u32 fault;
-
- for_each_engine(engine, dev_priv, id) {
- fault = GEN6_RING_FAULT_REG_READ(engine);
- if (fault & RING_FAULT_VALID) {
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
- }
-}
-
-static void gen8_check_faults(struct drm_i915_private *dev_priv)
-{
- u32 fault = I915_READ(GEN8_RING_FAULT_REG);
-
- if (fault & RING_FAULT_VALID) {
- u32 fault_data0, fault_data1;
- u64 fault_addr;
-
- fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
- fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
- fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
- ((u64)fault_data0 << 12);
-
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr),
- lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
- }
-}
-
-void i915_check_and_clear_faults(struct drm_i915_private *i915)
-{
- /* From GEN8 onwards we only have one 'All Engine Fault Register' */
- if (INTEL_GEN(i915) >= 8)
- gen8_check_faults(i915);
- else if (INTEL_GEN(i915) >= 6)
- gen6_check_faults(i915);
- else
- return;
-
- clear_error_registers(i915, ALL_ENGINES);
-}
-
/**
- * i915_handle_error - handle a gpu error
- * @i915: i915 device private
+ * intel_gt_handle_error - handle a gpu error
+ * @gt: the intel_gt
* @engine_mask: mask representing engines that are hung
* @flags: control flags
* @fmt: Error message format string
@@ -1309,12 +1118,11 @@ void i915_check_and_clear_faults(struct drm_i915_private *i915)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-void i915_handle_error(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- unsigned long flags,
- const char *fmt, ...)
+void intel_gt_handle_error(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned long flags,
+ const char *fmt, ...)
{
- struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
intel_engine_mask_t tmp;
@@ -1338,33 +1146,31 @@ void i915_handle_error(struct drm_i915_private *i915,
* isn't the case at least when we get here by doing a
* simulated reset via debugfs, so get an RPM reference.
*/
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- engine_mask &= INTEL_INFO(i915)->engine_mask;
+ engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
- i915_capture_error_state(i915, engine_mask, msg);
- clear_error_registers(i915, engine_mask);
+ i915_capture_error_state(gt->i915, engine_mask, msg);
+ intel_gt_clear_error_registers(gt, engine_mask);
}
/*
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
- for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) {
+ for_each_engine_masked(engine, gt->i915, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &error->flags))
+ &gt->reset.flags))
continue;
- if (i915_reset_engine(engine, msg) == 0)
+ if (intel_engine_reset(engine, msg) == 0)
engine_mask &= ~engine->mask;
- clear_bit(I915_RESET_ENGINE + engine->id,
- &error->flags);
- wake_up_bit(&error->flags,
- I915_RESET_ENGINE + engine->id);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
}
}
@@ -1372,9 +1178,9 @@ void i915_handle_error(struct drm_i915_private *i915,
goto out;
/* Full reset needs the mutex, stop any other user trying to do so. */
- if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
- wait_event(error->reset_queue,
- !test_bit(I915_RESET_BACKOFF, &error->flags));
+ if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
goto out; /* piggy-back on the other reset */
}
@@ -1382,113 +1188,119 @@ void i915_handle_error(struct drm_i915_private *i915,
synchronize_rcu_expedited();
/* Prevent any other reset-engine attempt. */
- for_each_engine(engine, i915, tmp) {
+ for_each_engine(engine, gt->i915, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &error->flags))
- wait_on_bit(&error->flags,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
- i915_reset_device(i915, engine_mask, msg);
+ intel_gt_reset_global(gt, engine_mask, msg);
- for_each_engine(engine, i915, tmp) {
- clear_bit(I915_RESET_ENGINE + engine->id,
- &error->flags);
- }
-
- clear_bit(I915_RESET_BACKOFF, &error->flags);
- wake_up_all(&error->reset_queue);
+ for_each_engine(engine, gt->i915, tmp)
+ clear_bit_unlock(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags);
+ clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
+ smp_mb__after_atomic();
+ wake_up_all(&gt->reset.queue);
out:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}
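As a usage sketch, a hangcheck-style caller would report a stuck engine as below (illustrative only); I915_ERROR_CAPTURE asks for an error-state capture before any reset is attempted:

	static void example_declare_hang(struct intel_engine_cs *engine)
	{
		intel_gt_handle_error(engine->gt, engine->mask,
				      I915_ERROR_CAPTURE,
				      "no progress on %s", engine->name);
	}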
-int i915_reset_trylock(struct drm_i915_private *i915)
+int intel_gt_reset_trylock(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
int srcu;
- might_lock(&error->reset_backoff_srcu);
+ might_lock(&gt->reset.backoff_srcu);
might_sleep();
rcu_read_lock();
- while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
+ while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
rcu_read_unlock();
- if (wait_event_interruptible(error->reset_queue,
+ if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
- &error->flags)))
+ &gt->reset.flags)))
return -EINTR;
rcu_read_lock();
}
- srcu = srcu_read_lock(&error->reset_backoff_srcu);
+ srcu = srcu_read_lock(&gt->reset.backoff_srcu);
rcu_read_unlock();
return srcu;
}
-void i915_reset_unlock(struct drm_i915_private *i915, int tag)
-__releases(&i915->gpu_error.reset_backoff_srcu)
+void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
+__releases(&gt->reset.backoff_srcu)
{
- struct i915_gpu_error *error = &i915->gpu_error;
-
- srcu_read_unlock(&error->reset_backoff_srcu, tag);
+ srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}
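The trylock/unlock pair forms an SRCU read-side section: a reader (the GGTT pagefault path, for example) holds it across state that a reset would clobber, and the reset side flushes all readers with synchronize_srcu_expedited() before proceeding. Illustrative pattern:

	static int example_reset_safe_access(struct intel_gt *gt)
	{
		int srcu;

		srcu = intel_gt_reset_trylock(gt);
		if (srcu < 0)
			return srcu;	/* -EINTR while a reset backs off */

		/* ... touch GGTT/fence state a reset would clobber ... */

		intel_gt_reset_unlock(gt, srcu);
		return 0;
	}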
-int i915_terminally_wedged(struct drm_i915_private *i915)
+int intel_gt_terminally_wedged(struct intel_gt *gt)
{
- struct i915_gpu_error *error = &i915->gpu_error;
-
might_sleep();
- if (!__i915_wedged(error))
+ if (!intel_gt_is_wedged(gt))
return 0;
/* Reset still in progress? Maybe we will recover? */
- if (!test_bit(I915_RESET_BACKOFF, &error->flags))
+ if (!test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
return -EIO;
/* XXX intel_reset_finish() still takes struct_mutex!!! */
- if (mutex_is_locked(&i915->drm.struct_mutex))
+ if (mutex_is_locked(&gt->i915->drm.struct_mutex))
return -EAGAIN;
- if (wait_event_interruptible(error->reset_queue,
+ if (wait_event_interruptible(gt->reset.queue,
!test_bit(I915_RESET_BACKOFF,
- &error->flags)))
+ &gt->reset.flags)))
return -EINTR;
- return __i915_wedged(error) ? -EIO : 0;
+ return intel_gt_is_wedged(gt) ? -EIO : 0;
+}
+
+void intel_gt_init_reset(struct intel_gt *gt)
+{
+ init_waitqueue_head(&gt->reset.queue);
+ mutex_init(&gt->reset.mutex);
+ init_srcu_struct(&gt->reset.backoff_srcu);
+}
+
+void intel_gt_fini_reset(struct intel_gt *gt)
+{
+ cleanup_srcu_struct(&gt->reset.backoff_srcu);
}
-static void i915_wedge_me(struct work_struct *work)
+static void intel_wedge_me(struct work_struct *work)
{
- struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
+ struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
- dev_err(w->i915->drm.dev,
+ dev_err(w->gt->i915->drm.dev,
"%s timed out, cancelling all in-flight rendering.\n",
w->name);
- i915_gem_set_wedged(w->i915);
+ intel_gt_set_wedged(w->gt);
}
-void __i915_init_wedge(struct i915_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name)
+void __intel_init_wedge(struct intel_wedge_me *w,
+ struct intel_gt *gt,
+ long timeout,
+ const char *name)
{
- w->i915 = i915;
+ w->gt = gt;
w->name = name;
- INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
+ INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
schedule_delayed_work(&w->work, timeout);
}
-void __i915_fini_wedge(struct i915_wedge_me *w)
+void __intel_fini_wedge(struct intel_wedge_me *w)
{
cancel_delayed_work_sync(&w->work);
destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
+ w->gt = NULL;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index 580ebdb59eca..37a987b17108 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -11,58 +11,67 @@
#include <linux/types.h>
#include <linux/srcu.h>
-#include "gt/intel_engine_types.h"
+#include "intel_engine_types.h"
+#include "intel_reset_types.h"
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
struct intel_guc;
+void intel_gt_init_reset(struct intel_gt *gt);
+void intel_gt_fini_reset(struct intel_gt *gt);
+
__printf(4, 5)
-void i915_handle_error(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask,
- unsigned long flags,
- const char *fmt, ...);
+void intel_gt_handle_error(struct intel_gt *gt,
+ intel_engine_mask_t engine_mask,
+ unsigned long flags,
+ const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
-void i915_check_and_clear_faults(struct drm_i915_private *i915);
-
-void i915_reset(struct drm_i915_private *i915,
- intel_engine_mask_t stalled_mask,
- const char *reason);
-int i915_reset_engine(struct intel_engine_cs *engine,
- const char *reason);
-
-void i915_reset_request(struct i915_request *rq, bool guilty);
+void intel_gt_reset(struct intel_gt *gt,
+ intel_engine_mask_t stalled_mask,
+ const char *reason);
+int intel_engine_reset(struct intel_engine_cs *engine,
+ const char *reason);
-int __must_check i915_reset_trylock(struct drm_i915_private *i915);
-void i915_reset_unlock(struct drm_i915_private *i915, int tag);
+void __i915_request_reset(struct i915_request *rq, bool guilty);
-int i915_terminally_wedged(struct drm_i915_private *i915);
+int __must_check intel_gt_reset_trylock(struct intel_gt *gt);
+void intel_gt_reset_unlock(struct intel_gt *gt, int tag);
-bool intel_has_gpu_reset(struct drm_i915_private *i915);
-bool intel_has_reset_engine(struct drm_i915_private *i915);
+void intel_gt_set_wedged(struct intel_gt *gt);
+bool intel_gt_unset_wedged(struct intel_gt *gt);
+int intel_gt_terminally_wedged(struct intel_gt *gt);
-int intel_gpu_reset(struct drm_i915_private *i915,
- intel_engine_mask_t engine_mask);
+int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask);
-int intel_reset_guc(struct drm_i915_private *i915);
+int intel_reset_guc(struct intel_gt *gt);
-struct i915_wedge_me {
+struct intel_wedge_me {
struct delayed_work work;
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
const char *name;
};
-void __i915_init_wedge(struct i915_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name);
-void __i915_fini_wedge(struct i915_wedge_me *w);
+void __intel_init_wedge(struct intel_wedge_me *w,
+ struct intel_gt *gt,
+ long timeout,
+ const char *name);
+void __intel_fini_wedge(struct intel_wedge_me *w);
-#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \
- (W)->i915; \
- __i915_fini_wedge((W)))
+#define intel_wedge_on_timeout(W, GT, TIMEOUT) \
+ for (__intel_init_wedge((W), (GT), (TIMEOUT), __func__); \
+ (W)->gt; \
+ __intel_fini_wedge((W)))
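Usage mirrors intel_gt_reset_global() above: if the guarded body does not complete within the timeout, the delayed work fires and wedges the GPU. A minimal sketch (example_guarded_section is an illustrative name):

	static inline void example_guarded_section(struct intel_gt *gt)
	{
		struct intel_wedge_me w;

		intel_wedge_on_timeout(&w, gt, 5 * HZ) {
			/* reset work that must complete within 5 seconds */
		}
	}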
+
+static inline bool __intel_reset_failed(const struct intel_reset *reset)
+{
+ return unlikely(test_bit(I915_WEDGED, &reset->flags));
+}
+
+bool intel_has_gpu_reset(struct drm_i915_private *i915);
+bool intel_has_reset_engine(struct drm_i915_private *i915);
#endif /* I915_RESET_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
new file mode 100644
index 000000000000..31968356e0c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_RESET_TYPES_H_
+#define __INTEL_RESET_TYPES_H_
+
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/srcu.h>
+
+struct intel_reset {
+ /**
+ * flags: Control various stages of the GPU reset
+ *
+ * #I915_RESET_BACKOFF - When we start a global reset, we need to
+ * serialise with any other users attempting to do the same, and
+ * any global resources that may be clobbered by the reset (such as
+ * FENCE registers).
+ *
+ * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
+ * acquire the struct_mutex to reset an engine, we need an explicit
+ * flag to prevent two concurrent reset attempts on the same engine.
+ * As the number of engines continues to grow, allocate the flags from
+ * the most significant bits.
+ *
+ * #I915_WEDGED - If reset fails and we can no longer use the GPU,
+ * we set the #I915_WEDGED bit. Prior to command submission, e.g.
+ * i915_request_alloc(), this bit is checked and the sequence
+ * aborted (with -EIO reported to userspace) if set.
+ */
+ unsigned long flags;
+#define I915_RESET_BACKOFF 0
+#define I915_RESET_MODESET 1
+#define I915_RESET_ENGINE 2
+#define I915_WEDGED (BITS_PER_LONG - 1)
+
+ struct mutex mutex; /* serialises wedging/unwedging */
+
+ /**
+ * Waitqueue to signal when the reset has completed. Used by clients
+ * that wait for the wedged state to settle.
+ */
+ wait_queue_head_t queue;
+
+ struct srcu_struct backoff_srcu;
+};
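An illustrative accessor against the layout above (assumes linux/bitops.h): engine flags grow upwards from I915_RESET_ENGINE while I915_WEDGED sits in the most significant bit, so the two ranges can expand independently within one unsigned long:

	static inline bool example_engine_reset_busy(const struct intel_reset *reset,
						     unsigned int engine_id)
	{
		return test_bit(I915_RESET_ENGINE + engine_id, &reset->flags);
	}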
+
+#endif /* __INTEL_RESET_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 12010e798868..8d24a49e5139 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -34,9 +34,9 @@
#include "gem/i915_gem_context.h"
#include "i915_drv.h"
-#include "i915_gem_render_state.h"
#include "i915_trace.h"
#include "intel_context.h"
+#include "intel_gt.h"
#include "intel_reset.h"
#include "intel_workarounds.h"
@@ -75,7 +75,8 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = cmd;
while (num_store_dw--) {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = 0;
}
*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
@@ -148,7 +149,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@@ -156,7 +159,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@@ -208,7 +213,9 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -241,7 +248,9 @@ gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
int ret;
@@ -299,7 +308,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT) |
+ PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
/* Finally we can flush and with it emit the breadcrumb */
@@ -342,7 +353,9 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
u32 *cs, flags = 0;
/*
@@ -725,7 +738,45 @@ out:
static void reset_prepare(struct intel_engine_cs *engine)
{
- intel_engine_stop_cs(engine);
+ struct intel_uncore *uncore = engine->uncore;
+ const u32 base = engine->mmio_base;
+
+ /*
+ * We stop the engines, otherwise we might get a failed reset and a
+ * dead gpu (on elk). Also, a gpu as modern as kbl can suffer a
+ * system hang if a batchbuffer is progressing when the reset is
+ * issued, regardless of the READY_TO_RESET ack. Thus assume it is
+ * best to stop the engines on all gens where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ GEM_TRACE("%s\n", engine->name);
+
+ if (intel_engine_stop_cs(engine))
+ GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
+
+ intel_uncore_write_fw(uncore,
+ RING_HEAD(base),
+ intel_uncore_read_fw(uncore, RING_TAIL(base)));
+ intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
+
+ intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
+ intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
+ intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
+
+ /* The ring must be empty before it is disabled */
+ intel_uncore_write_fw(uncore, RING_CTL(base), 0);
+
+ /* Check acts as a post */
+ if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
+ GEM_TRACE("%s: ring head [%x] not parked\n",
+ engine->name,
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
static void reset_ring(struct intel_engine_cs *engine, bool stalled)
@@ -781,7 +832,7 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
* If the request was innocent, we try to replay the request
* with the restored context.
*/
- i915_reset_request(rq, stalled);
+ __i915_request_reset(rq, stalled);
GEM_BUG_ON(rq->ring != engine->buffer);
head = rq->head;
@@ -797,21 +848,6 @@ static void reset_finish(struct intel_engine_cs *engine)
{
}
-static int intel_rcs_ctx_init(struct i915_request *rq)
-{
- int ret;
-
- ret = intel_engine_emit_ctx_wa(rq);
- if (ret != 0)
- return ret;
-
- ret = i915_gem_render_state_emit(rq);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int rcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1033,14 +1069,14 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
- gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
+ gen6_unmask_pm_irq(engine->gt, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~0);
- gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
+ gen6_mask_pm_irq(engine->gt, engine->irq_enable_mask);
}
static int
@@ -1071,9 +1107,11 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+ u32 *cs, cs_offset =
+ intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
- GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
+ GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1156,7 +1194,7 @@ int intel_ring_pin(struct intel_ring *ring)
if (atomic_fetch_inc(&ring->pin_count))
return 0;
- ret = i915_timeline_pin(ring->timeline);
+ ret = intel_timeline_pin(ring->timeline);
if (ret)
goto err_unpin;
@@ -1189,12 +1227,13 @@ int intel_ring_pin(struct intel_ring *ring)
GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
+ GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context);
return 0;
err_ring:
i915_vma_unpin(vma);
err_timeline:
- i915_timeline_unpin(ring->timeline);
+ intel_timeline_unpin(ring->timeline);
err_unpin:
atomic_dec(&ring->pin_count);
return ret;
@@ -1215,10 +1254,13 @@ void intel_ring_unpin(struct intel_ring *ring)
if (!atomic_dec_and_test(&ring->pin_count))
return;
+ GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
+
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);
GEM_BUG_ON(!ring->vma);
+ i915_vma_unset_ggtt_write(ring->vma);
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
@@ -1230,19 +1272,19 @@ void intel_ring_unpin(struct intel_ring *ring)
ring->vma->obj->pin_global--;
i915_vma_unpin(ring->vma);
- i915_timeline_unpin(ring->timeline);
+ intel_timeline_unpin(ring->timeline);
}
-static struct i915_vma *
-intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
+static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
- struct i915_address_space *vm = &dev_priv->ggtt.vm;
+ struct i915_address_space *vm = &ggtt->vm;
+ struct drm_i915_private *i915 = vm->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = i915_gem_object_create_stolen(dev_priv, size);
+ obj = i915_gem_object_create_stolen(i915, size);
if (!obj)
- obj = i915_gem_object_create_internal(dev_priv, size);
+ obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -1266,9 +1308,10 @@ err:
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
- struct i915_timeline *timeline,
+ struct intel_timeline *timeline,
int size)
{
+ struct drm_i915_private *i915 = engine->i915;
struct intel_ring *ring;
struct i915_vma *vma;
@@ -1281,7 +1324,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
kref_init(&ring->ref);
INIT_LIST_HEAD(&ring->request_list);
- ring->timeline = i915_timeline_get(timeline);
+ ring->timeline = intel_timeline_get(timeline);
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
@@ -1289,12 +1332,12 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
* of the buffer.
*/
ring->effective_size = size;
- if (IS_I830(engine->i915) || IS_I845G(engine->i915))
+ if (IS_I830(i915) || IS_I845G(i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
intel_ring_update_space(ring);
- vma = intel_ring_create_vma(engine->i915, size);
+ vma = create_ring_vma(engine->gt->ggtt, size);
if (IS_ERR(vma)) {
kfree(ring);
return ERR_CAST(vma);
@@ -1311,13 +1354,12 @@ void intel_ring_free(struct kref *ref)
i915_vma_close(ring->vma);
i915_vma_put(ring->vma);
- i915_timeline_put(ring->timeline);
+ intel_timeline_put(ring->timeline);
kfree(ring);
}
static void __ring_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
@@ -1330,33 +1372,45 @@ static void ring_context_destroy(struct kref *ref)
if (ce->state)
__ring_context_fini(ce);
+ intel_context_fini(ce);
intel_context_free(ce);
}
-static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+static struct i915_address_space *vm_alias(struct intel_context *ce)
+{
+ struct i915_address_space *vm;
+
+ vm = ce->vm;
+ if (i915_is_ggtt(vm))
+ vm = &i915_vm_to_ggtt(vm)->alias->vm;
+
+ return vm;
+}
+
+static int __context_pin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
int err = 0;
- vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ vm = vm_alias(ce);
if (vm)
err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
return err;
}
-static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+static void __context_unpin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
- vm = ctx->vm ?: &ctx->i915->mm.aliasing_ppgtt->vm;
+ vm = vm_alias(ce);
if (vm)
gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
static void ring_context_unpin(struct intel_context *ce)
{
- __context_unpin_ppgtt(ce->gem_context);
+ __context_unpin_ppgtt(ce);
}
static struct i915_vma *
@@ -1412,7 +1466,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_unpin_map(obj);
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -1446,11 +1500,11 @@ static int ring_context_pin(struct intel_context *ce)
ce->state = vma;
}
- err = intel_context_active_acquire(ce, PIN_HIGH);
+ err = intel_context_active_acquire(ce);
if (err)
return err;
- err = __context_pin_ppgtt(ce->gem_context);
+ err = __context_pin_ppgtt(ce);
if (err)
goto err_active;
@@ -1492,7 +1546,7 @@ static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = ppgtt->pd->base.ggtt_offset << 10;
+ *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
intel_ring_advance(rq, cs);
@@ -1511,7 +1565,8 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
- *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -1627,7 +1682,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+ INTEL_GT_SCRATCH_FIELD_DEFAULT);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1640,7 +1696,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
return 0;
}
-static int remap_l3(struct i915_request *rq, int slice)
+static int remap_l3_slice(struct i915_request *rq, int slice)
{
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
int i;
@@ -1668,15 +1724,34 @@ static int remap_l3(struct i915_request *rq, int slice)
return 0;
}
+static int remap_l3(struct i915_request *rq)
+{
+ struct i915_gem_context *ctx = rq->gem_context;
+ int i, err;
+
+ if (!ctx->remap_slice)
+ return 0;
+
+ for (i = 0; i < MAX_L3_SLICES; i++) {
+ if (!(ctx->remap_slice & BIT(i)))
+ continue;
+
+ err = remap_l3_slice(rq, i);
+ if (err)
+ return err;
+ }
+
+ ctx->remap_slice = 0;
+ return 0;
+}
+
static int switch_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
- struct i915_gem_context *ctx = rq->gem_context;
- struct i915_address_space *vm =
- ctx->vm ?: &rq->i915->mm.aliasing_ppgtt->vm;
+ struct i915_address_space *vm = vm_alias(rq->hw_context);
unsigned int unwind_mm = 0;
u32 hw_flags = 0;
- int ret, i;
+ int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
@@ -1720,7 +1795,7 @@ static int switch_context(struct i915_request *rq)
* as nothing actually executes using the kernel context; it
* is purely used for flushing user contexts.
*/
- if (i915_gem_context_is_kernel(ctx))
+ if (i915_gem_context_is_kernel(rq->gem_context))
hw_flags = MI_RESTORE_INHIBIT;
ret = mi_set_context(rq, hw_flags);
@@ -1754,18 +1829,9 @@ static int switch_context(struct i915_request *rq)
goto err_mm;
}
- if (ctx->remap_slice) {
- for (i = 0; i < MAX_L3_SLICES; i++) {
- if (!(ctx->remap_slice & BIT(i)))
- continue;
-
- ret = remap_l3(rq, i);
- if (ret)
- goto err_mm;
- }
-
- ctx->remap_slice = 0;
- }
+ ret = remap_l3(rq);
+ if (ret)
+ goto err_mm;
return 0;
@@ -2166,11 +2232,9 @@ static void setup_rcs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (INTEL_GEN(i915) >= 7) {
- engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
} else if (IS_GEN(i915, 6)) {
- engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen6_render_ring_flush;
engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
} else if (IS_GEN(i915, 5)) {
@@ -2267,11 +2331,11 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
int intel_ring_submission_init(struct intel_engine_cs *engine)
{
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
struct intel_ring *ring;
int err;
- timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
+ timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
@@ -2279,7 +2343,7 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->has_initial_breadcrumb);
ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
- i915_timeline_put(timeline);
+ intel_timeline_put(timeline);
if (IS_ERR(ring)) {
err = PTR_ERR(ring);
goto err;
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index c311ce9c6f9d..6daa9eb59e19 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -4,38 +4,36 @@
* Copyright © 2016-2018 Intel Corporation
*/
+#include "gt/intel_gt_types.h"
+
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_syncmap.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
-struct i915_timeline_hwsp {
- struct i915_gt_timelines *gt;
+struct intel_timeline_hwsp {
+ struct intel_gt *gt;
+ struct intel_gt_timelines *gt_timelines;
struct list_head free_link;
struct i915_vma *vma;
u64 free_bitmap;
};
-struct i915_timeline_cacheline {
+struct intel_timeline_cacheline {
struct i915_active active;
- struct i915_timeline_hwsp *hwsp;
+ struct intel_timeline_hwsp *hwsp;
void *vaddr;
#define CACHELINE_BITS 6
#define CACHELINE_FREE CACHELINE_BITS
};
-static inline struct drm_i915_private *
-hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
-{
- return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
-}
-
-static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
+static struct i915_vma *__hwsp_alloc(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -45,7 +43,7 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma))
i915_gem_object_put(obj);
@@ -53,11 +51,10 @@ static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
}
static struct i915_vma *
-hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
+hwsp_alloc(struct intel_timeline *timeline, unsigned int *cacheline)
{
- struct drm_i915_private *i915 = timeline->i915;
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline_hwsp *hwsp;
+ struct intel_gt_timelines *gt = &timeline->gt->timelines;
+ struct intel_timeline_hwsp *hwsp;
BUILD_BUG_ON(BITS_PER_TYPE(u64) * CACHELINE_BYTES > PAGE_SIZE);
@@ -75,16 +72,17 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
if (!hwsp)
return ERR_PTR(-ENOMEM);
- vma = __hwsp_alloc(i915);
+ vma = __hwsp_alloc(timeline->gt);
if (IS_ERR(vma)) {
kfree(hwsp);
return vma;
}
vma->private = hwsp;
+ hwsp->gt = timeline->gt;
hwsp->vma = vma;
hwsp->free_bitmap = ~0ull;
- hwsp->gt = gt;
+ hwsp->gt_timelines = gt;
spin_lock_irq(&gt->hwsp_lock);
list_add(&hwsp->free_link, &gt->hwsp_free_list);
@@ -102,9 +100,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
return hwsp->vma;
}
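The sizing works out as follows (assuming 4KiB pages, as on x86): CACHELINE_BITS is 6, so each HWSP page is carved into PAGE_SIZE / CACHELINE_BYTES = 4096 / 64 = 64 cachelines, one bit per slot in the u64 free_bitmap; hence the initial free_bitmap = ~0ull and the BUILD_BUG_ON at the top of this function.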
-static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
+static void __idle_hwsp_free(struct intel_timeline_hwsp *hwsp, int cacheline)
{
- struct i915_gt_timelines *gt = hwsp->gt;
+ struct intel_gt_timelines *gt = hwsp->gt_timelines;
unsigned long flags;
spin_lock_irqsave(&gt->hwsp_lock, flags);
@@ -126,7 +124,7 @@ static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
spin_unlock_irqrestore(&gt->hwsp_lock, flags);
}
-static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
+static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(!i915_active_is_idle(&cl->active));
@@ -140,7 +138,7 @@ static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
static void __cacheline_retire(struct i915_active *active)
{
- struct i915_timeline_cacheline *cl =
+ struct intel_timeline_cacheline *cl =
container_of(active, typeof(*cl), active);
i915_vma_unpin(cl->hwsp->vma);
@@ -148,10 +146,19 @@ static void __cacheline_retire(struct i915_active *active)
__idle_cacheline_free(cl);
}
-static struct i915_timeline_cacheline *
-cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
+static int __cacheline_active(struct i915_active *active)
+{
+ struct intel_timeline_cacheline *cl =
+ container_of(active, typeof(*cl), active);
+
+ __i915_vma_pin(cl->hwsp->vma);
+ return 0;
+}
+
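Net effect of the new callback (sketch): the HWSP vma pin now brackets the active lifetime itself,

	cacheline_acquire() -> i915_active_acquire()
		first user: __cacheline_active()  -> __i915_vma_pin()
	cacheline_release() -> i915_active_release()
		last user:  __cacheline_retire()  -> i915_vma_unpin()

so cacheline_acquire() below no longer needs to special-case the first reference.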
+static struct intel_timeline_cacheline *
+cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
{
- struct i915_timeline_cacheline *cl;
+ struct intel_timeline_cacheline *cl;
void *vaddr;
GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
@@ -170,24 +177,25 @@ cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
cl->hwsp = hwsp;
cl->vaddr = page_pack_bits(vaddr, cacheline);
- i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);
+ i915_active_init(hwsp->gt->i915, &cl->active,
+ __cacheline_active, __cacheline_retire);
return cl;
}
-static void cacheline_acquire(struct i915_timeline_cacheline *cl)
+static void cacheline_acquire(struct intel_timeline_cacheline *cl)
{
- if (cl && i915_active_acquire(&cl->active))
- __i915_vma_pin(cl->hwsp->vma);
+ if (cl)
+ i915_active_acquire(&cl->active);
}
-static void cacheline_release(struct i915_timeline_cacheline *cl)
+static void cacheline_release(struct intel_timeline_cacheline *cl)
{
if (cl)
i915_active_release(&cl->active);
}
-static void cacheline_free(struct i915_timeline_cacheline *cl)
+static void cacheline_free(struct intel_timeline_cacheline *cl)
{
GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
@@ -196,29 +204,22 @@ static void cacheline_free(struct i915_timeline_cacheline *cl)
__idle_cacheline_free(cl);
}
-int i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *timeline,
- struct i915_vma *hwsp)
+int intel_timeline_init(struct intel_timeline *timeline,
+ struct intel_gt *gt,
+ struct i915_vma *hwsp)
{
void *vaddr;
- /*
- * Ideally we want a set of engines on a single leaf as we expect
- * to mostly be tracking synchronisation between engines. It is not
- * a huge issue if this is not the case, but we may want to mitigate
- * any page crossing penalties if they become an issue.
- *
- * Called during early_init before we know how many engines there are.
- */
- BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
+ kref_init(&timeline->kref);
- timeline->i915 = i915;
+ timeline->gt = gt;
timeline->pin_count = 0;
+
timeline->has_initial_breadcrumb = !hwsp;
timeline->hwsp_cacheline = NULL;
if (!hwsp) {
- struct i915_timeline_cacheline *cl;
+ struct intel_timeline_cacheline *cl;
unsigned int cacheline;
hwsp = hwsp_alloc(timeline, &cacheline);
@@ -261,55 +262,47 @@ int i915_timeline_init(struct drm_i915_private *i915,
return 0;
}
-void i915_timelines_init(struct drm_i915_private *i915)
+static void timelines_init(struct intel_gt *gt)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
+ struct intel_gt_timelines *timelines = &gt->timelines;
- mutex_init(&gt->mutex);
- INIT_LIST_HEAD(&gt->active_list);
+ mutex_init(&timelines->mutex);
+ INIT_LIST_HEAD(&timelines->active_list);
- spin_lock_init(&gt->hwsp_lock);
- INIT_LIST_HEAD(&gt->hwsp_free_list);
+ spin_lock_init(&timelines->hwsp_lock);
+ INIT_LIST_HEAD(&timelines->hwsp_free_list);
+}
- /* via i915_gem_wait_for_idle() */
- i915_gem_shrinker_taints_mutex(i915, &gt->mutex);
+void intel_timelines_init(struct drm_i915_private *i915)
+{
+ timelines_init(&i915->gt);
}
-static void timeline_add_to_active(struct i915_timeline *tl)
+static void timeline_add_to_active(struct intel_timeline *tl)
{
- struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+ struct intel_gt_timelines *gt = &tl->gt->timelines;
mutex_lock(&gt->mutex);
list_add(&tl->link, &gt->active_list);
mutex_unlock(&gt->mutex);
}
-static void timeline_remove_from_active(struct i915_timeline *tl)
+static void timeline_remove_from_active(struct intel_timeline *tl)
{
- struct i915_gt_timelines *gt = &tl->i915->gt.timelines;
+ struct intel_gt_timelines *gt = &tl->gt->timelines;
mutex_lock(&gt->mutex);
list_del(&tl->link);
mutex_unlock(&gt->mutex);
}
-/**
- * i915_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void i915_timelines_park(struct drm_i915_private *i915)
+static void timelines_park(struct intel_gt *gt)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline *timeline;
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *timeline;
- mutex_lock(&gt->mutex);
- list_for_each_entry(timeline, &gt->active_list, link) {
+ mutex_lock(&timelines->mutex);
+ list_for_each_entry(timeline, &timelines->active_list, link) {
/*
* All known fences are completed so we can scrap
* the current sync point tracking and start afresh,
@@ -318,10 +311,25 @@ void i915_timelines_park(struct drm_i915_private *i915)
*/
i915_syncmap_free(&timeline->sync);
}
- mutex_unlock(&gt->mutex);
+ mutex_unlock(&timelines->mutex);
}
-void i915_timeline_fini(struct i915_timeline *timeline)
+/**
+ * intel_timelines_park - called when the driver idles
+ * @i915: the drm_i915_private device
+ *
+ * When the driver is completely idle, we know that all of our sync points
+ * have been signaled and our tracking is then entirely redundant. Any request
+ * to wait upon an older sync point will be completed instantly as we know
+ * the fence is signaled and therefore we will not even look them up in the
+ * sync point map.
+ */
+void intel_timelines_park(struct drm_i915_private *i915)
+{
+ timelines_park(&i915->gt);
+}
+
+void intel_timeline_fini(struct intel_timeline *timeline)
{
GEM_BUG_ON(timeline->pin_count);
GEM_BUG_ON(!list_empty(&timeline->requests));
@@ -336,29 +344,26 @@ void i915_timeline_fini(struct i915_timeline *timeline)
i915_vma_put(timeline->hwsp_ggtt);
}
-struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915,
- struct i915_vma *global_hwsp)
+struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
{
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
int err;
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
if (!timeline)
return ERR_PTR(-ENOMEM);
- err = i915_timeline_init(i915, timeline, global_hwsp);
+ err = intel_timeline_init(timeline, gt, global_hwsp);
if (err) {
kfree(timeline);
return ERR_PTR(err);
}
- kref_init(&timeline->kref);
-
return timeline;
}
-int i915_timeline_pin(struct i915_timeline *tl)
+int intel_timeline_pin(struct intel_timeline *tl)
{
int err;
@@ -384,7 +389,7 @@ unpin:
return err;
}
-static u32 timeline_advance(struct i915_timeline *tl)
+static u32 timeline_advance(struct intel_timeline *tl)
{
GEM_BUG_ON(!tl->pin_count);
GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
@@ -392,17 +397,17 @@ static u32 timeline_advance(struct i915_timeline *tl)
return tl->seqno += 1 + tl->has_initial_breadcrumb;
}
-static void timeline_rollback(struct i915_timeline *tl)
+static void timeline_rollback(struct intel_timeline *tl)
{
tl->seqno -= 1 + tl->has_initial_breadcrumb;
}
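Worked example (illustrative): with has_initial_breadcrumb == 0 the seqno advances by one per request (1, 2, 3, ...); with it set, by two (2, 4, 6, ...), reserving the intervening odd value for the timeline's initial breadcrumb, which is what the GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb) in timeline_advance() enforces. timeline_rollback() undoes exactly one such advance.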
static noinline int
-__i915_timeline_get_seqno(struct i915_timeline *tl,
- struct i915_request *rq,
- u32 *seqno)
+__intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
{
- struct i915_timeline_cacheline *cl;
+ struct intel_timeline_cacheline *cl;
unsigned int cacheline;
struct i915_vma *vma;
void *vaddr;
@@ -488,31 +493,31 @@ err_rollback:
return err;
}
-int i915_timeline_get_seqno(struct i915_timeline *tl,
- struct i915_request *rq,
- u32 *seqno)
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
{
*seqno = timeline_advance(tl);
/* Replace the HWSP on wraparound for HW semaphores */
if (unlikely(!*seqno && tl->hwsp_cacheline))
- return __i915_timeline_get_seqno(tl, rq, seqno);
+ return __intel_timeline_get_seqno(tl, rq, seqno);
return 0;
}
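/*
 * A note on the wraparound path, as inferred from the code above: the
 * seqno is a u32, so timeline_advance() eventually wraps to 0. Waiters
 * poll the HWSP for "seqno has passed", which a wrap would falsify for
 * anyone still watching the old value, so rather than reuse the slot,
 * __intel_timeline_get_seqno() migrates the timeline onto a fresh
 * cacheline and keeps the old one alive (via its i915_active) until
 * the last reader retires.
 */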
-static int cacheline_ref(struct i915_timeline_cacheline *cl,
+static int cacheline_ref(struct intel_timeline_cacheline *cl,
struct i915_request *rq)
{
return i915_active_ref(&cl->active, rq->fence.context, rq);
}
-int i915_timeline_read_hwsp(struct i915_request *from,
- struct i915_request *to,
- u32 *hwsp)
+int intel_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *to,
+ u32 *hwsp)
{
- struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
- struct i915_timeline *tl = from->timeline;
+ struct intel_timeline_cacheline *cl = from->hwsp_cacheline;
+ struct intel_timeline *tl = from->timeline;
int err;
GEM_BUG_ON(to->timeline == tl);
@@ -535,7 +540,7 @@ int i915_timeline_read_hwsp(struct i915_request *from,
return err;
}
-void i915_timeline_unpin(struct i915_timeline *tl)
+void intel_timeline_unpin(struct intel_timeline *tl)
{
GEM_BUG_ON(!tl->pin_count);
if (--tl->pin_count)
@@ -554,26 +559,31 @@ void i915_timeline_unpin(struct i915_timeline *tl)
__i915_vma_unpin(tl->hwsp_ggtt);
}
-void __i915_timeline_free(struct kref *kref)
+void __intel_timeline_free(struct kref *kref)
{
- struct i915_timeline *timeline =
+ struct intel_timeline *timeline =
container_of(kref, typeof(*timeline), kref);
- i915_timeline_fini(timeline);
+ intel_timeline_fini(timeline);
kfree(timeline);
}
-void i915_timelines_fini(struct drm_i915_private *i915)
+static void timelines_fini(struct intel_gt *gt)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
+ struct intel_gt_timelines *timelines = &gt->timelines;
- GEM_BUG_ON(!list_empty(&gt->active_list));
- GEM_BUG_ON(!list_empty(&gt->hwsp_free_list));
+ GEM_BUG_ON(!list_empty(&timelines->active_list));
+ GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
- mutex_destroy(&gt->mutex);
+ mutex_destroy(&timelines->mutex);
+}
+
+void intel_timelines_fini(struct drm_i915_private *i915)
+{
+ timelines_fini(&i915->gt);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_timeline.c"
-#include "selftests/i915_timeline.c"
+#include "gt/selftests/mock_timeline.c"
+#include "gt/selftest_timeline.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
new file mode 100644
index 000000000000..e08cebf64833
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_TIMELINE_H
+#define I915_TIMELINE_H
+
+#include <linux/lockdep.h>
+
+#include "i915_active.h"
+#include "i915_syncmap.h"
+#include "gt/intel_timeline_types.h"
+
+int intel_timeline_init(struct intel_timeline *tl,
+ struct intel_gt *gt,
+ struct i915_vma *hwsp);
+void intel_timeline_fini(struct intel_timeline *tl);
+
+struct intel_timeline *
+intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp);
+
+static inline struct intel_timeline *
+intel_timeline_get(struct intel_timeline *timeline)
+{
+ kref_get(&timeline->kref);
+ return timeline;
+}
+
+void __intel_timeline_free(struct kref *kref);
+static inline void intel_timeline_put(struct intel_timeline *timeline)
+{
+ kref_put(&timeline->kref, __intel_timeline_free);
+}
+
+static inline int __intel_timeline_sync_set(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_set(&tl->sync, context, seqno);
+}
+
+static inline int intel_timeline_sync_set(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_set(tl, fence->context, fence->seqno);
+}
+
+static inline bool __intel_timeline_sync_is_later(struct intel_timeline *tl,
+ u64 context, u32 seqno)
+{
+ return i915_syncmap_is_later(&tl->sync, context, seqno);
+}
+
+static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
+ const struct dma_fence *fence)
+{
+ return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);
+}
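+/*
+ * A minimal usage sketch (illustrative only): record a foreign fence
+ * once it is known to be ordered on this timeline, then use the query
+ * to elide redundant waits:
+ *
+ *	err = intel_timeline_sync_set(tl, fence);
+ *	...
+ *	if (intel_timeline_sync_is_later(tl, fence))
+ *		return 0;
+ *
+ * The early return is safe because i915_syncmap tracks the last seqno
+ * recorded per u64 fence context, so is_later() reports whether the
+ * given point is already covered by a previously recorded one.
+ */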
+
+int intel_timeline_pin(struct intel_timeline *tl);
+int intel_timeline_get_seqno(struct intel_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno);
+void intel_timeline_unpin(struct intel_timeline *tl);
+
+int intel_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *until,
+ u32 *hwsp_offset);
+
+void intel_timelines_init(struct drm_i915_private *i915);
+void intel_timelines_park(struct drm_i915_private *i915);
+void intel_timelines_fini(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index fce5cb4f1090..9a71aea7a338 100644
--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -16,10 +16,10 @@
struct drm_i915_private;
struct i915_vma;
-struct i915_timeline_cacheline;
+struct intel_timeline_cacheline;
struct i915_syncmap;
-struct i915_timeline {
+struct intel_timeline {
u64 fence_context;
u32 seqno;
@@ -30,7 +30,7 @@ struct i915_timeline {
struct i915_vma *hwsp_ggtt;
u32 hwsp_offset;
- struct i915_timeline_cacheline *hwsp_cacheline;
+ struct intel_timeline_cacheline *hwsp_cacheline;
bool has_initial_breadcrumb;
@@ -59,7 +59,7 @@ struct i915_timeline {
struct i915_syncmap *sync;
struct list_head link;
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct kref kref;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 98dfb086320f..704ace01e7f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "intel_context.h"
+#include "intel_gt.h"
#include "intel_workarounds.h"
/**
@@ -49,9 +50,10 @@
* - Public functions to init or apply the given workaround type.
*/
-static void wa_init_start(struct i915_wa_list *wal, const char *name)
+static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
{
wal->name = name;
+ wal->engine_name = engine_name;
}
#define WA_LIST_CHUNK (1 << 4)
@@ -73,8 +75,8 @@ static void wa_init_finish(struct i915_wa_list *wal)
if (!wal->count)
return;
- DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
- wal->wa_count, wal->name);
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds on %s\n",
+ wal->wa_count, wal->name, wal->engine_name);
}
static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
@@ -175,19 +177,6 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_write_masked_or(wal, reg, val, val);
}
-static void
-ignore_wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
-{
- struct i915_wa wa = {
- .reg = reg,
- .mask = mask,
- .val = val,
- /* Bonkers HW, skip verifying */
- };
-
- _wa_add(wal, &wa);
-}
-
#define WA_SET_BIT_MASKED(addr, mask) \
wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
@@ -536,12 +525,6 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
GEN8_ERRDETBCTRL);
- /* WaDisableBankHangMode:icl */
- wa_write(wal,
- GEN8_L3CNTLREG,
- intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
- GEN8_ERRDETBCTRL);
-
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
@@ -596,7 +579,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
return;
- wa_init_start(wal, name);
+ wa_init_start(wal, name, engine->name);
if (IS_GEN(i915, 11))
icl_ctx_workarounds_init(engine, wal);
@@ -766,7 +749,10 @@ static void
wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
- u32 mcr_slice_subslice_mask;
+ unsigned int slice, subslice;
+ u32 l3_en, mcr, mcr_mask;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 10);
/*
* WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
@@ -774,42 +760,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* the case, we might need to program MCR select to a valid L3Bank
* by default, to make sure we correctly read certain registers
* later on (in the range 0xB100 - 0xB3FF).
- * This might be incompatible with
- * WaProgramMgsrForCorrectSliceSpecificMmioReads.
- * Fortunately, this should not happen in production hardware, so
- * we only assert that this is the case (instead of implementing
- * something more complex that requires checking the range of every
- * MMIO read).
- */
- if (INTEL_GEN(i915) >= 10 &&
- is_power_of_2(sseu->slice_mask)) {
- /*
- * read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
- * enabled subslice, no need to redirect MCR packet
- */
- u32 slice = fls(sseu->slice_mask);
- u32 fuse3 =
- intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
- u8 ss_mask = sseu->subslice_mask[slice];
-
- u8 enabled_mask = (ss_mask | ss_mask >>
- GEN10_L3BANK_PAIR_COUNT) & GEN10_L3BANK_MASK;
- u8 disabled_mask = fuse3 & GEN10_L3BANK_MASK;
-
- /*
- * Production silicon should have matched L3Bank and
- * subslice enabled
- */
- WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
- }
-
- if (INTEL_GEN(i915) >= 11)
- mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
- GEN11_MCR_SUBSLICE_MASK;
- else
- mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
- GEN8_MCR_SUBSLICE_MASK;
- /*
+ *
* WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
* Before any MMIO read into slice/subslice specific registers, MCR
* packet control register needs to be programmed to point to any
@@ -819,11 +770,51 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* are consistent across s/ss in almost all cases. On the rare
* occasions, such as INSTDONE, where this value is dependent
* on the s/ss combo, the read should be done with read_subslice_reg.
+ *
+ * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both
+ * the subslice and the L3 bank to which the respective mmio reads
+ * will go, we have to find a common index which works for both
+ * accesses.
+ *
+ * The case where we cannot find a common index fortunately should not
+ * happen on production hardware, so we only emit a warning instead of
+ * implementing something more complex that would require checking the
+ * range of every MMIO read.
*/
- wa_write_masked_or(wal,
- GEN8_MCR_SELECTOR,
- mcr_slice_subslice_mask,
- intel_calculate_mcr_s_ss_select(i915));
+
+ if (INTEL_GEN(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
+ u32 l3_fuse =
+ intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
+ GEN10_L3BANK_MASK;
+
+ DRM_DEBUG_DRIVER("L3 fuse = %x\n", l3_fuse);
+ l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
+ } else {
+ l3_en = ~0;
+ }
+
+ slice = fls(sseu->slice_mask) - 1;
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+ subslice = fls(l3_en & sseu->subslice_mask[slice]);
+ if (!subslice) {
+ DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
+ sseu->subslice_mask[slice], l3_en);
+ subslice = fls(l3_en);
+ WARN_ON(!subslice);
+ }
+ subslice--;
+
+ if (INTEL_GEN(i915) >= 11) {
+ mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+ } else {
+ mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+ mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+ }
+
+ DRM_DEBUG_DRIVER("MCR slice/subslice = %x\n", mcr);
+
+ wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}
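/*
 * Worked example with hypothetical fuse values, assuming the usual
 * GEN10_L3BANK_MASK of 0xf and GEN10_L3BANK_PAIR_COUNT of 4: an l3_fuse
 * of 0x1 marks bank pair 0 as disabled, so
 * l3_en = ~(0x1 << 4 | 0x1) = 0xffffffee. With slice_mask == 0x1 and
 * subslice_mask[0] == 0x7, slice is 0 and
 * fls(0xffffffee & 0x7) == fls(0x6) == 3, i.e. subslice 2: an index
 * that is both an enabled subslice and backed by an enabled L3 bank,
 * which is exactly the common index described above.
 */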
static void
@@ -926,7 +917,7 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
struct i915_wa_list *wal = &i915->gt_wa_list;
- wa_init_start(wal, "GT");
+ wa_init_start(wal, "GT", "global");
gt_init_workarounds(i915, wal);
wa_init_finish(wal);
}
@@ -990,9 +981,9 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
spin_unlock_irqrestore(&uncore->lock, flags);
}
-void intel_gt_apply_workarounds(struct drm_i915_private *i915)
+void intel_gt_apply_workarounds(struct intel_gt *gt)
{
- wa_list_apply(&i915->uncore, &i915->gt_wa_list);
+ wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
}
static bool wa_list_verify(struct intel_uncore *uncore,
@@ -1011,10 +1002,23 @@ static bool wa_list_verify(struct intel_uncore *uncore,
return ok;
}
-bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
- const char *from)
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
+{
+ return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
+}
+
+static inline bool is_nonpriv_flags_valid(u32 flags)
{
- return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from);
+ /* Check only valid flag bits are set */
+ if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
+ return false;
+
+ /* NB: Only 3 out of 4 enum values are valid for access field */
+ if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
+ return false;
+
+ return true;
}
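/*
 * By way of illustration, using the macros referenced elsewhere in this
 * patch: a whitelist entry built as
 *
 *	RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4
 *
 * passes both checks, whereas RING_FORCE_TO_NONPRIV_ACCESS_INVALID (the
 * fourth encoding of the access field) or any stray bit outside
 * RING_FORCE_TO_NONPRIV_MASK_VALID causes whitelist_reg_ext() below to
 * reject the entry before it is OR'ed into the register offset.
 */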
static void
@@ -1027,6 +1031,9 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
return;
+ if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
+ return;
+
wa.reg.reg |= flags;
_wa_add(wal, &wa);
}
@@ -1034,7 +1041,7 @@ whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
static void
whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
{
- whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_RW);
+ whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
}
static void gen9_whitelist_build(struct i915_wa_list *w)
@@ -1115,7 +1122,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine)
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
- RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
}
@@ -1155,20 +1162,20 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
* - PS_DEPTH_COUNT_UDW
*/
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
- RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
break;
case VIDEO_DECODE_CLASS:
/* hucStatusRegOffset */
whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
- RING_FORCE_TO_NONPRIV_RD);
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
/* hucUKernelHdrInfoRegOffset */
whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
- RING_FORCE_TO_NONPRIV_RD);
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
/* hucStatus2RegOffset */
whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
- RING_FORCE_TO_NONPRIV_RD);
+ RING_FORCE_TO_NONPRIV_ACCESS_RD);
break;
default:
@@ -1181,7 +1188,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
- wa_init_start(w, "whitelist");
+ wa_init_start(w, "whitelist", engine->name);
if (IS_GEN(i915, 11))
icl_whitelist_build(engine);
@@ -1240,10 +1247,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
/* WaPipelineFlushCoherentLines:icl */
- ignore_wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN8_LQSC_FLUSH_COHERENT_LINES,
- GEN8_LQSC_FLUSH_COHERENT_LINES);
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
/*
* Wa_1405543622:icl
@@ -1270,10 +1276,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* Wa_1405733216:icl
* Formerly known as WaDisableCleanEvicts
*/
- ignore_wa_write_or(wal,
- GEN8_L3SQCREG4,
- GEN11_LQSC_CLEAN_EVICT_DISABLE,
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
/* WaForwardProgressSoftReset:icl */
wa_write_or(wal,
@@ -1292,6 +1297,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
+
+ /* Wa_1409178092:icl */
+ wa_write_masked_or(wal,
+ GEN11_SCRATCH2,
+ GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
+ 0);
}
if (IS_GEN_RANGE(i915, 9, 11)) {
@@ -1360,7 +1371,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
return;
- if (engine->id == RCS0)
+ if (engine->class == RENDER_CLASS)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
@@ -1370,10 +1381,10 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
- if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ if (INTEL_GEN(engine->i915) < 8)
return;
- wa_init_start(wal, engine->name);
+ wa_init_start(wal, "engine", engine->name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
}
@@ -1416,26 +1427,50 @@ err_obj:
return ERR_PTR(err);
}
+static bool mcr_range(struct drm_i915_private *i915, u32 offset)
+{
+ /*
+ * Registers in this range are affected by the MCR selector
+ * which only controls CPU-initiated MMIO. Routing does not
+ * work for CS access so we cannot verify them on this path.
+ */
+ if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff))
+ return true;
+
+ return false;
+}
+
static int
wa_list_srm(struct i915_request *rq,
const struct i915_wa_list *wal,
struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = rq->i915;
+ unsigned int i, count = 0;
const struct i915_wa *wa;
- unsigned int i;
u32 srm, *cs;
srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- if (INTEL_GEN(rq->i915) >= 8)
+ if (INTEL_GEN(i915) >= 8)
srm++;
- cs = intel_ring_begin(rq, 4 * wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
+ count++;
+ }
+
+ cs = intel_ring_begin(rq, 4 * count);
if (IS_ERR(cs))
return PTR_ERR(cs);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 offset = i915_mmio_reg_offset(wa->reg);
+
+ if (mcr_range(i915, offset))
+ continue;
+
*cs++ = srm;
- *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = offset;
*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
*cs++ = 0;
}
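/*
 * Layout note for the loop above: each MI_STORE_REGISTER_MEM packet
 * occupies four dwords (opcode, register offset, GGTT address, upper
 * address or padding), hence the ring was sized as 4 * count once the
 * MCR-range registers were filtered out. The destination slot keeps the
 * unfiltered index i, so entries for skipped registers are simply never
 * written, and the verification loop further below skips the same
 * entries via mcr_range() before comparing.
 */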
@@ -1458,7 +1493,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (!wal->count)
return 0;
- vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
+ vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -1485,9 +1520,13 @@ static int engine_wa_list_verify(struct intel_context *ce,
}
err = 0;
- for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
+ continue;
+
if (!wa_verify(wa, results[i], wal->name, from))
err = -ENXIO;
+ }
i915_gem_object_unpin_map(vma->obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 3761a6ee58bb..8c9c769c2204 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -14,6 +14,7 @@
struct drm_i915_private;
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
@@ -25,9 +26,8 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
int intel_engine_emit_ctx_wa(struct i915_request *rq);
void intel_gt_init_workarounds(struct drm_i915_private *i915);
-void intel_gt_apply_workarounds(struct drm_i915_private *i915);
-bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
- const char *from);
+void intel_gt_apply_workarounds(struct intel_gt *gt);
+bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from);
void intel_engine_init_whitelist(struct intel_engine_cs *engine);
void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index 42ac1fb99572..e27ab1b710b3 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -20,6 +20,7 @@ struct i915_wa {
struct i915_wa_list {
const char *name;
+ const char *engine_name;
struct i915_wa *list;
unsigned int count;
unsigned int wa_count;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 486c6953dcb1..10cb312462e5 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -33,15 +33,15 @@
struct mock_ring {
struct intel_ring base;
- struct i915_timeline timeline;
+ struct intel_timeline timeline;
};
-static void mock_timeline_pin(struct i915_timeline *tl)
+static void mock_timeline_pin(struct intel_timeline *tl)
{
tl->pin_count++;
}
-static void mock_timeline_unpin(struct i915_timeline *tl)
+static void mock_timeline_unpin(struct intel_timeline *tl)
{
GEM_BUG_ON(!tl->pin_count);
tl->pin_count--;
@@ -56,7 +56,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
if (!ring)
return NULL;
- if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
+ if (intel_timeline_init(&ring->timeline, engine->gt, NULL)) {
kfree(ring);
return NULL;
}
@@ -78,7 +78,7 @@ static void mock_ring_free(struct intel_ring *base)
{
struct mock_ring *ring = container_of(base, typeof(*ring), base);
- i915_timeline_fini(&ring->timeline);
+ intel_timeline_fini(&ring->timeline);
kfree(ring);
}
@@ -142,6 +142,7 @@ static void mock_context_destroy(struct kref *ref)
if (ce->ring)
mock_ring_free(ce->ring);
+ intel_context_fini(ce);
intel_context_free(ce);
}
@@ -155,7 +156,7 @@ static int mock_context_pin(struct intel_context *ce)
return -ENOMEM;
}
- ret = intel_context_active_acquire(ce, PIN_HIGH);
+ ret = intel_context_active_acquire(ce);
if (ret)
return ret;
@@ -257,6 +258,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
/* minimal engine setup for requests */
engine->base.i915 = i915;
+ engine->base.gt = &i915->gt;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
engine->base.mask = BIT(id);
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 1ee4c923044f..4484b4447db1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -25,13 +25,13 @@
#include <linux/kthread.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "intel_engine_pm.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
-#include "selftests/igt_wedge_me.h"
#include "selftests/igt_atomic.h"
#include "selftests/mock_drm.h"
@@ -42,7 +42,7 @@
#define IGT_IDLE_TIMEOUT 50 /* ms; time to wait after flushing between tests */
struct hang {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
@@ -50,27 +50,27 @@ struct hang {
u32 *batch;
};
-static int hang_init(struct hang *h, struct drm_i915_private *i915)
+static int hang_init(struct hang *h, struct intel_gt *gt)
{
void *vaddr;
int err;
memset(h, 0, sizeof(*h));
- h->i915 = i915;
+ h->gt = gt;
- h->ctx = kernel_context(i915);
+ h->ctx = kernel_context(gt->i915);
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
- h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ h->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(h->hws)) {
err = PTR_ERR(h->hws);
goto err_ctx;
}
- h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ h->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(h->obj)) {
err = PTR_ERR(h->obj);
goto err_hws;
@@ -85,7 +85,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map(h->obj,
- i915_coherent_map_type(i915));
+ i915_coherent_map_type(gt->i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -127,35 +127,31 @@ static int move_to_active(struct i915_vma *vma,
static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm;
+ struct intel_gt *gt = h->gt;
+ struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
+ struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
+ void *vaddr;
u32 *batch;
int err;
- if (i915_gem_object_is_active(h->obj)) {
- struct drm_i915_gem_object *obj;
- void *vaddr;
-
- obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
+ obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
- vaddr = i915_gem_object_pin_map(obj,
- i915_coherent_map_type(h->i915));
- if (IS_ERR(vaddr)) {
- i915_gem_object_put(obj);
- return ERR_CAST(vaddr);
- }
+ vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vaddr);
+ }
- i915_gem_object_unpin_map(h->obj);
- i915_gem_object_put(h->obj);
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
- h->obj = obj;
- h->batch = vaddr;
- }
+ h->obj = obj;
+ h->batch = vaddr;
vma = i915_vma_instance(h->obj, vm, NULL);
if (IS_ERR(vma))
@@ -188,7 +184,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
goto cancel_rq;
batch = h->batch;
- if (INTEL_GEN(i915) >= 8) {
+ if (INTEL_GEN(gt->i915) >= 8) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = lower_32_bits(hws_address(hws, rq));
*batch++ = upper_32_bits(hws_address(hws, rq));
@@ -202,7 +198,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
*batch++ = lower_32_bits(vma->node.start);
*batch++ = upper_32_bits(vma->node.start);
- } else if (INTEL_GEN(i915) >= 6) {
+ } else if (INTEL_GEN(gt->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -215,7 +211,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_ARB_CHECK;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
*batch++ = lower_32_bits(vma->node.start);
- } else if (INTEL_GEN(i915) >= 4) {
+ } else if (INTEL_GEN(gt->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -242,7 +238,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = lower_32_bits(vma->node.start);
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
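/*
 * In summary, the batch emitted above first stores rq->fence.seqno to
 * the per-request HWS slot (so wait_until_running() can tell the
 * spinner has reached the GPU) and then loops forever via
 * MI_BATCH_BUFFER_START back into itself; it only terminates once a
 * test rewrites the first dword of the batch to MI_BATCH_BUFFER_END
 * and flushes.
 */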
- i915_gem_chipset_flush(h->i915);
+ intel_gt_chipset_flush(engine->gt);
if (rq->engine->emit_init_breadcrumb) {
err = rq->engine->emit_init_breadcrumb(rq);
@@ -251,7 +247,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
}
flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
+ if (INTEL_GEN(gt->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
@@ -276,7 +272,7 @@ static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
static void hang_fini(struct hang *h)
{
*h->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(h->i915);
+ intel_gt_chipset_flush(h->gt);
i915_gem_object_unpin_map(h->obj);
i915_gem_object_put(h->obj);
@@ -286,7 +282,7 @@ static void hang_fini(struct hang *h)
kernel_context_close(h->ctx);
- igt_flush_test(h->i915, I915_WAIT_LOCKED);
+ igt_flush_test(h->gt->i915, I915_WAIT_LOCKED);
}
static bool wait_until_running(struct hang *h, struct i915_request *rq)
@@ -301,7 +297,7 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
static int igt_hang_sanitycheck(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_request *rq;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -310,13 +306,13 @@ static int igt_hang_sanitycheck(void *arg)
/* Basic check that we can execute our hanging batch */
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- for_each_engine(engine, i915, id) {
- struct igt_wedge_me w;
+ for_each_engine(engine, gt->i915, id) {
+ struct intel_wedge_me w;
long timeout;
if (!intel_engine_can_store_dword(engine))
@@ -333,15 +329,15 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_get(rq);
*h.batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
i915_request_add(rq);
timeout = 0;
- igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
+ intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
timeout = i915_request_wait(rq, 0,
MAX_SCHEDULE_TIMEOUT);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
timeout = -EIO;
i915_request_put(rq);
@@ -357,7 +353,7 @@ static int igt_hang_sanitycheck(void *arg)
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -368,37 +364,37 @@ static bool wait_for_idle(struct intel_engine_cs *engine)
static int igt_reset_nop(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
unsigned int reset_count, count;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct drm_file *file;
IGT_TIMEOUT(end_time);
int err = 0;
/* Check that we can reset during non-user portions of requests */
- file = mock_file(i915);
+ file = mock_file(gt->i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- ctx = live_context(i915, file);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ ctx = live_context(gt->i915, file);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- reset_count = i915_reset_count(&i915->gpu_error);
+ reset_count = i915_reset_count(global);
count = 0;
do {
- mutex_lock(&i915->drm.struct_mutex);
- for_each_engine(engine, i915, id) {
+ mutex_lock(&gt->i915->drm.struct_mutex);
+
+ for_each_engine(engine, gt->i915, id) {
int i;
for (i = 0; i < 16; i++) {
@@ -413,82 +409,78 @@ static int igt_reset_nop(void *arg)
i915_request_add(rq);
}
}
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_lock(i915);
- i915_reset(i915, ALL_ENGINES, NULL);
- igt_global_reset_unlock(i915);
- if (i915_reset_failed(i915)) {
+ igt_global_reset_lock(gt);
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(gt);
+
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ if (intel_gt_is_wedged(gt)) {
err = -EIO;
break;
}
- if (i915_reset_count(&i915->gpu_error) !=
- reset_count + ++count) {
+ if (i915_reset_count(global) != reset_count + ++count) {
pr_err("Full GPU reset not recorded!\n");
err = -EINVAL;
break;
}
- err = igt_flush_test(i915, 0);
+ err = igt_flush_test(gt->i915, 0);
if (err)
break;
} while (time_before(jiffies, end_time));
pr_info("%s: %d resets\n", __func__, count);
- mutex_lock(&i915->drm.struct_mutex);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
out:
- mock_file_free(i915, file);
- if (i915_reset_failed(i915))
+ mock_file_free(gt->i915, file);
+ if (intel_gt_is_wedged(gt))
err = -EIO;
return err;
}
static int igt_reset_nop_engine(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- intel_wakeref_t wakeref;
struct drm_file *file;
int err = 0;
/* Check that we can engine-reset during non-user portions */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
- file = mock_file(i915);
+ file = mock_file(gt->i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- ctx = live_context(i915, file);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ ctx = live_context(gt->i915, file);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
}
i915_gem_context_clear_bannable(ctx);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
unsigned int reset_count, reset_engine_count;
unsigned int count;
IGT_TIMEOUT(end_time);
- reset_count = i915_reset_count(&i915->gpu_error);
- reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
- engine);
+ reset_count = i915_reset_count(global);
+ reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
int i;
@@ -499,7 +491,7 @@ static int igt_reset_nop_engine(void *arg)
break;
}
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
for (i = 0; i < 16; i++) {
struct i915_request *rq;
@@ -511,21 +503,20 @@ static int igt_reset_nop_engine(void *arg)
i915_request_add(rq);
}
- mutex_unlock(&i915->drm.struct_mutex);
-
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
}
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ if (i915_reset_count(global) != reset_count) {
pr_err("Full GPU reset recorded! (engine reset expected)\n");
err = -EINVAL;
break;
}
- if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ if (i915_reset_engine_count(global, engine) !=
reset_engine_count + ++count) {
pr_err("%s engine reset not recorded!\n",
engine->name);
@@ -533,31 +524,31 @@ static int igt_reset_nop_engine(void *arg)
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
if (err)
break;
- err = igt_flush_test(i915, 0);
+ err = igt_flush_test(gt->i915, 0);
if (err)
break;
}
- mutex_lock(&i915->drm.struct_mutex);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
out:
- mock_file_free(i915, file);
- if (i915_reset_failed(i915))
+ mock_file_free(gt->i915, file);
+ if (intel_gt_is_wedged(gt))
err = -EIO;
return err;
}
-static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
+static int __igt_reset_engine(struct intel_gt *gt, bool active)
{
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
@@ -565,18 +556,18 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
/* Check that we can issue an engine reset on an idle engine (no-op) */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
if (active) {
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
unsigned int reset_count, reset_engine_count;
IGT_TIMEOUT(end_time);
@@ -590,30 +581,29 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- reset_count = i915_reset_count(&i915->gpu_error);
- reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
- engine);
+ reset_count = i915_reset_count(global);
+ reset_engine_count = i915_reset_engine_count(global, engine);
intel_engine_pm_get(engine);
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
if (active) {
struct i915_request *rq;
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
@@ -628,19 +618,19 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
i915_request_put(rq);
}
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("i915_reset_engine failed\n");
break;
}
- if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ if (i915_reset_count(global) != reset_count) {
pr_err("Full GPU reset recorded! (engine reset expected)\n");
err = -EINVAL;
break;
}
- if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ if (i915_reset_engine_count(global, engine) !=
++reset_engine_count) {
pr_err("%s engine reset not recorded!\n",
engine->name);
@@ -648,24 +638,24 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
intel_engine_pm_put(engine);
if (err)
break;
- err = igt_flush_test(i915, 0);
+ err = igt_flush_test(gt->i915, 0);
if (err)
break;
}
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
if (active) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
hang_fini(&h);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
}
return err;
@@ -707,7 +697,7 @@ static int active_request_put(struct i915_request *rq)
rq->fence.seqno);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(rq->i915);
+ intel_gt_set_wedged(rq->engine->gt);
err = -EIO;
}
@@ -784,10 +774,11 @@ err_file:
return err;
}
-static int __igt_reset_engines(struct drm_i915_private *i915,
+static int __igt_reset_engines(struct intel_gt *gt,
const char *test_name,
unsigned int flags)
{
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine, *other;
enum intel_engine_id id, tmp;
struct hang h;
@@ -797,13 +788,13 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
* with any other engine.
*/
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
if (flags & TEST_ACTIVE) {
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
return err;
@@ -811,9 +802,9 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
h.ctx->sched.priority = 1024;
}
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
struct active_engine threads[I915_NUM_ENGINES] = {};
- unsigned long global = i915_reset_count(&i915->gpu_error);
+ unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
IGT_TIMEOUT(end_time);
@@ -829,12 +820,11 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
memset(threads, 0, sizeof(threads));
- for_each_engine(other, i915, tmp) {
+ for_each_engine(other, gt->i915, tmp) {
struct task_struct *tsk;
threads[tmp].resets =
- i915_reset_engine_count(&i915->gpu_error,
- other);
+ i915_reset_engine_count(global, other);
if (!(flags & TEST_OTHERS))
continue;
@@ -857,25 +847,25 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
intel_engine_pm_get(engine);
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
if (flags & TEST_ACTIVE) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
break;
}
i915_request_get(rq);
i915_request_add(rq);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
@@ -888,7 +878,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
}
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
if (err) {
pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
engine->name, test_name, err);
@@ -900,7 +890,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
if (rq) {
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
" failed to complete request after reset\n",
@@ -910,7 +900,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
i915_request_put(rq);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
break;
}
@@ -920,7 +910,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
struct drm_printer p =
- drm_info_printer(i915->drm.dev);
+ drm_info_printer(gt->i915->drm.dev);
pr_err("i915_reset_engine(%s:%s):"
" failed to idle after reset\n",
@@ -932,12 +922,12 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
break;
}
} while (time_before(jiffies, end_time));
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
intel_engine_pm_put(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
- reported = i915_reset_engine_count(&i915->gpu_error, engine);
+ reported = i915_reset_engine_count(global, engine);
reported -= threads[engine->id].resets;
if (reported != count) {
pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
@@ -947,7 +937,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
}
unwind:
- for_each_engine(other, i915, tmp) {
+ for_each_engine(other, gt->i915, tmp) {
int ret;
if (!threads[tmp].task)
@@ -962,22 +952,21 @@ unwind:
}
put_task_struct(threads[tmp].task);
- if (other != engine &&
+ if (other->uabi_class != engine->uabi_class &&
threads[tmp].resets !=
- i915_reset_engine_count(&i915->gpu_error, other)) {
+ i915_reset_engine_count(global, other)) {
pr_err("Innocent engine %s was reset (count=%ld)\n",
other->name,
- i915_reset_engine_count(&i915->gpu_error,
- other) -
+ i915_reset_engine_count(global, other) -
threads[tmp].resets);
if (!err)
err = -EINVAL;
}
}
- if (global != i915_reset_count(&i915->gpu_error)) {
+ if (device != i915_reset_count(global)) {
pr_err("Global reset (count=%ld)!\n",
- i915_reset_count(&i915->gpu_error) - global);
+ i915_reset_count(global) - device);
if (!err)
err = -EINVAL;
}
@@ -985,20 +974,20 @@ unwind:
if (err)
break;
- mutex_lock(&i915->drm.struct_mutex);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (err)
break;
}
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
if (flags & TEST_ACTIVE) {
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
hang_fini(&h);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
}
return err;
@@ -1024,13 +1013,13 @@ static int igt_reset_engines(void *arg)
},
{ }
};
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
typeof(*phases) *p;
int err;
for (p = phases; p->name; p++) {
if (p->flags & TEST_PRIORITY) {
- if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
continue;
}
@@ -1042,38 +1031,39 @@ static int igt_reset_engines(void *arg)
return 0;
}
-static u32 fake_hangcheck(struct drm_i915_private *i915,
- intel_engine_mask_t mask)
+static u32 fake_hangcheck(struct intel_gt *gt, intel_engine_mask_t mask)
{
- u32 count = i915_reset_count(&i915->gpu_error);
+ u32 count = i915_reset_count(&gt->i915->gpu_error);
- i915_reset(i915, mask, NULL);
+ intel_gt_reset(gt, mask, NULL);
return count;
}
static int igt_reset_wait(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine = gt->i915->engine[RCS0];
struct i915_request *rq;
unsigned int reset_count;
struct hang h;
long timeout;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS0]))
+ if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we detect a stuck waiter and issue a reset */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- rq = hang_create_request(&h, i915->engine[RCS0]);
+ rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto fini;
@@ -1083,19 +1073,19 @@ static int igt_reset_wait(void *arg)
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto out_rq;
}
- reset_count = fake_hangcheck(i915, ALL_ENGINES);
+ reset_count = fake_hangcheck(gt, ALL_ENGINES);
timeout = i915_request_wait(rq, 0, 10);
if (timeout < 0) {
@@ -1105,7 +1095,7 @@ static int igt_reset_wait(void *arg)
goto out_rq;
}
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(global) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
goto out_rq;
@@ -1116,10 +1106,10 @@ out_rq:
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO;
return err;
@@ -1178,11 +1168,12 @@ out_unlock:
return err;
}
-static int __igt_reset_evict_vma(struct drm_i915_private *i915,
+static int __igt_reset_evict_vma(struct intel_gt *gt,
struct i915_address_space *vm,
int (*fn)(void *),
unsigned int flags)
{
+ struct intel_engine_cs *engine = gt->i915->engine[RCS0];
struct drm_i915_gem_object *obj;
struct task_struct *tsk = NULL;
struct i915_request *rq;
@@ -1190,17 +1181,17 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
struct hang h;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS0]))
+ if (!engine || !intel_engine_can_store_dword(engine))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- obj = i915_gem_object_create_internal(i915, SZ_1M);
+ obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto fini;
@@ -1220,7 +1211,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
goto out_obj;
}
- rq = hang_create_request(&h, i915->engine[RCS0]);
+ rq = hang_create_request(&h, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_obj;
@@ -1258,16 +1249,16 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
if (err)
goto out_rq;
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
goto out_reset;
}
@@ -1284,31 +1275,31 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
wait_for_completion(&arg.completion);
if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("igt/evict_vma kthread did not wait\n");
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
goto out_reset;
}
out_reset:
- igt_global_reset_lock(i915);
- fake_hangcheck(rq->i915, rq->engine->mask);
- igt_global_reset_unlock(i915);
+ igt_global_reset_lock(gt);
+ fake_hangcheck(gt, rq->engine->mask);
+ igt_global_reset_unlock(gt);
if (tsk) {
- struct igt_wedge_me w;
+ struct intel_wedge_me w;
/* The reset, even indirectly, should take less than 10ms. */
- igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout*/)
+ intel_wedge_on_timeout(&w, gt, HZ / 10 /* 100ms */)
err = kthread_stop(tsk);
put_task_struct(tsk);
}
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
out_rq:
i915_request_put(rq);
out_obj:
@@ -1316,9 +1307,9 @@ out_obj:
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO;
return err;
@@ -1326,26 +1317,26 @@ unlock:
static int igt_reset_evict_ggtt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
- return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+ return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
evict_vma, EXEC_OBJECT_WRITE);
}
static int igt_reset_evict_ppgtt(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
struct i915_gem_context *ctx;
struct drm_file *file;
int err;
- file = mock_file(i915);
+ file = mock_file(gt->i915);
if (IS_ERR(file))
return PTR_ERR(file);
- mutex_lock(&i915->drm.struct_mutex);
- ctx = live_context(i915, file);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ ctx = live_context(gt->i915, file);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out;
@@ -1353,29 +1344,29 @@ static int igt_reset_evict_ppgtt(void *arg)
err = 0;
if (ctx->vm) /* aliasing == global gtt locking, covered above */
- err = __igt_reset_evict_vma(i915, ctx->vm,
+ err = __igt_reset_evict_vma(gt, ctx->vm,
evict_vma, EXEC_OBJECT_WRITE);
out:
- mock_file_free(i915, file);
+ mock_file_free(gt->i915, file);
return err;
}
static int igt_reset_evict_fence(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
- return __igt_reset_evict_vma(i915, &i915->ggtt.vm,
+ return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
evict_fence, EXEC_OBJECT_NEEDS_FENCE);
}
-static int wait_for_others(struct drm_i915_private *i915,
+static int wait_for_others(struct intel_gt *gt,
struct intel_engine_cs *exclude)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
if (engine == exclude)
continue;
@@ -1388,7 +1379,8 @@ static int wait_for_others(struct drm_i915_private *i915,
static int igt_reset_queue(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct hang h;
@@ -1396,14 +1388,14 @@ static int igt_reset_queue(void *arg)
/* Check that we replay pending requests following a hang */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
- mutex_lock(&i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ err = hang_init(&h, gt);
if (err)
goto unlock;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
struct i915_request *prev;
IGT_TIMEOUT(end_time);
unsigned int count;
@@ -1444,7 +1436,7 @@ static int igt_reset_queue(void *arg)
* (hangcheck), or we focus on resetting just one
* engine and so avoid repeatedly resetting innocents.
*/
- err = wait_for_others(i915, engine);
+ err = wait_for_others(gt, engine);
if (err) {
pr_err("%s(%s): Failed to idle other inactive engines after device reset\n",
__func__, engine->name);
@@ -1452,12 +1444,12 @@ static int igt_reset_queue(void *arg)
i915_request_put(prev);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
goto fini;
}
if (!wait_until_running(&h, prev)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
@@ -1468,13 +1460,13 @@ static int igt_reset_queue(void *arg)
i915_request_put(rq);
i915_request_put(prev);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto fini;
}
- reset_count = fake_hangcheck(i915, BIT(id));
+ reset_count = fake_hangcheck(gt, BIT(id));
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1494,7 +1486,7 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(global) == reset_count) {
pr_err("No GPU reset recorded!\n");
i915_request_put(rq);
i915_request_put(prev);
@@ -1509,11 +1501,11 @@ static int igt_reset_queue(void *arg)
pr_info("%s: Completed %d resets\n", engine->name, count);
*h.batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
i915_request_put(prev);
- err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ err = igt_flush_test(gt->i915, I915_WAIT_LOCKED);
if (err)
break;
}
@@ -1521,10 +1513,10 @@ static int igt_reset_queue(void *arg)
fini:
hang_fini(&h);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO;
return err;
@@ -1532,8 +1524,9 @@ unlock:
static int igt_handle_error(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS0];
+ struct intel_gt *gt = arg;
+ struct i915_gpu_error *global = &gt->i915->gpu_error;
+ struct intel_engine_cs *engine = gt->i915->engine[RCS0];
struct hang h;
struct i915_request *rq;
struct i915_gpu_state *error;
@@ -1541,15 +1534,15 @@ static int igt_handle_error(void *arg)
/* Check that we can issue a global GPU and engine reset */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
if (!engine || !intel_engine_can_store_dword(engine))
return 0;
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
- err = hang_init(&h, i915);
+ err = hang_init(&h, gt);
if (err)
goto err_unlock;
@@ -1563,28 +1556,28 @@ static int igt_handle_error(void *arg)
i915_request_add(rq);
if (!wait_until_running(&h, rq)) {
- struct drm_printer p = drm_info_printer(i915->drm.dev);
+ struct drm_printer p = drm_info_printer(gt->i915->drm.dev);
pr_err("%s: Failed to start request %llx, at %x\n",
__func__, rq->fence.seqno, hws_seqno(&h, rq));
intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
err = -EIO;
goto err_request;
}
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
/* Temporarily disable error capture */
- error = xchg(&i915->gpu_error.first_error, (void *)-1);
+ error = xchg(&global->first_error, (void *)-1);
- i915_handle_error(i915, engine->mask, 0, NULL);
+ intel_gt_handle_error(gt, engine->mask, 0, NULL);
- xchg(&i915->gpu_error.first_error, error);
+ xchg(&global->first_error, error);
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
if (rq->fence.error != -EIO) {
pr_err("Guilty request not identified!\n");
@@ -1597,7 +1590,7 @@ err_request:
err_fini:
hang_fini(&h);
err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
return err;
}
@@ -1614,7 +1607,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
tasklet_disable_nosync(t);
p->critical_section_begin();
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
p->critical_section_end();
tasklet_enable(t);
@@ -1629,7 +1622,6 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
const struct igt_atomic_section *p)
{
- struct drm_i915_private *i915 = engine->i915;
struct i915_request *rq;
struct hang h;
int err;
@@ -1638,7 +1630,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
if (err)
return err;
- err = hang_init(&h, i915);
+ err = hang_init(&h, engine->gt);
if (err)
return err;
@@ -1657,16 +1649,16 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
pr_err("%s(%s): Failed to start request %llx, at %x\n",
__func__, engine->name,
rq->fence.seqno, hws_seqno(&h, rq));
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(engine->gt);
err = -EIO;
}
if (err == 0) {
- struct igt_wedge_me w;
+ struct intel_wedge_me w;
- igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout*/)
+ intel_wedge_on_timeout(&w, engine->gt, HZ / 20 /* 50ms */)
i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(engine->gt))
err = -EIO;
}
@@ -1678,30 +1670,30 @@ out:
static int igt_reset_engines_atomic(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
int err = 0;
/* Check that the engines resets are usable from atomic context */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- igt_global_reset_lock(i915);
- mutex_lock(&i915->drm.struct_mutex);
+ igt_global_reset_lock(gt);
+ mutex_lock(&gt->i915->drm.struct_mutex);
/* Flush any requests before we get started and check basics */
- if (!igt_force_reset(i915))
+ if (!igt_force_reset(gt))
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
err = igt_atomic_reset_engine(engine, p);
if (err)
goto out;
@@ -1710,11 +1702,11 @@ static int igt_reset_engines_atomic(void *arg)
out:
/* As we poke around the guts, do a full reset before continuing. */
- igt_force_reset(i915);
+ igt_force_reset(gt);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+ igt_global_reset_unlock(gt);
return err;
}
@@ -1736,28 +1728,29 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
};
+ struct intel_gt *gt = &i915->gt;
intel_wakeref_t wakeref;
bool saved_hangcheck;
int err;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt->i915))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
- drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */
+ drain_delayed_work(&gt->hangcheck.work); /* flush param */
- err = i915_subtests(tests, i915);
+ err = intel_gt_live_subtests(tests, gt);
- mutex_lock(&i915->drm.struct_mutex);
- igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ igt_flush_test(gt->i915, I915_WAIT_LOCKED);
+ mutex_unlock(&gt->i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 401e8b539297..60f27e52d267 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -55,7 +55,7 @@ static int live_sanitycheck(void *arg)
if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx;
}
@@ -73,12 +73,231 @@ err_ctx:
err_spin:
igt_spinner_fini(&spin);
err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
+static int
+emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + 4 * idx;
+ *cs++ = 0;
+
+ if (idx > 0) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ intel_ring_advance(rq, cs);
+ return 0;
+}
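The chain built above counts down: request idx spins (MI_SEMAPHORE_WAIT | MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_NEQ_SDD) until dword idx of the shared page becomes non-zero, then stores 1 into dword idx - 1 to release its predecessor; only the head (idx 0) has nothing to release. A minimal host-side model of that unwinding, assuming it mirrors the batch semantics (plain C, not the i915 API):

#include <stdio.h>

#define CHAIN_LEN 8

int main(void)
{
	unsigned int sema[CHAIN_LEN] = { 0 };
	int idx;

	/* release_queue(): store 1 into the top slot of the chain */
	sema[CHAIN_LEN - 1] = 1;

	/* each link polls its own slot, then releases the one below */
	for (idx = CHAIN_LEN - 1; idx > 0; idx--) {
		while (!sema[idx])
			;		/* MI_SEMAPHORE_WAIT | MI_SEMAPHORE_POLL */
		sema[idx - 1] = 1;	/* MI_STORE_DWORD_IMM_GEN4 */
	}

	printf("head released: %u\n", sema[0]);	/* prints 1 */
	return 0;
}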
+
+static struct i915_request *
+semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
+{
+ struct i915_gem_context *ctx;
+ struct i915_request *rq;
+ int err;
+
+ ctx = kernel_context(engine->i915);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ rq = igt_request_alloc(ctx, engine);
+ if (IS_ERR(rq))
+ goto out_ctx;
+
+ err = emit_semaphore_chain(rq, vma, idx);
+ i915_request_add(rq);
+ if (err)
+ rq = ERR_PTR(err);
+
+out_ctx:
+ kernel_context_close(ctx);
+ return rq;
+}
+
+static int
+release_queue(struct intel_engine_cs *engine,
+ struct i915_vma *vma,
+ int idx)
+{
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
+ };
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = i915_request_create(engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+ i915_request_add(rq);
+
+ engine->schedule(rq, &attr);
+
+ return 0;
+}
+
+static int
+slice_semaphore_queue(struct intel_engine_cs *outer,
+ struct i915_vma *vma,
+ int count)
+{
+ struct intel_engine_cs *engine;
+ struct i915_request *head;
+ enum intel_engine_id id;
+ int err, i, n = 0;
+
+ head = semaphore_queue(outer, vma, n++);
+ if (IS_ERR(head))
+ return PTR_ERR(head);
+
+ i915_request_get(head);
+ for_each_engine(engine, outer->i915, id) {
+ for (i = 0; i < count; i++) {
+ struct i915_request *rq;
+
+ rq = semaphore_queue(engine, vma, n++);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ }
+ }
+
+ err = release_queue(outer, vma, n);
+ if (err)
+ goto out;
+
+ if (i915_request_wait(head,
+ I915_WAIT_LOCKED,
+ 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) {
+ pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n",
+ count, n);
+ GEM_TRACE_DUMP();
+ intel_gt_set_wedged(outer->gt);
+ err = -EIO;
+ }
+
+out:
+ i915_request_put(head);
+ return err;
+}
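The timeout handed to i915_request_wait() above budgets for every link in the chain being timesliced onto its engine before the head can complete, so it grows quadratically with the chain length: 2 * num_engines * (count + 2) * (count + 3) jiffies. As a worked example, with 8 engines and count = 5 the budget is 2 * 8 * 7 * 8 = 896 jiffies.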
+
+static int live_timeslice_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err = 0;
+ int count;
+
+ /*
+ * If a request takes too long, we would like to give other users
+ * a fair go on the GPU. In particular, users may create batches
+ * that wait upon external input, where that input may even be
+ * supplied by another GPU job. To avoid blocking forever, we
+ * need to preempt the current task and replace it with another
+ * ready task.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_unlock;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_prime_number_from(count, 1, 16) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ memset(vaddr, 0, PAGE_SIZE);
+
+ err = slice_semaphore_queue(engine, vma, count);
+ if (err)
+ goto err_pin;
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+ err = -EIO;
+ goto err_pin;
+ }
+ }
+ }
+
+err_pin:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_unlock:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
static int live_busywait_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -138,6 +357,9 @@ static int live_busywait_preempt(void *arg)
struct igt_live_test t;
u32 *cs;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
if (!intel_engine_can_store_dword(engine))
continue;
@@ -229,7 +451,7 @@ static int live_busywait_preempt(void *arg)
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_vma;
}
@@ -253,8 +475,6 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -320,7 +540,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -337,7 +557,7 @@ static int live_preempt(void *arg)
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -361,7 +581,6 @@ err_spin_lo:
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -398,6 +617,9 @@ static int live_late_preempt(void *arg)
if (!ctx_lo)
goto err_ctx_hi;
+ /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */
+ ctx_lo->sched.priority = I915_USER_PRIORITY(1);
+
for_each_engine(engine, i915, id) {
struct igt_live_test t;
struct i915_request *rq;
@@ -465,7 +687,6 @@ err_spin_lo:
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -473,7 +694,7 @@ err_unlock:
err_wedged:
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -506,6 +727,114 @@ static void preempt_client_fini(struct preempt_client *c)
kernel_context_close(c->ctx);
}
+static int live_nopreempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct preempt_client a, b;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+
+ /*
+ * Verify that we can disable preemption for an individual request
+ * that may be being observed and not want to be interrupted.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ if (preempt_client_init(i915, &a))
+ goto err_unlock;
+ if (preempt_client_init(i915, &b))
+ goto err_client_a;
+ b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *rq_a, *rq_b;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ rq_a = igt_spinner_create_request(&a.spin,
+ a.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_a)) {
+ err = PTR_ERR(rq_a);
+ goto err_client_b;
+ }
+
+ /* Low priority client, but unpreemptable! */
+ rq_a->flags |= I915_REQUEST_NOPREEMPT;
+
+ i915_request_add(rq_a);
+ if (!igt_wait_for_spinner(&a.spin, rq_a)) {
+ pr_err("First client failed to start\n");
+ goto err_wedged;
+ }
+
+ rq_b = igt_spinner_create_request(&b.spin,
+ b.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq_b)) {
+ err = PTR_ERR(rq_b);
+ goto err_client_b;
+ }
+
+ i915_request_add(rq_b);
+
+ /* B is much more important than A! (But A is unpreemptable.) */
+ GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a));
+
+ /* Wait long enough for preemption and timeslicing */
+ if (igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client started too early!\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&a.spin);
+
+ if (!igt_wait_for_spinner(&b.spin, rq_b)) {
+ pr_err("Second client failed to start\n");
+ goto err_wedged;
+ }
+
+ igt_spinner_end(&b.spin);
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("Preemption recorded x%d; should have been suppressed!\n",
+ engine->execlists.preempt_hang.count);
+ err = -EINVAL;
+ goto err_wedged;
+ }
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ goto err_wedged;
+ }
+
+ err = 0;
+err_client_b:
+ preempt_client_fini(&b);
+err_client_a:
+ preempt_client_fini(&a);
+err_unlock:
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ igt_spinner_end(&b.spin);
+ igt_spinner_end(&a.spin);
+ intel_gt_set_wedged(&i915->gt);
+ err = -EIO;
+ goto err_client_b;
+}
+
static int live_suppress_self_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -531,6 +860,9 @@ static int live_suppress_self_preempt(void *arg)
if (USES_GUC_SUBMISSION(i915))
return 0; /* presume black box */
+ if (intel_vgpu_active(i915))
+ return 0; /* GVT forces single port & request submission */
+
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
@@ -604,8 +936,6 @@ err_client_b:
err_client_a:
preempt_client_fini(&a);
err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -613,7 +943,7 @@ err_unlock:
err_wedged:
igt_spinner_end(&b.spin);
igt_spinner_end(&a.spin);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_client_b;
}
@@ -646,6 +976,10 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
i915_sw_fence_init(&rq->submit, dummy_notify);
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+ spin_lock_init(&rq->lock);
+ rq->fence.lock = &rq->lock;
+ INIT_LIST_HEAD(&rq->fence.cb_list);
+
return rq;
}
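The three lines added to dummy_request() exist because dummy requests are stubbed up by hand rather than through dma_fence_init(), so the fields that dma_fence_signal() dereferences (the fence lock and callback list) must be populated explicitly. A self-contained sketch of the same manual setup, assuming a hypothetical container struct:

#include <linux/dma-fence.h>
#include <linux/spinlock.h>

struct dummy {				/* hypothetical container */
	struct dma_fence fence;
	spinlock_t lock;
};

static void dummy_fence_setup(struct dummy *d)
{
	/* dma_fence_signal() takes fence->lock and walks fence->cb_list,
	 * so both must be valid even for a hand-rolled stub */
	spin_lock_init(&d->lock);
	d->fence.lock = &d->lock;
	INIT_LIST_HEAD(&d->fence.cb_list);
}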
@@ -773,8 +1107,6 @@ err_client_1:
err_client_0:
preempt_client_fini(&client[0]);
err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -782,7 +1114,7 @@ err_unlock:
err_wedged:
for (i = 0; i < ARRAY_SIZE(client); i++)
igt_spinner_end(&client[i].spin);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_client_3;
}
@@ -921,8 +1253,6 @@ err_client_lo:
err_client_hi:
preempt_client_fini(&hi);
err_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -930,7 +1260,7 @@ err_unlock:
err_wedged:
igt_spinner_end(&hi.spin);
igt_spinner_end(&lo.spin);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_client_lo;
}
@@ -989,7 +1319,7 @@ static int live_preempt_hang(void *arg)
if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -1011,21 +1341,21 @@ static int live_preempt_hang(void *arg)
HZ / 10)) {
pr_err("Preemption did not occur within timeout!");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
- set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
- i915_reset_engine(engine, NULL);
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
+ intel_engine_reset(engine, NULL);
+ clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags);
engine->execlists.preempt_hang.inject_hang = false;
if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto err_ctx_lo;
}
@@ -1048,7 +1378,6 @@ err_spin_lo:
err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
- igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -1406,7 +1735,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915,
request[nc]->fence.context,
request[nc]->fence.seqno);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
break;
}
}
@@ -1553,7 +1882,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915,
request[n]->fence.context,
request[n]->fence.seqno);
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
err = -EIO;
goto out;
}
@@ -1812,9 +2141,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_timeslice_preempt),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
+ SUBTEST(live_nopreempt),
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
@@ -1828,8 +2159,8 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
if (!HAS_EXECLISTS(i915))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index b5c590c9ccba..00a4f60cdfd5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -9,26 +9,29 @@
static int igt_global_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
unsigned int reset_count;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that we can issue a global GPU reset */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- reset_count = i915_reset_count(&i915->gpu_error);
+ reset_count = i915_reset_count(&gt->i915->gpu_error);
- i915_reset(i915, ALL_ENGINES, NULL);
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
}
- igt_global_reset_unlock(i915);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
return err;
@@ -36,64 +39,123 @@ static int igt_global_reset(void *arg)
static int igt_wedged_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
intel_wakeref_t wakeref;
/* Check that we can recover a wedged device with a GPU reset */
- igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
- GEM_BUG_ON(!i915_reset_failed(i915));
- i915_reset(i915, ALL_ENGINES, NULL);
+ GEM_BUG_ON(!intel_gt_is_wedged(gt));
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ igt_global_reset_unlock(gt);
- return i915_reset_failed(i915) ? -EIO : 0;
+ return intel_gt_is_wedged(gt) ? -EIO : 0;
}
static int igt_atomic_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
int err = 0;
/* Check that the resets are usable from atomic context */
- igt_global_reset_lock(i915);
- mutex_lock(&i915->drm.struct_mutex);
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
- if (!igt_force_reset(i915))
+ if (!igt_force_reset(gt))
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
intel_engine_mask_t awake;
- GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+ GEM_TRACE("__intel_gt_reset under %s\n", p->name);
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
p->critical_section_begin();
- reset_prepare(i915);
- err = intel_gpu_reset(i915, ALL_ENGINES);
+
+ err = __intel_gt_reset(gt, ALL_ENGINES);
+
p->critical_section_end();
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
if (err) {
- pr_err("intel_gpu_reset failed under %s\n", p->name);
+ pr_err("__intel_gt_reset failed under %s\n", p->name);
break;
}
}
/* As we poke around the guts, do a full reset before continuing. */
- igt_force_reset(i915);
+ igt_force_reset(gt);
unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
+
+ return err;
+}
+
+static int igt_atomic_engine_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ const typeof(*igt_atomic_phases) *p;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /* Check that the resets are usable from atomic context */
+
+ if (!intel_has_reset_engine(gt->i915))
+ return 0;
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
+
+ /* Flush any requests before we get started and check basics */
+ if (!igt_force_reset(gt))
+ goto out_unlock;
+
+ for_each_engine(engine, gt->i915, id) {
+ tasklet_disable_nosync(&engine->execlists.tasklet);
+ intel_engine_pm_get(engine);
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ GEM_TRACE("intel_engine_reset(%s) under %s\n",
+ engine->name, p->name);
+
+ p->critical_section_begin();
+ err = intel_engine_reset(engine, NULL);
+ p->critical_section_end();
+
+ if (err) {
+ pr_err("intel_engine_reset(%s) failed under %s\n",
+ engine->name, p->name);
+ break;
+ }
+ }
+
+ intel_engine_pm_put(engine);
+ tasklet_enable(&engine->execlists.tasklet);
+ if (err)
+ break;
+ }
+
+ /* As we poke around the guts, do a full reset before continuing. */
+ igt_force_reset(gt);
+
+out_unlock:
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
return err;
}
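Both atomic selftests walk the same igt_atomic_phases table, wrapping each reset in a different flavour of atomic context. A sketch of the shape of that table, assuming it follows selftests/igt_atomic.h (only the preempt phase shown; names are illustrative):

#include <linux/preempt.h>

struct igt_atomic_section {
	const char *name;
	void (*critical_section_begin)(void);
	void (*critical_section_end)(void);
};

static void __preempt_begin(void)
{
	preempt_disable();
}

static void __preempt_end(void)
{
	preempt_enable();
}

static const struct igt_atomic_section igt_atomic_phases[] = {
	{ "preempt", __preempt_begin, __preempt_end },
	/* softirq and hardirq phases elided */
	{ }
};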
@@ -104,18 +166,15 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_wedged_reset),
SUBTEST(igt_atomic_reset),
+ SUBTEST(igt_atomic_engine_reset),
};
- intel_wakeref_t wakeref;
- int err = 0;
+ struct intel_gt *gt = &i915->gt;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt->i915))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, i915);
-
- return err;
+ return intel_gt_live_subtests(tests, gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 76d3977f1d4b..f0a840030382 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -7,15 +7,16 @@
#include <linux/prime_numbers.h>
#include "gem/i915_gem_pm.h"
+#include "intel_gt.h"
-#include "i915_random.h"
-#include "i915_selftest.h"
+#include "../selftests/i915_random.h"
+#include "../i915_selftest.h"
-#include "igt_flush_test.h"
-#include "mock_gem_device.h"
-#include "mock_timeline.h"
+#include "../selftests/igt_flush_test.h"
+#include "../selftests/mock_gem_device.h"
+#include "selftests/mock_timeline.h"
-static struct page *hwsp_page(struct i915_timeline *tl)
+static struct page *hwsp_page(struct intel_timeline *tl)
{
struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
@@ -23,7 +24,7 @@ static struct page *hwsp_page(struct i915_timeline *tl)
return sg_page(obj->mm.pages->sgl);
}
-static unsigned long hwsp_cacheline(struct i915_timeline *tl)
+static unsigned long hwsp_cacheline(struct intel_timeline *tl)
{
unsigned long address = (unsigned long)page_address(hwsp_page(tl));
@@ -35,7 +36,7 @@ static unsigned long hwsp_cacheline(struct i915_timeline *tl)
struct mock_hwsp_freelist {
struct drm_i915_private *i915;
struct radix_tree_root cachelines;
- struct i915_timeline **history;
+ struct intel_timeline **history;
unsigned long count, max;
struct rnd_state prng;
};
@@ -46,12 +47,12 @@ enum {
static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
unsigned int idx,
- struct i915_timeline *tl)
+ struct intel_timeline *tl)
{
tl = xchg(&state->history[idx], tl);
if (tl) {
radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
}
}
@@ -59,14 +60,14 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned int count,
unsigned int flags)
{
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
unsigned int idx;
while (count--) {
unsigned long cacheline;
int err;
- tl = i915_timeline_create(state->i915, NULL);
+ tl = intel_timeline_create(&state->i915->gt, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -77,7 +78,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
cacheline);
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
return err;
}
@@ -162,21 +163,21 @@ struct __igt_sync {
bool set;
};
-static int __igt_sync(struct i915_timeline *tl,
+static int __igt_sync(struct intel_timeline *tl,
u64 ctx,
const struct __igt_sync *p,
const char *name)
{
int ret;
- if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
+ if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
name, p->name, ctx, p->seqno, yesno(p->expected));
return -EINVAL;
}
if (p->set) {
- ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
+ ret = __intel_timeline_sync_set(tl, ctx, p->seqno);
if (ret)
return ret;
}
@@ -204,7 +205,7 @@ static int igt_sync(void *arg)
{ "unwrap", UINT_MAX, true, false },
{},
}, *p;
- struct i915_timeline tl;
+ struct intel_timeline tl;
int order, offset;
int ret = -ENODEV;
@@ -248,7 +249,7 @@ static unsigned int random_engine(struct rnd_state *rnd)
static int bench_sync(void *arg)
{
struct rnd_state prng;
- struct i915_timeline tl;
+ struct intel_timeline tl;
unsigned long end_time, count;
u64 prng32_1M;
ktime_t kt;
@@ -286,7 +287,7 @@ static int bench_sync(void *arg)
do {
u64 id = i915_prandom_u64_state(&prng);
- __i915_timeline_sync_set(&tl, id, 0);
+ __intel_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
@@ -301,7 +302,7 @@ static int bench_sync(void *arg)
while (end_time--) {
u64 id = i915_prandom_u64_state(&prng);
- if (!__i915_timeline_sync_is_later(&tl, id, 0)) {
+ if (!__intel_timeline_sync_is_later(&tl, id, 0)) {
mock_timeline_fini(&tl);
pr_err("Lookup of %llu failed\n", id);
return -EINVAL;
@@ -322,7 +323,7 @@ static int bench_sync(void *arg)
kt = ktime_get();
end_time = jiffies + HZ/10;
do {
- __i915_timeline_sync_set(&tl, count++, 0);
+ __intel_timeline_sync_set(&tl, count++, 0);
} while (!time_after(jiffies, end_time));
kt = ktime_sub(ktime_get(), kt);
pr_info("%s: %lu in-order insertions, %lluns/insert\n",
@@ -332,7 +333,7 @@ static int bench_sync(void *arg)
end_time = count;
kt = ktime_get();
while (end_time--) {
- if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) {
+ if (!__intel_timeline_sync_is_later(&tl, end_time, 0)) {
pr_err("Lookup of %lu failed\n", end_time);
mock_timeline_fini(&tl);
return -EINVAL;
@@ -356,8 +357,8 @@ static int bench_sync(void *arg)
u32 id = random_engine(&prng);
u32 seqno = prandom_u32_state(&prng);
- if (!__i915_timeline_sync_is_later(&tl, id, seqno))
- __i915_timeline_sync_set(&tl, id, seqno);
+ if (!__intel_timeline_sync_is_later(&tl, id, seqno))
+ __intel_timeline_sync_set(&tl, id, seqno);
count++;
} while (!time_after(jiffies, end_time));
@@ -385,8 +386,8 @@ static int bench_sync(void *arg)
*/
u64 id = (u64)(count & mask) << order;
- __i915_timeline_sync_is_later(&tl, id, 0);
- __i915_timeline_sync_set(&tl, id, 0);
+ __intel_timeline_sync_is_later(&tl, id, 0);
+ __intel_timeline_sync_set(&tl, id, 0);
count++;
} while (!time_after(jiffies, end_time));
@@ -401,7 +402,7 @@ static int bench_sync(void *arg)
return 0;
}
-int i915_timeline_mock_selftests(void)
+int intel_timeline_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(mock_hwsp_freelist),
@@ -443,14 +444,14 @@ static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
}
static struct i915_request *
-tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
+tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value)
{
struct i915_request *rq;
int err;
- lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */
+ lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */
- err = i915_timeline_pin(tl);
+ err = intel_timeline_pin(tl);
if (err) {
rq = ERR_PTR(err);
goto out;
@@ -466,26 +467,26 @@ tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
rq = ERR_PTR(err);
out_unpin:
- i915_timeline_unpin(tl);
+ intel_timeline_unpin(tl);
out:
if (IS_ERR(rq))
pr_err("Failed to write to timeline!\n");
return rq;
}
-static struct i915_timeline *
-checked_i915_timeline_create(struct drm_i915_private *i915)
+static struct intel_timeline *
+checked_intel_timeline_create(struct drm_i915_private *i915)
{
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
- tl = i915_timeline_create(i915, NULL);
+ tl = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(tl))
return tl;
if (*tl->hwsp_seqno != tl->seqno) {
pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
*tl->hwsp_seqno, tl->seqno);
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
return ERR_PTR(-EINVAL);
}
@@ -496,7 +497,7 @@ static int live_hwsp_engine(void *arg)
{
#define NUM_TIMELINES 4096
struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
+ struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
@@ -523,10 +524,10 @@ static int live_hwsp_engine(void *arg)
continue;
for (n = 0; n < NUM_TIMELINES; n++) {
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_i915_timeline_create(i915);
+ tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
@@ -534,7 +535,7 @@ static int live_hwsp_engine(void *arg)
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
}
@@ -548,14 +549,14 @@ out:
err = -EIO;
for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
+ struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
n, *tl->hwsp_seqno);
err = -EINVAL;
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -571,7 +572,7 @@ static int live_hwsp_alternate(void *arg)
{
#define NUM_TIMELINES 4096
struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
+ struct intel_timeline **timelines;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
@@ -596,13 +597,13 @@ static int live_hwsp_alternate(void *arg)
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
for_each_engine(engine, i915, id) {
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
struct i915_request *rq;
if (!intel_engine_can_store_dword(engine))
continue;
- tl = checked_i915_timeline_create(i915);
+ tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
@@ -610,7 +611,7 @@ static int live_hwsp_alternate(void *arg)
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
}
@@ -624,14 +625,14 @@ out:
err = -EIO;
for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
+ struct intel_timeline *tl = timelines[n];
if (!err && *tl->hwsp_seqno != n) {
pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
n, *tl->hwsp_seqno);
err = -EINVAL;
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
@@ -647,7 +648,7 @@ static int live_hwsp_wrap(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = 0;
@@ -660,7 +661,7 @@ static int live_hwsp_wrap(void *arg)
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- tl = i915_timeline_create(i915, NULL);
+ tl = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out_rpm;
@@ -668,7 +669,7 @@ static int live_hwsp_wrap(void *arg)
if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
goto out_free;
- err = i915_timeline_pin(tl);
+ err = intel_timeline_pin(tl);
if (err)
goto out_free;
@@ -688,7 +689,7 @@ static int live_hwsp_wrap(void *arg)
tl->seqno = -4u;
- err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
if (err) {
i915_request_add(rq);
goto out;
@@ -703,7 +704,7 @@ static int live_hwsp_wrap(void *arg)
}
hwsp_seqno[0] = tl->hwsp_seqno;
- err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
+ err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
if (err) {
i915_request_add(rq);
goto out;
@@ -745,9 +746,9 @@ out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- i915_timeline_unpin(tl);
+ intel_timeline_unpin(tl);
out_free:
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
out_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
@@ -781,10 +782,10 @@ static int live_hwsp_recycle(void *arg)
continue;
do {
- struct i915_timeline *tl;
+ struct intel_timeline *tl;
struct i915_request *rq;
- tl = checked_i915_timeline_create(i915);
+ tl = checked_intel_timeline_create(i915);
if (IS_ERR(tl)) {
err = PTR_ERR(tl);
goto out;
@@ -792,14 +793,14 @@ static int live_hwsp_recycle(void *arg)
rq = tl_write(tl, engine, count);
if (IS_ERR(rq)) {
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = PTR_ERR(rq);
goto out;
}
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
err = -EIO;
goto out;
}
@@ -810,26 +811,24 @@ static int live_hwsp_recycle(void *arg)
err = -EINVAL;
}
- i915_timeline_put(tl);
+ intel_timeline_put(tl);
count++;
if (err)
goto out;
- i915_timelines_park(i915); /* Encourage recycling! */
+ intel_timelines_park(i915); /* Encourage recycling! */
} while (!__igt_timeout(end_time, NULL));
}
out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
-int i915_timeline_live_selftests(struct drm_i915_private *i915)
+int intel_timeline_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_hwsp_recycle),
@@ -838,8 +837,8 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_wrap),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 44becd9538be..ab147985fa74 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -5,13 +5,13 @@
*/
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
#include "intel_reset.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
-#include "selftests/igt_wedge_me.h"
#include "selftests/mock_drm.h"
#include "gem/selftests/igt_gem_utils.h"
@@ -24,11 +24,9 @@ static const struct wo_register {
{ INTEL_GEMINILAKE, 0x731c }
};
-#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
struct wa_lists {
struct i915_wa_list gt_wa_list;
struct {
- char name[REF_NAME_MAX];
struct i915_wa_list wa_list;
struct i915_wa_list ctx_wa_list;
} engine[I915_NUM_ENGINES];
@@ -42,25 +40,20 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
memset(lists, 0, sizeof(*lists));
- wa_init_start(&lists->gt_wa_list, "GT_REF");
+ wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
gt_init_workarounds(i915, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list);
for_each_engine(engine, i915, id) {
struct i915_wa_list *wal = &lists->engine[id].wa_list;
- char *name = lists->engine[id].name;
- snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
-
- wa_init_start(wal, name);
+ wa_init_start(wal, "REF", engine->name);
engine_init_workarounds(engine, wal);
wa_init_finish(wal);
- snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);
-
__intel_engine_init_ctx_wa(engine,
&lists->engine[id].ctx_wa_list,
- name);
+ "CTX_REF");
}
}
@@ -102,7 +95,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
i915_gem_object_flush_map(result);
i915_gem_object_unpin_map(result);
- vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -184,7 +177,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
- struct igt_wedge_me wedge;
+ struct intel_wedge_me wedge;
u32 *vaddr;
int err;
int i;
@@ -195,10 +188,10 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
i915_gem_object_lock(results);
- igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
+ intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
i915_gem_object_unlock(results);
- if (i915_terminally_wedged(ctx->i915))
+ if (intel_gt_is_wedged(&ctx->i915->gt))
err = -EIO;
if (err)
goto out_put;
@@ -231,13 +224,13 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, engine->mask, "live_workarounds");
+ intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
return 0;
}
static int do_engine_reset(struct intel_engine_cs *engine)
{
- return i915_reset_engine(engine, "live_workarounds");
+ return intel_engine_reset(engine, "live_workarounds");
}
static int
@@ -286,64 +279,67 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
const char *name)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_gem_context *ctx;
+ struct i915_gem_context *ctx, *tmp;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err;
- pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
- engine->whitelist.count, name);
-
- err = igt_spinner_init(&spin, i915);
- if (err)
- return err;
+ pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n",
+ engine->whitelist.count, engine->name, name);
ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ goto out_ctx;
+
err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
- goto out;
+ goto out_spin;
}
err = switch_to_scratch_context(engine, &spin);
if (err)
- goto out;
+ goto out_spin;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = reset(engine);
igt_spinner_end(&spin);
- igt_spinner_fini(&spin);
if (err) {
pr_err("%s reset failed\n", name);
- goto out;
+ goto out_spin;
}
err = check_whitelist(ctx, engine);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
- goto out;
+ goto out_spin;
}
+ tmp = kernel_context(i915);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto out_spin;
+ }
kernel_context_close(ctx);
-
- ctx = kernel_context(i915);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ctx = tmp;
err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
- goto out;
+ goto out_spin;
}
-out:
+out_spin:
+ igt_spinner_fini(&spin);
+out_ctx:
kernel_context_close(ctx);
return err;
}
@@ -393,6 +389,10 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
int i;
+ if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_WR)
+ return true;
+
for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
if (wo_registers[i].platform == platform &&
wo_registers[i].reg == reg)
@@ -404,7 +404,8 @@ static bool wo_register(struct intel_engine_cs *engine, u32 reg)
static bool ro_register(u32 reg)
{
- if (reg & RING_FORCE_TO_NONPRIV_RD)
+ if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
+ RING_FORCE_TO_NONPRIV_ACCESS_RD)
return true;
return false;
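The switch from testing RING_FORCE_TO_NONPRIV_RD as a lone flag bit to masking out RING_FORCE_TO_NONPRIV_ACCESS_MASK and comparing against the RD/WR encodings matters because the access permission is a multi-bit field: two distinct encodings can share a set bit, so a plain bit test can match the wrong one. An illustrative sketch with assumed field values (not the real register layout):

/* hypothetical 2-bit access field in bits 29:28 */
#define ACCESS_MASK	(3u << 28)
#define ACCESS_RW	(0u << 28)
#define ACCESS_RD	(1u << 28)
#define ACCESS_WR	(2u << 28)
#define ACCESS_RW_ALT	(3u << 28)	/* shares bit 28 with ACCESS_RD */

static int is_read_only(unsigned int reg)
{
	/* (reg & ACCESS_RD) would also match ACCESS_RW_ALT;
	 * masking the whole field and comparing does not */
	return (reg & ACCESS_MASK) == ACCESS_RD;
}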
@@ -476,12 +477,12 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
u32 srm, lrm, rsvd;
u32 expect;
int idx;
+ bool ro_reg;
if (wo_register(engine, reg))
continue;
- if (ro_register(reg))
- continue;
+ ro_reg = ro_register(reg);
srm = MI_STORE_REGISTER_MEM;
lrm = MI_LOAD_REGISTER_MEM;
@@ -542,7 +543,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
i915_gem_object_flush_map(batch->obj);
i915_gem_object_unpin_map(batch->obj);
- i915_gem_chipset_flush(ctx->i915);
+ intel_gt_chipset_flush(engine->gt);
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
@@ -570,7 +571,7 @@ err_request:
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("%s: Futzing %x timedout; cancelling test\n",
engine->name, reg);
- i915_gem_set_wedged(ctx->i915);
+ intel_gt_set_wedged(&ctx->i915->gt);
err = -EIO;
goto out_batch;
}
@@ -582,24 +583,35 @@ err_request:
}
GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
- rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
- if (!rsvd) {
- pr_err("%s: Unable to write to whitelisted register %x\n",
- engine->name, reg);
- err = -EINVAL;
- goto out_unpin;
+ if (!ro_reg) {
+ /* detect write masking */
+ rsvd = results[ARRAY_SIZE(values)];
+ if (!rsvd) {
+ pr_err("%s: Unable to write to whitelisted register %x\n",
+ engine->name, reg);
+ err = -EINVAL;
+ goto out_unpin;
+ }
}
expect = results[0];
idx = 1;
for (v = 0; v < ARRAY_SIZE(values); v++) {
- expect = reg_write(expect, values[v], rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, values[v], rsvd);
+
if (results[idx] != expect)
err++;
idx++;
}
for (v = 0; v < ARRAY_SIZE(values); v++) {
- expect = reg_write(expect, ~values[v], rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, ~values[v], rsvd);
+
if (results[idx] != expect)
err++;
idx++;
@@ -608,15 +620,22 @@ err_request:
pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
engine->name, err, reg);
- pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
- engine->name, reg, results[0], rsvd);
+ if (ro_reg)
+ pr_info("%s: Whitelisted read-only register: %x, original value %08x\n",
+ engine->name, reg, results[0]);
+ else
+ pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
+ engine->name, reg, results[0], rsvd);
expect = results[0];
idx = 1;
for (v = 0; v < ARRAY_SIZE(values); v++) {
u32 w = values[v];
- expect = reg_write(expect, w, rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, w, rsvd);
pr_info("Wrote %08x, read %08x, expect %08x\n",
w, results[idx], expect);
idx++;
@@ -624,7 +643,10 @@ err_request:
for (v = 0; v < ARRAY_SIZE(values); v++) {
u32 w = ~values[v];
- expect = reg_write(expect, w, rsvd);
+ if (ro_reg)
+ expect = results[0];
+ else
+ expect = reg_write(expect, w, rsvd);
pr_info("Wrote %08x, read %08x, expect %08x\n",
w, results[idx], expect);
idx++;
@@ -707,7 +729,7 @@ static int live_reset_whitelist(void *arg)
if (!engine || engine->whitelist.count == 0)
return 0;
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(&i915->gt);
if (intel_has_reset_engine(i915)) {
err = check_whitelist_across_reset(engine,
@@ -726,7 +748,7 @@ static int live_reset_whitelist(void *arg)
}
out:
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(&i915->gt);
return err;
}
@@ -756,8 +778,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx,
u64 offset = results->node.start + sizeof(u32) * i;
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
- /* Clear RD only and WR only flags */
- reg &= ~(RING_FORCE_TO_NONPRIV_RD | RING_FORCE_TO_NONPRIV_WR);
+ /* Clear access permission field */
+ reg &= ~RING_FORCE_TO_NONPRIV_ACCESS_MASK;
*cs++ = srm;
*cs++ = reg;
@@ -806,7 +828,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
*cs++ = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(batch->obj);
- i915_gem_chipset_flush(ctx->i915);
+ intel_gt_chipset_flush(engine->gt);
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
@@ -927,7 +949,8 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
for (i = 0; i < engine->whitelist.count; i++) {
const struct i915_wa *wa = &engine->whitelist.list[i];
- if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
+ if (i915_mmio_reg_offset(wa->reg) &
+ RING_FORCE_TO_NONPRIV_ACCESS_RD)
continue;
if (!fn(engine, a[i], b[i], wa->reg))
@@ -1094,7 +1117,7 @@ live_gpu_reset_workarounds(void *arg)
pr_info("Verifying after GPU reset...\n");
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(&i915->gt);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
@@ -1103,7 +1126,7 @@ live_gpu_reset_workarounds(void *arg)
if (!ok)
goto out;
- i915_reset(i915, ALL_ENGINES, "live_workarounds");
+ intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after reset");
@@ -1111,7 +1134,7 @@ out:
kernel_context_close(ctx);
reference_lists_fini(i915, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(&i915->gt);
return ok ? 0 : -ESRCH;
}
@@ -1136,7 +1159,7 @@ live_engine_reset_workarounds(void *arg)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(&i915->gt);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
reference_lists_init(i915, &lists);
@@ -1152,7 +1175,7 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- i915_reset_engine(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds");
ok = verify_wa_lists(ctx, &lists, "after idle reset");
if (!ok) {
@@ -1180,7 +1203,7 @@ live_engine_reset_workarounds(void *arg)
goto err;
}
- i915_reset_engine(engine, "live_workarounds");
+ intel_engine_reset(engine, "live_workarounds");
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
@@ -1195,7 +1218,7 @@ live_engine_reset_workarounds(void *arg)
err:
reference_lists_fini(i915, &lists);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ igt_global_reset_unlock(&i915->gt);
kernel_context_close(ctx);
igt_flush_test(i915, I915_WAIT_LOCKED);
@@ -1214,7 +1237,7 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915)
};
int err;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
index 65b52be23d42..5c549205828a 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c
@@ -4,13 +4,13 @@
* Copyright © 2017-2018 Intel Corporation
*/
-#include "../i915_timeline.h"
+#include "../intel_timeline.h"
#include "mock_timeline.h"
-void mock_timeline_init(struct i915_timeline *timeline, u64 context)
+void mock_timeline_init(struct intel_timeline *timeline, u64 context)
{
- timeline->i915 = NULL;
+ timeline->gt = NULL;
timeline->fence_context = context;
mutex_init(&timeline->mutex);
@@ -23,7 +23,7 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
INIT_LIST_HEAD(&timeline->link);
}
-void mock_timeline_fini(struct i915_timeline *timeline)
+void mock_timeline_fini(struct intel_timeline *timeline)
{
i915_syncmap_free(&timeline->sync);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
index b6deaa61110d..689efc66c908 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.h
+++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.h
@@ -7,9 +7,9 @@
#ifndef __MOCK_TIMELINE__
#define __MOCK_TIMELINE__
-struct i915_timeline;
+struct intel_timeline;
-void mock_timeline_init(struct i915_timeline *timeline, u64 context);
-void mock_timeline_fini(struct i915_timeline *timeline);
+void mock_timeline_init(struct intel_timeline *timeline, u64 context);
+void mock_timeline_fini(struct intel_timeline *timeline);
#endif /* !__MOCK_TIMELINE__ */
diff --git a/drivers/gpu/drm/i915/gt/uc/Makefile b/drivers/gpu/drm/i915/gt/uc/Makefile
new file mode 100644
index 000000000000..bec94d434cb6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/Makefile
@@ -0,0 +1,5 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/../..
+
+# Extra header tests
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index c40a6efdd33a..13fbbffd05c7 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -22,6 +22,7 @@
*
*/
+#include "gt/intel_gt.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
@@ -29,16 +30,16 @@
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- I915_WRITE(GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
+ intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}
static void gen11_guc_raise_irq(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- I915_WRITE(GEN11_GUC_HOST_INTERRUPT, 0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
@@ -52,11 +53,11 @@ static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
void intel_guc_init_send_regs(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
enum forcewake_domains fw_domains = 0;
unsigned int i;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(gt->i915) >= 11) {
guc->send_regs.base =
i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
@@ -67,7 +68,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
}
for (i = 0; i < guc->send_regs.count; i++) {
- fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
+ fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE);
}
@@ -76,7 +77,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
void intel_guc_init_early(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_guc_fw_init_early(guc);
intel_guc_ct_init_early(&guc->ct);
@@ -99,90 +100,6 @@ void intel_guc_init_early(struct intel_guc *guc)
}
}
-static int guc_init_wq(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother about
- * flushing of this work item from the suspend hooks, the pending
- * work item if any will be either executed before the suspend
- * or scheduled later on resume. This way the handling of work
- * item can be kept same between system suspend & rpm suspend.
- */
- guc->log.relay.flush_wq =
- alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (!guc->log.relay.flush_wq) {
- DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
- return -ENOMEM;
- }
-
- /*
- * Even though both sending GuC action, and adding a new workitem to
- * GuC workqueue are serialized (each with its own locking), since
- * we're using multiple engines, it's possible that we're going to
- * issue a preempt request with two (or more - each for different
- * engine) workitems in GuC queue. In this situation, GuC may submit
- * all of them, which will make us very confused.
- * Our preemption contexts may even already be complete - before we
- * even had the chance to send the preempt action to GuC! Rather
- * than introducing yet another lock, we can just use an ordered workqueue
- * to make sure we're always sending a single preemption request with a
- * single workitem.
- */
- if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
- USES_GUC_SUBMISSION(dev_priv)) {
- guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
- WQ_HIGHPRI);
- if (!guc->preempt_wq) {
- destroy_workqueue(guc->log.relay.flush_wq);
- DRM_ERROR("Couldn't allocate workqueue for GuC "
- "preemption\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-static void guc_fini_wq(struct intel_guc *guc)
-{
- struct workqueue_struct *wq;
-
- wq = fetch_and_zero(&guc->preempt_wq);
- if (wq)
- destroy_workqueue(wq);
-
- wq = fetch_and_zero(&guc->log.relay.flush_wq);
- if (wq)
- destroy_workqueue(wq);
-}
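The deleted helpers lean on two workqueue properties called out in the comments: WQ_FREEZABLE lets a pending log flush run either before suspend or after resume with no explicit sync in the suspend hooks, and an ordered workqueue guarantees a single in-flight preempt work item without adding another lock. A condensed sketch of the same allocation pattern, assuming simplified names:

#include <linux/workqueue.h>

static struct workqueue_struct *log_wq;

static int example_init_wq(void)
{
	/* ordered + freezable, as for guc->log.relay.flush_wq above */
	log_wq = alloc_ordered_workqueue("i915-guc_log",
					 WQ_HIGHPRI | WQ_FREEZABLE);
	return log_wq ? 0 : -ENOMEM;
}

static void example_fini_wq(void)
{
	struct workqueue_struct *wq = log_wq;

	log_wq = NULL;
	if (wq)
		destroy_workqueue(wq);
}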
-
-int intel_guc_init_misc(struct intel_guc *guc)
-{
- struct drm_i915_private *i915 = guc_to_i915(guc);
- int ret;
-
- ret = guc_init_wq(guc);
- if (ret)
- return ret;
-
- intel_uc_fw_fetch(i915, &guc->fw);
-
- return 0;
-}
-
-void intel_guc_fini_misc(struct intel_guc *guc)
-{
- intel_uc_fw_cleanup_fetch(&guc->fw);
- guc_fini_wq(guc);
-}
-
static int guc_shared_data_create(struct intel_guc *guc)
{
struct i915_vma *vma;
@@ -209,66 +126,6 @@ static void guc_shared_data_destroy(struct intel_guc *guc)
i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}
-int intel_guc_init(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
-
- ret = intel_uc_fw_init(&guc->fw);
- if (ret)
- goto err_fetch;
-
- ret = guc_shared_data_create(guc);
- if (ret)
- goto err_fw;
- GEM_BUG_ON(!guc->shared_data);
-
- ret = intel_guc_log_create(&guc->log);
- if (ret)
- goto err_shared;
-
- ret = intel_guc_ads_create(guc);
- if (ret)
- goto err_log;
- GEM_BUG_ON(!guc->ads_vma);
-
- ret = intel_guc_ct_init(&guc->ct);
- if (ret)
- goto err_ads;
-
- /* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(dev_priv);
-
- return 0;
-
-err_ads:
- intel_guc_ads_destroy(guc);
-err_log:
- intel_guc_log_destroy(&guc->log);
-err_shared:
- guc_shared_data_destroy(guc);
-err_fw:
- intel_uc_fw_fini(&guc->fw);
-err_fetch:
- intel_uc_fw_cleanup_fetch(&guc->fw);
- return ret;
-}
-
-void intel_guc_fini(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- i915_ggtt_disable_guc(dev_priv);
-
- intel_guc_ct_fini(&guc->ct);
-
- intel_guc_ads_destroy(guc);
- intel_guc_log_destroy(&guc->log);
- guc_shared_data_destroy(guc);
- intel_uc_fw_fini(&guc->fw);
- intel_uc_fw_cleanup_fetch(&guc->fw);
-}
-
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
u32 level = intel_guc_log_get_level(&guc->log);
@@ -287,7 +144,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (!USES_GUC_SUBMISSION(guc_to_i915(guc)))
+ if (!intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -297,7 +154,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (USES_GUC_SUBMISSION(guc_to_i915(guc))) {
+ if (intel_uc_is_using_guc_submission(&guc_to_gt(guc)->uc)) {
u32 ctxnum, base;
base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
@@ -364,13 +221,12 @@ static u32 guc_ctl_ads_flags(struct intel_guc *guc)
* transfer. These parameters are read by the firmware on startup
* and cannot be changed thereafter.
*/
-void intel_guc_init_params(struct intel_guc *guc)
+static void guc_init_params(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 params[GUC_CTL_MAX_DWORDS];
+ u32 *params = guc->params;
int i;
- memset(params, 0, sizeof(params));
+ BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
@@ -380,20 +236,109 @@ void intel_guc_init_params(struct intel_guc *guc)
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
+}
+
+/*
+ * Write the GuC parameter block out to the hardware before starting the
+ * firmware transfer. These parameters are read by the firmware on
+ * startup and cannot be changed thereafter.
+ */
+void intel_guc_write_params(struct intel_guc *guc)
+{
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
+ int i;
/*
* All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);
- I915_WRITE(SOFT_SCRATCH(0), 0);
+ intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
+ intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
+}
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
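The split above separates computing the parameter block from writing it out:
guc_init_params() fills guc->params[] once, while the backing objects are
pinned, and intel_guc_write_params() replays the block on every firmware
load. A rough sketch of the intended call order (the actual caller lives in
intel_uc.c and is paraphrased here):

	/* init time, after the backing objects are perma-pinned */
	guc_init_params(guc);		/* fills guc->params[] once */

	/* every firmware load: boot, resume, reset */
	intel_guc_write_params(guc);	/* replays params into SOFT_SCRATCH */
	intel_guc_fw_upload(guc);	/* then transfers the ucode */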
+int intel_guc_init(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ int ret;
+
+ ret = intel_uc_fw_init(&guc->fw);
+ if (ret)
+ goto err_fetch;
+
+ ret = guc_shared_data_create(guc);
+ if (ret)
+ goto err_fw;
+ GEM_BUG_ON(!guc->shared_data);
+
+ ret = intel_guc_log_create(&guc->log);
+ if (ret)
+ goto err_shared;
+
+ ret = intel_guc_ads_create(guc);
+ if (ret)
+ goto err_log;
+ GEM_BUG_ON(!guc->ads_vma);
+
+ ret = intel_guc_ct_init(&guc->ct);
+ if (ret)
+ goto err_ads;
+
+ if (intel_uc_is_using_guc_submission(&gt->uc)) {
+ /*
+ * This is stuff we need to have available at fw load time
+ * if we are planning to enable submission later
+ */
+ ret = intel_guc_submission_init(guc);
+ if (ret)
+ goto err_ct;
+ }
+
+ /* now that everything is perma-pinned, initialize the parameters */
+ guc_init_params(guc);
+
+ /* We need to notify the guc whenever we change the GGTT */
+ i915_ggtt_enable_guc(gt->ggtt);
+
+ return 0;
+
+err_ct:
+ intel_guc_ct_fini(&guc->ct);
+err_ads:
+ intel_guc_ads_destroy(guc);
+err_log:
+ intel_guc_log_destroy(&guc->log);
+err_shared:
+ guc_shared_data_destroy(guc);
+err_fw:
+ intel_uc_fw_fini(&guc->fw);
+err_fetch:
+ intel_uc_fw_cleanup_fetch(&guc->fw);
+ return ret;
+}
+
+void intel_guc_fini(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ i915_ggtt_disable_guc(gt->ggtt);
+
+ if (intel_uc_is_using_guc_submission(&gt->uc))
+ intel_guc_submission_fini(guc);
+
+ intel_guc_ct_fini(&guc->ct);
+
+ intel_guc_ads_destroy(guc);
+ intel_guc_log_destroy(&guc->log);
+ guc_shared_data_destroy(guc);
+ intel_uc_fw_fini(&guc->fw);
+ intel_uc_fw_cleanup_fetch(&guc->fw);
}
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
@@ -414,8 +359,7 @@ void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
u32 status;
int i;
int ret;
@@ -464,7 +408,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
int count = min(response_buf_size, guc->send_regs.count - 1);
for (i = 0; i < count; i++)
- response_buf[i] = I915_READ(guc_send_reg(guc, i + 1));
+ response_buf[i] = intel_uncore_read(uncore,
+ guc_send_reg(guc, i + 1));
}
/* Use data from the GuC response as our return value */
@@ -497,7 +442,7 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
u32 action[2];
action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
@@ -538,7 +483,7 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
*/
int intel_guc_suspend(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
int ret;
u32 status;
u32 action[] = {
@@ -556,13 +501,14 @@ int intel_guc_suspend(struct intel_guc *guc)
* in progress so we need to take care of that ourselves as well.
*/
- I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
+ intel_uncore_write(uncore, SOFT_SCRATCH(14),
+ INTEL_GUC_SLEEP_STATE_INVALID_MASK);
ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
if (ret)
return ret;
- ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
+ ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
INTEL_GUC_SLEEP_STATE_INVALID_MASK,
0, 0, 10, &status);
if (ret)
@@ -658,17 +604,17 @@ int intel_guc_resume(struct intel_guc *guc)
*/
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u64 flags;
int ret;
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ obj = i915_gem_object_create_shmem(gt->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
- vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err;
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 08c906abdfa2..714e9892aaff 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -35,10 +35,7 @@
#include "i915_utils.h"
#include "i915_vma.h"
-struct guc_preempt_work {
- struct work_struct work;
- struct intel_engine_cs *engine;
-};
+struct __guc_ads_blob;
/*
* Top level structure of GuC. It handles firmware loading and manages client
@@ -59,12 +56,14 @@ struct intel_guc {
struct {
bool enabled;
- void (*reset)(struct drm_i915_private *i915);
- void (*enable)(struct drm_i915_private *i915);
- void (*disable)(struct drm_i915_private *i915);
+ void (*reset)(struct intel_guc *guc);
+ void (*enable)(struct intel_guc *guc);
+ void (*disable)(struct intel_guc *guc);
} interrupts;
struct i915_vma *ads_vma;
+ struct __guc_ads_blob *ads_blob;
+
struct i915_vma *stage_desc_pool;
void *stage_desc_pool_vaddr;
struct ida stage_ids;
@@ -72,15 +71,14 @@ struct intel_guc {
void *shared_data_vaddr;
struct intel_guc_client *execbuf_client;
- struct intel_guc_client *preempt_client;
-
- struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
- struct workqueue_struct *preempt_wq;
DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
/* Cyclic counter mod pagesize */
u32 db_cacheline;
+ /* Control params for fw initialization */
+ u32 params[GUC_CTL_MAX_DWORDS];
+
/* GuC's FW specific registers used in MMIO send */
struct {
u32 base;
@@ -88,6 +86,9 @@ struct intel_guc {
enum forcewake_domains fw_domains;
} send_regs;
+ /* Store msg (e.g. log flush) that we see while CTBs are disabled */
+ u32 mmio_msg;
+
/* To serialize the intel_guc_send actions */
struct mutex send_mutex;
@@ -154,11 +155,9 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
-void intel_guc_init_params(struct intel_guc *guc);
-int intel_guc_init_misc(struct intel_guc *guc);
+void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
-void intel_guc_fini_misc(struct intel_guc *guc);
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
@@ -173,14 +172,16 @@ int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
-static inline bool intel_guc_is_loaded(struct intel_guc *guc)
+static inline bool intel_guc_is_running(struct intel_guc *guc)
{
- return intel_uc_fw_is_loaded(&guc->fw);
+ return intel_uc_fw_is_running(&guc->fw);
}
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
intel_uc_fw_sanitize(&guc->fw);
+ guc->mmio_msg = 0;
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index ecb69fc94218..a0da80241f22 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -22,6 +22,7 @@
*
*/
+#include "gt/intel_gt.h"
#include "intel_guc_ads.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -83,18 +84,14 @@ struct __guc_ads_blob {
u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
} __packed;
-static int __guc_ads_init(struct intel_guc *guc)
+static void __guc_ads_init(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct __guc_ads_blob *blob;
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
+ struct __guc_ads_blob *blob = guc->ads_blob;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
u8 engine_class;
- blob = i915_gem_object_pin_map(guc->ads_vma->obj, I915_MAP_WB);
- if (IS_ERR(blob))
- return PTR_ERR(blob);
-
/* GuC scheduling policies */
guc_policies_init(&blob->policies);
@@ -144,9 +141,7 @@ static int __guc_ads_init(struct intel_guc *guc)
blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
blob->ads.clients_info = base + ptr_offset(blob, clients_info);
- i915_gem_object_unpin_map(guc->ads_vma->obj);
-
- return 0;
+ i915_gem_object_flush_map(guc->ads_vma->obj);
}
/**
@@ -160,6 +155,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
{
const u32 size = PAGE_ALIGN(sizeof(struct __guc_ads_blob));
struct i915_vma *vma;
+ void *blob;
int ret;
GEM_BUG_ON(guc->ads_vma);
@@ -168,11 +164,16 @@ int intel_guc_ads_create(struct intel_guc *guc)
if (IS_ERR(vma))
return PTR_ERR(vma);
+ blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ goto err_vma;
+ }
+
guc->ads_vma = vma;
+ guc->ads_blob = blob;
- ret = __guc_ads_init(guc);
- if (ret)
- goto err_vma;
+ __guc_ads_init(guc);
return 0;
@@ -183,7 +184,7 @@ err_vma:
void intel_guc_ads_destroy(struct intel_guc *guc)
{
- i915_vma_unpin_and_release(&guc->ads_vma, 0);
+ i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
}
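With this change the ADS blob stays pinned and mapped for the life of the VMA,
which is what lets __guc_ads_init() become a plain void re-initializer after
reset. Condensed from the hunks above, the lifetime is:

	/* create: pin the mapping once and keep it */
	blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	guc->ads_vma = vma;
	guc->ads_blob = blob;
	__guc_ads_init(guc);	/* writes through guc->ads_blob */

	/* destroy: drop the mapping together with the vma */
	i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);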
/**
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
index 7f40f9cd5fb9..7f40f9cd5fb9 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 3921809f812b..9e383a47609f 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -529,8 +529,8 @@ unlink:
/*
* Command Transport (CT) buffer based GuC send function.
*/
-static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
- u32 *response_buf, u32 response_buf_size)
+int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size)
{
struct intel_guc_ct *ct = &guc->ct;
struct intel_guc_ct_channel *ctch = &ct->host_channel;
@@ -834,7 +834,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
* When we're communicating with the GuC over CT, GuC uses events
* to notify us about new messages being posted on the RECV buffer.
*/
-static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
+void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
{
struct intel_guc_ct *ct = &guc->ct;
@@ -892,20 +892,11 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
struct intel_guc_ct_channel *ctch = &ct->host_channel;
- int err;
if (ctch->enabled)
return 0;
- err = ctch_enable(guc, ctch);
- if (unlikely(err))
- return err;
-
- /* Switch into cmd transport buffer based send() */
- guc->send = intel_guc_send_ct;
- guc->handler = intel_guc_to_host_event_handler_ct;
- DRM_INFO("CT: %s\n", enableddisabled(true));
- return 0;
+ return ctch_enable(guc, ctch);
}
/**
@@ -921,9 +912,4 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
return;
ctch_disable(guc, ctch);
-
- /* Disable send */
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
- DRM_INFO("CT: %s\n", enableddisabled(false));
}
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 41ba593a4df7..8c1f6d133168 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -24,11 +24,14 @@
#ifndef _INTEL_GUC_CT_H_
#define _INTEL_GUC_CT_H_
-struct intel_guc;
-struct i915_vma;
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include "intel_guc_fwif.h"
+struct i915_vma;
+struct intel_guc;
+
/**
* DOC: Command Transport (CT).
*
@@ -101,4 +104,8 @@ static inline void intel_guc_ct_stop(struct intel_guc_ct *ct)
ct->host_channel.enabled = false;
}
+int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
+ u32 *response_buf, u32 response_buf_size);
+void intel_guc_to_host_event_handler_ct(struct intel_guc *guc);
+
#endif /* _INTEL_GUC_CT_H_ */
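Since CT enable/disable no longer flips guc->send and guc->handler behind the
scenes, selecting between the MMIO and CT paths becomes the caller's job. A
hypothetical sketch of such a selector, built only from functions visible in
these hunks (the real switching logic lives in intel_uc.c):

static void example_set_comms(struct intel_guc *guc, bool enable_ct)
{
	if (enable_ct) {
		guc->send = intel_guc_send_ct;
		guc->handler = intel_guc_to_host_event_handler_ct;
	} else {
		guc->send = intel_guc_send_nop;
		guc->handler = intel_guc_to_host_event_handler_nop;
	}
}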
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
new file mode 100644
index 000000000000..28735c14b9a0
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Vinit Azad <vinit.azad@intel.com>
+ * Ben Widawsky <ben@bwidawsk.net>
+ * Dave Gordon <david.s.gordon@intel.com>
+ * Alex Dai <yu.dai@intel.com>
+ */
+
+#include "gt/intel_gt.h"
+#include "intel_guc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * intel_guc_fw_init_early() - initializes GuC firmware struct
+ * @guc: intel_guc struct
+ *
+ * On platforms with a GuC, selects the firmware for uploading.
+ */
+void intel_guc_fw_init_early(struct intel_guc *guc)
+{
+ intel_uc_fw_init_early(&guc->fw, INTEL_UC_FW_TYPE_GUC, guc_to_gt(guc)->i915);
+}
+
+static void guc_prepare_xfer(struct intel_uncore *uncore)
+{
+ u32 shim_flags = GUC_DISABLE_SRAM_INIT_TO_ZEROES |
+ GUC_ENABLE_READ_CACHE_LOGIC |
+ GUC_ENABLE_MIA_CACHING |
+ GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
+ GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
+ GUC_ENABLE_MIA_CLOCK_GATING;
+
+ /* Must program this register before loading the ucode with DMA */
+ intel_uncore_write(uncore, GUC_SHIM_CONTROL, shim_flags);
+
+ if (IS_GEN9_LP(uncore->i915))
+ intel_uncore_write(uncore, GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+ else
+ intel_uncore_write(uncore, GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+
+ if (IS_GEN(uncore->i915, 9)) {
+ /* DOP Clock Gating Enable for GuC clocks */
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL,
+ 0, GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
+
+ /* allows for 5us (in 10ns units) before GT can go to RC6 */
+ intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
+ }
+}
+
+/* Copy RSA signature from the fw image to HW for verification */
+static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
+{
+ u32 rsa[UOS_RSA_SCRATCH_COUNT];
+ size_t copied;
+ int i;
+
+ copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
+ GEM_BUG_ON(copied < sizeof(rsa));
+
+ for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
+ intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
+}
+
+/*
+ * Read the GuC status register (GUC_STATUS) and store it in the
+ * specified location; then return a boolean indicating whether
+ * the value matches either of two values representing completion
+ * of the GuC boot process.
+ *
+ * This is used for polling the GuC status in a wait_for()
+ * loop below.
+ */
+static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
+{
+ u32 val = intel_uncore_read(uncore, GUC_STATUS);
+ u32 uk_val = val & GS_UKERNEL_MASK;
+
+ *status = val;
+ return (uk_val == GS_UKERNEL_READY) ||
+ ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
+}
+
+static int guc_wait_ucode(struct intel_uncore *uncore)
+{
+ u32 status;
+ int ret;
+
+ /*
+ * Wait for the GuC to start up.
+ * NB: Docs recommend not using the interrupt for completion.
+ * Measurements indicate this should take no more than 20ms, so a
+ * timeout here indicates that the GuC has failed and is unusable.
+ * (Higher levels of the driver may decide to reset the GuC and
+ * attempt the ucode load again if this happens.)
+ */
+ ret = wait_for(guc_ready(uncore, &status), 100);
+ DRM_DEBUG_DRIVER("GuC status %#x\n", status);
+
+ if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
+ DRM_ERROR("GuC firmware signature verification failed\n");
+ ret = -ENOEXEC;
+ }
+
+ if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) {
+ DRM_ERROR("GuC firmware exception. EIP: %#x\n",
+ intel_uncore_read(uncore, SOFT_SCRATCH(13)));
+ ret = -ENXIO;
+ }
+
+ return ret;
+}
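wait_for() above is the i915 polling helper taking a condition and a timeout
in milliseconds. Purely for illustration, an open-coded equivalent of that
poll could look like the following (the backoff timing is approximate, not
the macro's exact behaviour):

static int example_wait_guc_ready(struct intel_uncore *uncore, u32 *status)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	for (;;) {
		if (guc_ready(uncore, status))
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(10, 1000);	/* back off between reads */
	}
}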
+
+/**
+ * intel_guc_fw_upload() - load GuC uCode to device
+ * @guc: intel_guc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset.
+ *
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_guc_fw_upload(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
+ int ret;
+
+ guc_prepare_xfer(uncore);
+
+ /*
+ * Note that GuC needs the CSS header plus uKernel code to be copied
+ * by the DMA engine in one operation, whereas the RSA signature is
+ * loaded via MMIO.
+ */
+ guc_xfer_rsa(&guc->fw, uncore);
+
+ /*
+ * Current uCode expects the code to be loaded at 8k; locations below
+ * this are used for the stack.
+ */
+ ret = intel_uc_fw_upload(&guc->fw, gt, 0x2000, UOS_MOVE);
+ if (ret)
+ goto out;
+
+ ret = guc_wait_ucode(uncore);
+ if (ret)
+ goto out;
+
+ guc->fw.status = INTEL_UC_FIRMWARE_RUNNING;
+ return 0;
+
+out:
+ guc->fw.status = INTEL_UC_FIRMWARE_FAIL;
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
index 4ec5d3d9e2b0..4ec5d3d9e2b0 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.h
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index f55f3bc8524d..06a9bdfb0faf 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -23,6 +23,10 @@
#ifndef _INTEL_GUC_FWIF_H
#define _INTEL_GUC_FWIF_H
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+
#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
#define GUC_CLIENT_PRIORITY_HIGH 1
#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
@@ -39,13 +43,8 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
-/*
- * XXX: Beware that Gen9 firmware 32.x uses wrong definition for
- * GUC_MAX_INSTANCES_PER_CLASS (1) but this is harmless for us now
- * as we are not enabling GuC submission mode where this will be used
- */
#define GUC_MAX_ENGINE_CLASSES 5
-#define GUC_MAX_INSTANCES_PER_CLASS 4
+#define GUC_MAX_INSTANCES_PER_CLASS 16
#define GUC_DOORBELL_INVALID 256
@@ -122,76 +121,6 @@
#define GUC_CTL_MAX_DWORDS (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
-/**
- * DOC: GuC Firmware Layout
- *
- * The GuC firmware layout looks like this:
- *
- * +-------------------------------+
- * | uc_css_header |
- * | |
- * | contains major/minor version |
- * +-------------------------------+
- * | uCode |
- * +-------------------------------+
- * | RSA signature |
- * +-------------------------------+
- * | modulus key |
- * +-------------------------------+
- * | exponent val |
- * +-------------------------------+
- *
- * The firmware may or may not have modulus key and exponent data. The header,
- * uCode and RSA signature are must-have components that will be used by the
- * driver. The length of each component, in dwords, can be found in the header.
- * In the case that modulus and exponent are not present in the fw, a.k.a. a
- * truncated image, the length values still appear in the header.
- *
- * The driver does some basic fw size validation based on the following rules:
- *
- * 1. Header, uCode and RSA are must-have components.
- * 2. All firmware components, if present, are in the sequence illustrated in
- * the layout table above.
- * 3. Length info of each component can be found in the header, in dwords.
- * 4. Modulus and exponent keys are not required by the driver. They may be
- * absent from the fw, in which case the driver loads a truncated firmware.
- *
- * The HuC firmware layout is the same as the GuC firmware's; only the HuC
- * version information is stored differently.
- */
-
-struct uc_css_header {
- u32 module_type;
- /* header_size includes all non-uCode bits, including css_header, rsa
- * key, modulus key and exponent data. */
- u32 header_size_dw;
- u32 header_version;
- u32 module_id;
- u32 module_vendor;
- u32 date;
-#define CSS_DATE_DAY (0xFF << 0)
-#define CSS_DATE_MONTH (0xFF << 8)
-#define CSS_DATE_YEAR (0xFFFF << 16)
- u32 size_dw; /* uCode plus header_size_dw */
- u32 key_size_dw;
- u32 modulus_size_dw;
- u32 exponent_size_dw;
- u32 time;
-#define CSS_TIME_HOUR (0xFF << 0)
-#define CSS_DATE_MIN (0xFF << 8)
-#define CSS_DATE_SEC (0xFFFF << 16)
- char username[8];
- char buildnumber[12];
- u32 sw_version;
-#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
-#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
-#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
- u32 reserved[14];
- u32 header_info;
-} __packed;
-
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index e3b83ecb90b5..3460deca12c8 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -24,6 +24,7 @@
#include <linux/debugfs.h>
+#include "gt/intel_gt.h"
#include "intel_guc_log.h"
#include "i915_drv.h"
@@ -209,7 +210,7 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
log->stats[type].sampled_overflow += 16;
}
- dev_notice_ratelimited(guc_to_i915(log_to_guc(log))->drm.dev,
+ dev_notice_ratelimited(guc_to_gt(log_to_guc(log))->i915->drm.dev,
"GuC log buffer overflow\n");
}
@@ -383,7 +384,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@@ -429,7 +430,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_capture_logs(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
guc_read_update_log_buffer(log);
@@ -442,6 +443,29 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
guc_action_flush_log_complete(guc);
}
+static u32 __get_default_log_level(struct intel_guc_log *log)
+{
+ /* A negative value means "use platform/config default" */
+ if (i915_modparams.guc_log_level < 0) {
+ return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE;
+ }
+
+ if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
+ DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
+ "guc_log_level", i915_modparams.guc_log_level,
+ "verbosity too high");
+ return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
+ IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
+ GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
+ }
+
+ GEM_BUG_ON(i915_modparams.guc_log_level < GUC_LOG_LEVEL_DISABLED);
+ GEM_BUG_ON(i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX);
+ return i915_modparams.guc_log_level;
+}
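In terms of the i915.guc_log_level modparam this gives, reading straight from
the function above (and assuming GUC_LOG_LEVEL_DISABLED == 0, as the bounds
checks suggest):

	i915.guc_log_level=-1	/* auto: MAX on DEBUG/DEBUG_GEM builds,
				   otherwise NON_VERBOSE */
	i915.guc_log_level=0	/* GUC_LOG_LEVEL_DISABLED */
	i915.guc_log_level=N	/* used verbatim while N <= GUC_LOG_LEVEL_MAX */
	i915.guc_log_level=99	/* warns, then falls back to MAX or DISABLED */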
+
int intel_guc_log_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
@@ -481,7 +505,11 @@ int intel_guc_log_create(struct intel_guc_log *log)
log->vma = vma;
- log->level = i915_modparams.guc_log_level;
+ log->level = __get_default_log_level(log);
+ DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
+ log->level, enableddisabled(log->level),
+ yesno(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
+ GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
return 0;
@@ -498,7 +526,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
int ret = 0;
@@ -578,7 +606,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
* the flush notification. This means that we need to unconditionally
* flush on relay enabling, since GuC only notifies us once.
*/
- queue_work(log->relay.flush_wq, &log->relay.flush_work);
+ queue_work(system_highpri_wq, &log->relay.flush_work);
return 0;
@@ -593,7 +621,7 @@ out_unlock:
void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
intel_wakeref_t wakeref;
/*
@@ -612,10 +640,10 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
void intel_guc_log_relay_close(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_i915(guc);
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
guc_log_disable_flush_events(log);
- synchronize_irq(i915->drm.irq);
+ intel_synchronize_irq(i915);
flush_work(&log->relay.flush_work);
@@ -628,5 +656,5 @@ void intel_guc_log_relay_close(struct intel_guc_log *log)
void intel_guc_log_handle_flush_event(struct intel_guc_log *log)
{
- queue_work(log->relay.flush_wq, &log->relay.flush_work);
+ queue_work(system_highpri_wq, &log->relay.flush_work);
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 7bc763f10c03..1969572f1f79 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -66,7 +66,6 @@ struct intel_guc_log {
struct i915_vma *vma;
struct {
void *buf_addr;
- struct workqueue_struct *flush_wq;
struct work_struct flush_work;
struct rchan *channel;
struct mutex lock;
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index a214f8b71929..e3cbb23299ce 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -24,6 +24,11 @@
#ifndef _INTEL_GUC_REG_H_
#define _INTEL_GUC_REG_H_
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
/* Definitions of GuC H/W registers, bits, etc */
#define GUC_STATUS _MMIO(0xc000)
@@ -37,6 +42,7 @@
#define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_DPC_ERROR (0x60 << GS_UKERNEL_SHIFT)
+#define GS_UKERNEL_EXCEPTION (0x70 << GS_UKERNEL_SHIFT)
#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
#define GS_MIA_SHIFT 16
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
@@ -135,21 +141,21 @@ struct guc_doorbell_info {
#define GUC_PM_P24C_IER _MMIO(0xC55C)
/* GuC Interrupt Vector */
-#define GEN11_GUC_INTR_GUC2HOST (1 << 15)
-#define GEN11_GUC_INTR_EXEC_ERROR (1 << 14)
-#define GEN11_GUC_INTR_DISPLAY_EVENT (1 << 13)
-#define GEN11_GUC_INTR_SEM_SIG (1 << 12)
-#define GEN11_GUC_INTR_IOMMU2GUC (1 << 11)
-#define GEN11_GUC_INTR_DOORBELL_RANG (1 << 10)
-#define GEN11_GUC_INTR_DMA_DONE (1 << 9)
-#define GEN11_GUC_INTR_FATAL_ERROR (1 << 8)
-#define GEN11_GUC_INTR_NOTIF_ERROR (1 << 7)
-#define GEN11_GUC_INTR_SW_INT_6 (1 << 6)
-#define GEN11_GUC_INTR_SW_INT_5 (1 << 5)
-#define GEN11_GUC_INTR_SW_INT_4 (1 << 4)
-#define GEN11_GUC_INTR_SW_INT_3 (1 << 3)
-#define GEN11_GUC_INTR_SW_INT_2 (1 << 2)
-#define GEN11_GUC_INTR_SW_INT_1 (1 << 1)
-#define GEN11_GUC_INTR_SW_INT_0 (1 << 0)
+#define GUC_INTR_GUC2HOST BIT(15)
+#define GUC_INTR_EXEC_ERROR BIT(14)
+#define GUC_INTR_DISPLAY_EVENT BIT(13)
+#define GUC_INTR_SEM_SIG BIT(12)
+#define GUC_INTR_IOMMU2GUC BIT(11)
+#define GUC_INTR_DOORBELL_RANG BIT(10)
+#define GUC_INTR_DMA_DONE BIT(9)
+#define GUC_INTR_FATAL_ERROR BIT(8)
+#define GUC_INTR_NOTIF_ERROR BIT(7)
+#define GUC_INTR_SW_INT_6 BIT(6)
+#define GUC_INTR_SW_INT_5 BIT(5)
+#define GUC_INTR_SW_INT_4 BIT(4)
+#define GUC_INTR_SW_INT_3 BIT(3)
+#define GUC_INTR_SW_INT_2 BIT(2)
+#define GUC_INTR_SW_INT_1 BIT(1)
+#define GUC_INTR_SW_INT_0 BIT(0)
#endif
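Besides dropping the GEN11_ prefix, the open-coded shifts become BIT() from
<linux/bits.h>, which expands to (1UL << (n)); the bit positions themselves
are unchanged, for example:

	/* before */ #define GEN11_GUC_INTR_GUC2HOST	(1 << 15)
	/* after  */ #define GUC_INTR_GUC2HOST		BIT(15)	/* 0x8000 */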
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index db531ebc7704..b4238fe16a03 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -24,15 +24,21 @@
#include <linux/circ_buf.h>
-#include "gt/intel_engine_pm.h"
-#include "gt/intel_lrc_reg.h"
-#include "gt/intel_context.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_lrc_reg.h"
#include "intel_guc_submission.h"
+
#include "i915_drv.h"
-#define GUC_PREEMPT_FINISHED 0x1
+enum {
+ GUC_PREEMPT_NONE = 0,
+ GUC_PREEMPT_INPROGRESS,
+ GUC_PREEMPT_FINISHED,
+};
#define GUC_PREEMPT_BREADCRUMB_DWORDS 0x8
#define GUC_PREEMPT_BREADCRUMB_BYTES \
(sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
@@ -42,11 +48,10 @@
*
* GuC client:
* An intel_guc_client refers to a submission path through GuC. Currently, there
- * are two clients. One of them (the execbuf_client) is charged with all
- * submissions to the GuC, the other one (preempt_client) is responsible for
- * preempting the execbuf_client. This struct is the owner of a doorbell, a
- * process descriptor and a workqueue (all of them inside a single gem object
- * that contains all required pages for these elements).
+ * is only one client, which is charged with all submissions to the GuC. This
+ * struct is the owner of a doorbell, a process descriptor and a workqueue (all
+ * of them inside a single gem object that contains all required pages for these
+ * elements).
*
* GuC stage descriptor:
* During initialization, the driver allocates a static pool of 1024 such
@@ -84,12 +89,6 @@
*
*/
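Concretely, the single-client path described above boils down to appending one
work item per request and ringing the client's doorbell. A condensed sketch of
that path, paraphrasing guc_add_request() further down (locking elided):

static void example_submit_one(struct intel_guc *guc, struct i915_request *rq)
{
	struct intel_guc_client *client = guc->execbuf_client;
	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);

	guc_wq_item_append(client, rq->engine->guc_id, ctx_desc,
			   ring_tail, rq->fence.seqno);
	guc_ring_doorbell(client);	/* GuC samples the new wq tail */
}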
-static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_PREEMPT_ADDR);
-}
-
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -203,10 +202,10 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
- return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
+ return intel_uncore_read(uncore, GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
}
static void __init_doorbell(struct intel_guc_client *client)
@@ -366,10 +365,7 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
static void guc_stage_desc_init(struct intel_guc_client *client)
{
struct intel_guc *guc = client->guc;
- struct i915_gem_context *ctx = client->owner;
- struct i915_gem_engines_iter it;
struct guc_stage_desc *desc;
- struct intel_context *ce;
u32 gfx_addr;
desc = __get_stage_desc(client);
@@ -383,55 +379,6 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
desc->priority = client->priority;
desc->db_id = client->doorbell_id;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
- struct guc_execlist_context *lrc;
-
- if (!(ce->engine->mask & client->engines))
- continue;
-
- /* TODO: We have a design issue to be solved here. Only when we
- * receive the first batch, we know which engine is used by the
- * user. But here GuC expects the lrc and ring to be pinned. It
- * is not an issue for default context, which is the only one
- * for now who owns a GuC client. But for future owner of GuC
- * client, need to make sure lrc is pinned prior to enter here.
- */
- if (!ce->state)
- break; /* XXX: continue? */
-
- /*
- * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
- * submission or, in other words, not using a direct submission
- * model) the KMD's LRCA is not used for any work submission.
- * Instead, the GuC uses the LRCA of the user mode context (see
- * guc_add_request below).
- */
- lrc = &desc->lrc[ce->engine->guc_id];
- lrc->context_desc = lower_32_bits(ce->lrc_desc);
-
- /* The state page is after PPHWSP */
- lrc->ring_lrca = intel_guc_ggtt_offset(guc, ce->state) +
- LRC_STATE_PN * PAGE_SIZE;
-
- /* XXX: In direct submission, the GuC wants the HW context id
- * here. In proxy submission, it wants the stage id
- */
- lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
- (ce->engine->guc_id << GUC_ELC_ENGINE_OFFSET);
-
- lrc->ring_begin = intel_guc_ggtt_offset(guc, ce->ring->vma);
- lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
- lrc->ring_next_free_location = lrc->ring_begin;
- lrc->ring_current_tail_pointer_value = 0;
-
- desc->engines_used |= BIT(ce->engine->guc_id);
- }
- i915_gem_context_unlock_engines(ctx);
-
- DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
- client->engines, desc->engines_used);
- WARN_ON(desc->engines_used == 0);
-
/*
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
@@ -537,15 +484,11 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
- spin_lock(&client->wq_lock);
-
guc_wq_item_append(client, engine->guc_id, ctx_desc,
ring_tail, rq->fence.seqno);
guc_ring_doorbell(client);
client->submissions[engine->id] += 1;
-
- spin_unlock(&client->wq_lock);
}
/*
@@ -563,207 +506,72 @@ static void flush_ggtt_writes(struct i915_vma *vma)
intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
}
-static void inject_preempt_context(struct work_struct *work)
+static void guc_submit(struct intel_engine_cs *engine,
+ struct i915_request **out,
+ struct i915_request **end)
{
- struct guc_preempt_work *preempt_work =
- container_of(work, typeof(*preempt_work), work);
- struct intel_engine_cs *engine = preempt_work->engine;
- struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
- preempt_work[engine->id]);
- struct intel_guc_client *client = guc->preempt_client;
- struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- struct intel_context *ce = engine->preempt_context;
- u32 data[7];
-
- if (!ce->ring->emit) { /* recreate upon load/resume */
- u32 addr = intel_hws_preempt_done_address(engine);
- u32 *cs;
-
- cs = ce->ring->vaddr;
- if (engine->class == RENDER_CLASS) {
- cs = gen8_emit_ggtt_write_rcs(cs,
- GUC_PREEMPT_FINISHED,
- addr,
- PIPE_CONTROL_CS_STALL);
- } else {
- cs = gen8_emit_ggtt_write(cs,
- GUC_PREEMPT_FINISHED,
- addr,
- 0);
- *cs++ = MI_NOOP;
- *cs++ = MI_NOOP;
- }
- *cs++ = MI_USER_INTERRUPT;
- *cs++ = MI_NOOP;
-
- ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
- GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
+ struct intel_guc *guc = &engine->gt->uc.guc;
+ struct intel_guc_client *client = guc->execbuf_client;
- flush_ggtt_writes(ce->ring->vma);
- }
+ spin_lock(&client->wq_lock);
- spin_lock_irq(&client->wq_lock);
- guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
- GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
- spin_unlock_irq(&client->wq_lock);
+ do {
+ struct i915_request *rq = *out++;
- /*
- * If GuC firmware performs an engine reset while that engine had
- * a preemption pending, it will set the terminated attribute bit
- * on our preemption stage descriptor. GuC firmware retains all
- * pending work items for a high-priority GuC client, unlike the
- * normal-priority GuC client where work items are dropped. It
- * wants to make sure the preempt-to-idle work doesn't run when
- * scheduling resumes, and uses this bit to inform its scheduler
- * and presumably us as well. Our job is to clear it for the next
- * preemption after reset, otherwise that and future preemptions
- * will never complete. We'll just clear it every time.
- */
- stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
-
- data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
- data[1] = client->stage_id;
- data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
- INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
- data[3] = engine->guc_id;
- data[4] = guc->execbuf_client->priority;
- data[5] = guc->execbuf_client->stage_id;
- data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
-
- if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
- execlists_clear_active(&engine->execlists,
- EXECLISTS_ACTIVE_PREEMPT);
- tasklet_schedule(&engine->execlists.tasklet);
- }
+ flush_ggtt_writes(rq->ring->vma);
+ guc_add_request(guc, rq);
+ } while (out != end);
- (void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++);
+ spin_unlock(&client->wq_lock);
}
-/*
- * We're using user interrupt and HWSP value to mark that preemption has
- * finished and GPU is idle. Normally, we could unwind and continue similar to
- * execlists submission path. Unfortunately, with GuC we also need to wait for
- * it to finish its own postprocessing, before attempting to submit. Otherwise
- * GuC may silently ignore our submissions, and thus we risk losing a request
- * at best, executing out-of-order and causing a kernel panic at worst.
- */
-#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
-static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
+static inline int rq_prio(const struct i915_request *rq)
{
- struct intel_guc *guc = &engine->i915->guc;
- struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
- struct guc_ctx_report *report =
- &data->preempt_ctx_report[engine->guc_id];
-
- if (wait_for_atomic(report->report_return_status ==
- INTEL_GUC_REPORT_STATUS_COMPLETE,
- GUC_PREEMPT_POSTPROCESS_DELAY_MS))
- DRM_ERROR("Timed out waiting for GuC preemption report\n");
- /*
- * GuC is expecting that we're also going to clear the affected context
- * counter; let's also reset the return status so that we don't depend on
- * GuC resetting it after receiving another preempt action
- */
- report->affected_count = 0;
- report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
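+ /*
+ * The GuC backend no longer implements preemption, so report each
+ * request's effective priority with __NO_PREEMPTION set.
+ */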
+ return rq->sched.attr.priority | __NO_PREEMPTION;
}
-static void complete_preempt_context(struct intel_engine_cs *engine)
+static struct i915_request *schedule_in(struct i915_request *rq, int idx)
{
- struct intel_engine_execlists *execlists = &engine->execlists;
-
- GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
-
- if (inject_preempt_hang(execlists))
- return;
+ trace_i915_request_in(rq, idx);
- execlists_cancel_port_requests(execlists);
- execlists_unwind_incomplete_requests(execlists);
+ if (!rq->hw_context->inflight)
+ rq->hw_context->inflight = rq->engine;
+ intel_context_inflight_inc(rq->hw_context);
- wait_for_guc_preempt_report(engine);
- intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, 0);
+ return i915_request_get(rq);
}
-/**
- * guc_submit() - Submit commands through GuC
- * @engine: engine associated with the commands
- *
- * The only error here arises if the doorbell hardware isn't functioning
- * as expected, which really shouldn't happen.
- */
-static void guc_submit(struct intel_engine_cs *engine)
+static void schedule_out(struct i915_request *rq)
{
- struct intel_guc *guc = &engine->i915->guc;
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- unsigned int n;
-
- for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct i915_request *rq;
- unsigned int count;
-
- rq = port_unpack(&port[n], &count);
- if (rq && count == 0) {
- port_set(&port[n], port_pack(rq, ++count));
-
- flush_ggtt_writes(rq->ring->vma);
-
- guc_add_request(guc, rq);
- }
- }
-}
+ trace_i915_request_out(rq);
-static void port_assign(struct execlist_port *port, struct i915_request *rq)
-{
- GEM_BUG_ON(port_isset(port));
+ intel_context_inflight_dec(rq->hw_context);
+ if (!intel_context_inflight_count(rq->hw_context))
+ rq->hw_context->inflight = NULL;
- port_set(port, i915_request_get(rq));
+ i915_request_put(rq);
}
-static inline int rq_prio(const struct i915_request *rq)
-{
- return rq->sched.attr.priority;
-}
-
-static inline int port_prio(const struct execlist_port *port)
-{
- return rq_prio(port_request(port)) | __NO_PREEMPTION;
-}
-
-static bool __guc_dequeue(struct intel_engine_cs *engine)
+static void __guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- struct i915_request *last = NULL;
- const struct execlist_port * const last_port =
- &execlists->port[execlists->port_mask];
+ struct i915_request **first = execlists->inflight;
+ struct i915_request ** const last_port = first + execlists->port_mask;
+ struct i915_request *last = first[0];
+ struct i915_request **port;
bool submit = false;
struct rb_node *rb;
lockdep_assert_held(&engine->active.lock);
- if (port_isset(port)) {
- if (intel_engine_has_preemption(engine)) {
- struct guc_preempt_work *preempt_work =
- &engine->i915->guc.preempt_work[engine->id];
- int prio = execlists->queue_priority_hint;
-
- if (i915_scheduler_need_preempt(prio,
- port_prio(port))) {
- execlists_set_active(execlists,
- EXECLISTS_ACTIVE_PREEMPT);
- queue_work(engine->i915->guc.preempt_wq,
- &preempt_work->work);
- return false;
- }
- }
+ if (last) {
+ if (*++first)
+ return;
- port++;
- if (port_isset(port))
- return false;
+ last = NULL;
}
- GEM_BUG_ON(port_isset(port));
+ port = first;
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -774,18 +582,15 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
if (port == last_port)
goto done;
- if (submit)
- port_assign(port, last);
+ *port = schedule_in(last,
+ port - execlists->inflight);
port++;
}
list_del_init(&rq->sched.link);
-
__i915_request_submit(rq);
- trace_i915_request_in(rq, port_index(port, execlists));
-
- last = rq;
submit = true;
+ last = rq;
}
rb_erase_cached(&p->node, &execlists->queue);
@@ -794,58 +599,36 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
done:
execlists->queue_priority_hint =
rb ? to_priolist(rb)->priority : INT_MIN;
- if (submit)
- port_assign(port, last);
- if (last)
- execlists_user_begin(execlists, execlists->port);
-
- /* We must always keep the beast fed if we have work piled up */
- GEM_BUG_ON(port_isset(execlists->port) &&
- !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
- GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
- !port_isset(execlists->port));
-
- return submit;
-}
-
-static void guc_dequeue(struct intel_engine_cs *engine)
-{
- if (__guc_dequeue(engine))
- guc_submit(engine);
+ if (submit) {
+ *port = schedule_in(last, port - execlists->inflight);
+ *++port = NULL;
+ guc_submit(engine, first, port);
+ }
+ execlists->active = execlists->inflight;
}
static void guc_submission_tasklet(unsigned long data)
{
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
- struct execlist_port *port = execlists->port;
- struct i915_request *rq;
+ struct i915_request **port, *rq;
unsigned long flags;
spin_lock_irqsave(&engine->active.lock, flags);
- rq = port_request(port);
- while (rq && i915_request_completed(rq)) {
- trace_i915_request_out(rq);
- i915_request_put(rq);
-
- port = execlists_port_complete(execlists, port);
- if (port_isset(port)) {
- execlists_user_begin(execlists, port);
- rq = port_request(port);
- } else {
- execlists_user_end(execlists);
- rq = NULL;
- }
- }
+ for (port = execlists->inflight; (rq = *port); port++) {
+ if (!i915_request_completed(rq))
+ break;
- if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
- intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
- GUC_PREEMPT_FINISHED)
- complete_preempt_context(engine);
+ schedule_out(rq);
+ }
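+ /* squash the completed requests off the head of the inflight array */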
+ if (port != execlists->inflight) {
+ int idx = port - execlists->inflight;
+ int rem = ARRAY_SIZE(execlists->inflight) - idx;
+ memmove(execlists->inflight, port, rem * sizeof(*port));
+ }
- if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
- guc_dequeue(engine);
+ __guc_dequeue(engine);
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -866,16 +649,6 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
* prevents the race.
*/
__tasklet_disable_sync_once(&execlists->tasklet);
-
- /*
- * We're using worker to queue preemption requests from the tasklet in
- * GuC submission mode.
- * Even though tasklet was disabled, we may still have a worker queued.
- * Let's make sure that all workers scheduled before disabling the
- * tasklet are completed before continuing with the reset.
- */
- if (engine->i915->guc.preempt_wq)
- flush_workqueue(engine->i915->guc.preempt_wq);
}
static void guc_reset(struct intel_engine_cs *engine, bool stalled)
@@ -896,7 +669,7 @@ static void guc_reset(struct intel_engine_cs *engine, bool stalled)
if (!i915_request_started(rq))
stalled = false;
- i915_reset_request(rq, stalled);
+ __i915_request_reset(rq, stalled);
intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
out_unlock:
@@ -959,7 +732,6 @@ static void guc_cancel_requests(struct intel_engine_cs *engine)
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(port_isset(execlists->port));
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -1014,25 +786,18 @@ static bool guc_verify_doorbells(struct intel_guc *guc)
/**
* guc_client_alloc() - Allocate an intel_guc_client
- * @dev_priv: driver private data structure
- * @engines: The set of engines to enable for this client
+ * @guc: the intel_guc structure
* @priority: one of four priority levels: _CRITICAL, _HIGH, _NORMAL and _LOW.
* The kernel client to replace ExecList submission is created with
* NORMAL priority. Priority of a client for scheduler can be HIGH,
* while a preemption context can use CRITICAL.
- * @ctx: the context that owns the client (we use the default render
- * context)
*
* Return: An intel_guc_client object if success, else NULL.
*/
static struct intel_guc_client *
-guc_client_alloc(struct drm_i915_private *dev_priv,
- u32 engines,
- u32 priority,
- struct i915_gem_context *ctx)
+guc_client_alloc(struct intel_guc *guc, u32 priority)
{
struct intel_guc_client *client;
- struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
void *vaddr;
int ret;
@@ -1042,8 +807,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
return ERR_PTR(-ENOMEM);
client->guc = guc;
- client->owner = ctx;
- client->engines = engines;
client->priority = priority;
client->doorbell_id = GUC_DOORBELL_INVALID;
spin_lock_init(&client->wq_lock);
@@ -1088,8 +851,8 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
- DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
- priority, client, client->engines, client->stage_id);
+ DRM_DEBUG_DRIVER("new priority %u client %p: stage_id %u\n",
+ priority, client, client->stage_id);
DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
client->doorbell_id, client->doorbell_offset);
@@ -1129,36 +892,17 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce)
static int guc_clients_create(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_guc_client *client;
GEM_BUG_ON(guc->execbuf_client);
- GEM_BUG_ON(guc->preempt_client);
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- GUC_CLIENT_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
+ client = guc_client_alloc(guc, GUC_CLIENT_PRIORITY_KMD_NORMAL);
if (IS_ERR(client)) {
DRM_ERROR("Failed to create GuC client for submission!\n");
return PTR_ERR(client);
}
guc->execbuf_client = client;
- if (dev_priv->preempt_context) {
- client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- GUC_CLIENT_PRIORITY_KMD_HIGH,
- dev_priv->preempt_context);
- if (IS_ERR(client)) {
- DRM_ERROR("Failed to create GuC client for preemption!\n");
- guc_client_free(guc->execbuf_client);
- guc->execbuf_client = NULL;
- return PTR_ERR(client);
- }
- guc->preempt_client = client;
- }
-
return 0;
}
@@ -1166,10 +910,6 @@ static void guc_clients_destroy(struct intel_guc *guc)
{
struct intel_guc_client *client;
- client = fetch_and_zero(&guc->preempt_client);
- if (client)
- guc_client_free(client);
-
client = fetch_and_zero(&guc->execbuf_client);
if (client)
guc_client_free(client);
@@ -1201,7 +941,7 @@ static void __guc_client_disable(struct intel_guc_client *client)
* the case, instead of trying (in vain) to communicate with it, let's
* just cleanup the doorbell HW and our internal state.
*/
- if (intel_guc_is_loaded(client->guc))
+ if (intel_guc_is_running(client->guc))
destroy_doorbell(client);
else
__fini_doorbell(client);
@@ -1212,28 +952,11 @@ static void __guc_client_disable(struct intel_guc_client *client)
static int guc_clients_enable(struct intel_guc *guc)
{
- int ret;
-
- ret = __guc_client_enable(guc->execbuf_client);
- if (ret)
- return ret;
-
- if (guc->preempt_client) {
- ret = __guc_client_enable(guc->preempt_client);
- if (ret) {
- __guc_client_disable(guc->execbuf_client);
- return ret;
- }
- }
-
- return 0;
+ return __guc_client_enable(guc->execbuf_client);
}
static void guc_clients_disable(struct intel_guc *guc)
{
- if (guc->preempt_client)
- __guc_client_disable(guc->preempt_client);
-
if (guc->execbuf_client)
__guc_client_disable(guc->execbuf_client);
}
@@ -1244,9 +967,6 @@ static void guc_clients_disable(struct intel_guc *guc)
*/
int intel_guc_submission_init(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
int ret;
if (guc->stage_desc_pool)
@@ -1266,11 +986,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
if (ret)
goto err_pool;
- for_each_engine(engine, dev_priv, id) {
- guc->preempt_work[id].engine = engine;
- INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
- }
-
return 0;
err_pool:
@@ -1280,13 +995,6 @@ err_pool:
void intel_guc_submission_fini(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- cancel_work_sync(&guc->preempt_work[id].work);
-
guc_clients_destroy(guc);
WARN_ON(!guc_verify_doorbells(guc));
@@ -1294,9 +1002,10 @@ void intel_guc_submission_fini(struct intel_guc *guc)
guc_stage_desc_pool_destroy(guc);
}
-static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
+static void guc_interrupts_capture(struct intel_gt *gt)
{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
@@ -1305,16 +1014,16 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
* to GuC
*/
irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, gt->i915, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
/* These three registers have the same bit definitions */
- I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
- I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
- I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+ intel_uncore_write(uncore, GUC_BCS_RCS_IER, ~irqs);
+ intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, ~irqs);
+ intel_uncore_write(uncore, GUC_WD_VECS_IER, ~irqs);
/*
* The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
@@ -1339,9 +1048,10 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
+static void guc_interrupts_release(struct intel_gt *gt)
{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ struct intel_rps *rps = &gt->i915->gt_pm.rps;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int irqs;
@@ -1352,13 +1062,13 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
*/
irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, gt->i915, id)
ENGINE_WRITE(engine, RING_MODE_GEN7, irqs);
/* route all GT interrupts to the host */
- I915_WRITE(GUC_BCS_RCS_IER, 0);
- I915_WRITE(GUC_VCS2_VCS1_IER, 0);
- I915_WRITE(GUC_WD_VECS_IER, 0);
+ intel_uncore_write(uncore, GUC_BCS_RCS_IER, 0);
+ intel_uncore_write(uncore, GUC_VCS2_VCS1_IER, 0);
+ intel_uncore_write(uncore, GUC_WD_VECS_IER, 0);
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
@@ -1408,7 +1118,7 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
int intel_guc_submission_enable(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err;
@@ -1422,7 +1132,7 @@ int intel_guc_submission_enable(struct intel_guc *guc)
* and it is guaranteed that it will remove the work item from the
* queue before our request is completed.
*/
- BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
+ BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) *
sizeof(struct guc_wq_item) *
I915_NUM_ENGINES > GUC_WQ_SIZE);
@@ -1433,9 +1143,9 @@ int intel_guc_submission_enable(struct intel_guc *guc)
return err;
/* Take over from manual control of ELSP (execlists) */
- guc_interrupts_capture(dev_priv);
+ guc_interrupts_capture(gt);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, gt->i915, id) {
engine->set_default_submission = guc_set_default_submission;
engine->set_default_submission(engine);
}
@@ -1445,14 +1155,14 @@ int intel_guc_submission_enable(struct intel_guc *guc)
void intel_guc_submission_disable(struct intel_guc *guc)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_gt *gt = guc_to_gt(guc);
- GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
+ GEM_BUG_ON(gt->awake); /* GT should be parked first */
- guc_interrupts_release(dev_priv);
+ guc_interrupts_release(gt);
guc_clients_disable(guc);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_guc.c"
+#include "selftest_guc.c"
#endif
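
A standalone sketch (not part of the patch) of the capacity invariant behind the BUILD_BUG_ON in intel_guc_submission_enable() above: the shared GuC workqueue must hold the worst case of in-flight work items across every engine. All sizes below are made up for illustration; the real values come from struct guc_wq_item and GUC_WQ_SIZE, which are not shown in this diff.

#include <assert.h>

#define NUM_INFLIGHT  2      /* assumed ARRAY_SIZE(execlists.inflight) */
#define WQ_ITEM_BYTES 16     /* assumed sizeof(struct guc_wq_item) */
#define NUM_ENGINES   8      /* assumed I915_NUM_ENGINES */
#define WQ_SIZE       4096   /* assumed GUC_WQ_SIZE */

int main(void)
{
        /* same shape as the BUILD_BUG_ON: the worst case must fit */
        assert(NUM_INFLIGHT * WQ_ITEM_BYTES * NUM_ENGINES <= WQ_SIZE);
        return 0;
}
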
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index 7d823a513b9c..87a38cb6faf3 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -58,11 +58,9 @@ struct drm_i915_private;
struct intel_guc_client {
struct i915_vma *vma;
void *vaddr;
- struct i915_gem_context *owner;
struct intel_guc *guc;
/* bitmap of (host) engine ids */
- u32 engines;
u32 priority;
u32 stage_id;
u32 proc_desc_offset;
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index fb6f693d3cac..c9535caba844 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -24,12 +24,13 @@
#include <linux/types.h>
+#include "gt/intel_gt.h"
#include "intel_huc.h"
#include "i915_drv.h"
void intel_huc_init_early(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_i915(huc);
+ struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
intel_huc_fw_init_early(huc);
@@ -44,19 +45,12 @@ void intel_huc_init_early(struct intel_huc *huc)
}
}
-int intel_huc_init_misc(struct intel_huc *huc)
-{
- struct drm_i915_private *i915 = huc_to_i915(huc);
-
- intel_uc_fw_fetch(i915, &huc->fw);
- return 0;
-}
-
static int intel_huc_rsa_data_create(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_i915(huc);
- struct intel_guc *guc = &i915->guc;
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_guc *guc = &gt->uc.guc;
struct i915_vma *vma;
+ size_t copied;
void *vaddr;
/*
@@ -69,6 +63,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
* the authentication since its GGTT offset will be GuC
* accessible.
*/
+ GEM_BUG_ON(huc->fw.rsa_size > PAGE_SIZE);
vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -79,26 +74,43 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
return PTR_ERR(vaddr);
}
+ copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
+ GEM_BUG_ON(copied < huc->fw.rsa_size);
+
+ i915_gem_object_unpin_map(vma->obj);
+
huc->rsa_data = vma;
- huc->rsa_data_vaddr = vaddr;
return 0;
}
static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
{
- i915_vma_unpin_and_release(&huc->rsa_data, I915_VMA_RELEASE_MAP);
+ i915_vma_unpin_and_release(&huc->rsa_data, 0);
}
int intel_huc_init(struct intel_huc *huc)
{
int err;
- err = intel_huc_rsa_data_create(huc);
+ err = intel_uc_fw_init(&huc->fw);
if (err)
return err;
- return intel_uc_fw_init(&huc->fw);
+ /*
+ * HuC firmware image is outside GuC accessible range.
+ * Copy the RSA signature out of the image into
+ * a perma-pinned region set aside for it
+ */
+ err = intel_huc_rsa_data_create(huc);
+ if (err)
+ goto out_fini;
+
+ return 0;
+
+out_fini:
+ intel_uc_fw_fini(&huc->fw);
+ return err;
}
void intel_huc_fini(struct intel_huc *huc)
@@ -120,12 +132,12 @@ void intel_huc_fini(struct intel_huc *huc)
*/
int intel_huc_auth(struct intel_huc *huc)
{
- struct drm_i915_private *i915 = huc_to_i915(huc);
- struct intel_guc *guc = &i915->guc;
+ struct intel_gt *gt = huc_to_gt(huc);
+ struct intel_guc *guc = &gt->uc.guc;
int ret;
- if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -ENOEXEC;
+ GEM_BUG_ON(!intel_uc_fw_is_loaded(&huc->fw));
+ GEM_BUG_ON(intel_huc_is_authenticated(huc));
ret = intel_guc_auth_huc(guc,
intel_guc_ggtt_offset(guc, huc->rsa_data));
@@ -135,7 +147,7 @@ int intel_huc_auth(struct intel_huc *huc)
}
/* Check authentication status, it should be done by now */
- ret = __intel_wait_for_register(&i915->uncore,
+ ret = __intel_wait_for_register(gt->uncore,
huc->status.reg,
huc->status.mask,
huc->status.value,
@@ -145,10 +157,12 @@ int intel_huc_auth(struct intel_huc *huc)
goto fail;
}
+ huc->fw.status = INTEL_UC_FIRMWARE_RUNNING;
+
return 0;
fail:
- huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
+ huc->fw.status = INTEL_UC_FIRMWARE_FAIL;
DRM_ERROR("HuC: Authentication failed %d\n", ret);
return ret;
@@ -167,16 +181,15 @@ fail:
*/
int intel_huc_check_status(struct intel_huc *huc)
{
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ struct intel_gt *gt = huc_to_gt(huc);
intel_wakeref_t wakeref;
- bool status = false;
+ u32 status = 0;
- if (!HAS_HUC(dev_priv))
+ if (!intel_uc_is_using_huc(&gt->uc))
return -ENODEV;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- status = (I915_READ(huc->status.reg) & huc->status.mask) ==
- huc->status.value;
+ with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+ status = intel_uncore_read(gt->uncore, huc->status.reg);
- return status;
+ return (status & huc->status.mask) == huc->status.value;
}
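
The rework of intel_huc_check_status() above also changes its return contract: instead of a bool it now returns a negative errno when HuC is not in use, and otherwise the 0/1 result of masking the status register. A standalone model of that contract, with made-up register values:

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* mirrors the (status & mask) == value logic above; not i915 code */
static int check_status(bool using_huc, uint32_t reg, uint32_t mask, uint32_t val)
{
        if (!using_huc)
                return -ENODEV;
        return (reg & mask) == val;
}

int main(void)
{
        assert(check_status(false, 0x0, 0x0, 0x0) == -ENODEV);
        assert(check_status(true, 0xb0, 0xf0, 0xb0) == 1);  /* authenticated */
        assert(check_status(true, 0x00, 0xf0, 0xb0) == 0);  /* not yet */
        return 0;
}
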
diff --git a/drivers/gpu/drm/i915/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 2a6c94e79f17..4465209ce233 100644
--- a/drivers/gpu/drm/i915/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -35,7 +35,6 @@ struct intel_huc {
/* HuC-specific additions */
struct i915_vma *rsa_data;
- void *rsa_data_vaddr;
struct {
i915_reg_t reg;
@@ -45,21 +44,20 @@ struct intel_huc {
};
void intel_huc_init_early(struct intel_huc *huc);
-int intel_huc_init_misc(struct intel_huc *huc);
int intel_huc_init(struct intel_huc *huc);
void intel_huc_fini(struct intel_huc *huc);
int intel_huc_auth(struct intel_huc *huc);
int intel_huc_check_status(struct intel_huc *huc);
-static inline void intel_huc_fini_misc(struct intel_huc *huc)
-{
- intel_uc_fw_cleanup_fetch(&huc->fw);
-}
-
static inline int intel_huc_sanitize(struct intel_huc *huc)
{
intel_uc_fw_sanitize(&huc->fw);
return 0;
}
+static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
+{
+ return intel_uc_fw_is_running(&huc->fw);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
new file mode 100644
index 000000000000..0e885859c828
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -0,0 +1,53 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "gt/intel_gt.h"
+#include "intel_huc_fw.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: HuC Firmware
+ *
+ * Motivation:
+ * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can use the firmware
+ * capabilities by adding HuC specific commands to batch buffers.
+ *
+ * Implementation:
+ * The same firmware loader is used as for the GuC. However, the actual
+ * loading to HW is deferred until GEM initialization is done.
+ *
+ * Note that HuC firmware loading must be done before GuC loading.
+ */
+
+/**
+ * intel_huc_fw_init_early() - initializes HuC firmware struct
+ * @huc: intel_huc struct
+ *
+ * On platforms with HuC, this function selects the firmware for uploading.
+ */
+void intel_huc_fw_init_early(struct intel_huc *huc)
+{
+ intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, huc_to_gt(huc)->i915);
+}
+
+/**
+ * intel_huc_fw_upload() - load HuC uCode to device
+ * @huc: intel_huc structure
+ *
+ * Called from intel_uc_init_hw() during driver load, resume from sleep and
+ * after a GPU reset. Note that HuC must be loaded before GuC.
+ *
+ * The firmware image should have already been fetched into memory, so only
+ * check that fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return: non-zero code on error
+ */
+int intel_huc_fw_upload(struct intel_huc *huc)
+{
+ /* HW doesn't look at destination address for HuC, so set it to 0 */
+ return intel_uc_fw_upload(&huc->fw, huc_to_gt(huc), 0, HUC_UKERNEL);
+}
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
index 8a00a0ebddc5..8a00a0ebddc5 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.h
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
new file mode 100644
index 000000000000..6eb8bb3fa252
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "gt/intel_gt.h"
+#include "gt/intel_reset.h"
+#include "intel_guc.h"
+#include "intel_guc_ads.h"
+#include "intel_guc_submission.h"
+#include "intel_uc.h"
+
+#include "i915_drv.h"
+
+static void guc_free_load_err_log(struct intel_guc *guc);
+
+/* Reset GuC providing us with fresh state for both GuC and HuC.
+ */
+static int __intel_uc_reset_hw(struct intel_uc *uc)
+{
+ struct intel_gt *gt = uc_to_gt(uc);
+ int ret;
+ u32 guc_status;
+
+ ret = intel_reset_guc(gt);
+ if (ret) {
+ DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
+ return ret;
+ }
+
+ guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
+ WARN(!(guc_status & GS_MIA_IN_RESET),
+ "GuC status: 0x%x, MIA core expected to be in reset\n",
+ guc_status);
+
+ return ret;
+}
+
+static int __get_platform_enable_guc(struct intel_uc *uc)
+{
+ struct intel_uc_fw *guc_fw = &uc->guc.fw;
+ struct intel_uc_fw *huc_fw = &uc->huc.fw;
+ int enable_guc = 0;
+
+ if (!HAS_GT_UC(uc_to_gt(uc)->i915))
+ return 0;
+
+ /* We don't want to enable GuC/HuC on pre-Gen11 by default */
+ if (INTEL_GEN(uc_to_gt(uc)->i915) < 11)
+ return 0;
+
+ if (intel_uc_fw_supported(guc_fw) && intel_uc_fw_supported(huc_fw))
+ enable_guc |= ENABLE_GUC_LOAD_HUC;
+
+ return enable_guc;
+}
+
+/**
+ * sanitize_options_early - sanitize uC related modparam options
+ * @uc: the intel_uc structure
+ *
+ * In case of "enable_guc" option this function will attempt to modify
+ * it only if it was initially set to "auto(-1)". Default value for this
+ * modparam varies between platforms and it is hardcoded in driver code.
+ * Any other modparam value is only monitored against availability of the
+ * related hardware or firmware definitions.
+ */
+static void sanitize_options_early(struct intel_uc *uc)
+{
+ struct intel_uc_fw *guc_fw = &uc->guc.fw;
+ struct intel_uc_fw *huc_fw = &uc->huc.fw;
+
+ /* A negative value means "use platform default" */
+ if (i915_modparams.enable_guc < 0)
+ i915_modparams.enable_guc = __get_platform_enable_guc(uc);
+
+ DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
+ i915_modparams.enable_guc,
+ yesno(intel_uc_is_using_guc_submission(uc)),
+ yesno(intel_uc_is_using_huc(uc)));
+
+ /* Verify GuC firmware availability */
+ if (intel_uc_is_using_guc(uc) && !intel_uc_fw_supported(guc_fw)) {
+ DRM_WARN("Incompatible option detected: enable_guc=%d, "
+ "but GuC is not supported!\n",
+ i915_modparams.enable_guc);
+ DRM_INFO("Disabling GuC/HuC loading!\n");
+ i915_modparams.enable_guc = 0;
+ }
+
+ /* Verify HuC firmware availability */
+ if (intel_uc_is_using_huc(uc) && !intel_uc_fw_supported(huc_fw)) {
+ DRM_WARN("Incompatible option detected: enable_guc=%d, "
+ "but HuC is not supported!\n",
+ i915_modparams.enable_guc);
+ DRM_INFO("Disabling HuC loading!\n");
+ i915_modparams.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
+ }
+
+ /* XXX: GuC submission is unavailable for now */
+ if (intel_uc_is_using_guc_submission(uc)) {
+ DRM_INFO("Incompatible option detected: enable_guc=%d, "
+ "but GuC submission is not supported!\n",
+ i915_modparams.enable_guc);
+ DRM_INFO("Switching to non-GuC submission mode!\n");
+ i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
+ }
+
+ /* Make sure that sanitization was done */
+ GEM_BUG_ON(i915_modparams.enable_guc < 0);
+}
+
+void intel_uc_init_early(struct intel_uc *uc)
+{
+ intel_guc_init_early(&uc->guc);
+ intel_huc_init_early(&uc->huc);
+
+ sanitize_options_early(uc);
+}
+
+void intel_uc_cleanup_early(struct intel_uc *uc)
+{
+ guc_free_load_err_log(&uc->guc);
+}
+
+/**
+ * intel_uc_init_mmio - setup uC MMIO access
+ * @uc: the intel_uc structure
+ *
+ * Setup minimal state necessary for MMIO accesses later in the
+ * initialization sequence.
+ */
+void intel_uc_init_mmio(struct intel_uc *uc)
+{
+ intel_guc_init_send_regs(&uc->guc);
+}
+
+static void guc_capture_load_err_log(struct intel_guc *guc)
+{
+ if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
+ return;
+
+ if (!guc->load_err_log)
+ guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
+}
+
+static void guc_free_load_err_log(struct intel_guc *guc)
+{
+ if (guc->load_err_log)
+ i915_gem_object_put(guc->load_err_log);
+}
+
+/*
+ * Events triggered while CT buffers are disabled are logged in the SCRATCH_15
+ * register using the same bits used in the CT message payload. Since our
+ * communication channel with guc is turned off at this point, we can save the
+ * message and handle it after we turn it back on.
+ */
+static void guc_clear_mmio_msg(struct intel_guc *guc)
+{
+ intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0);
+}
+
+static void guc_get_mmio_msg(struct intel_guc *guc)
+{
+ u32 val;
+
+ spin_lock_irq(&guc->irq_lock);
+
+ val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15));
+ guc->mmio_msg |= val & guc->msg_enabled_mask;
+
+ /*
+ * clear all events, including the ones we're not currently servicing,
+ * to make sure we don't try to process a stale message if we enable
+ * handling of more events later.
+ */
+ guc_clear_mmio_msg(guc);
+
+ spin_unlock_irq(&guc->irq_lock);
+}
+
+static void guc_handle_mmio_msg(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+
+ /* we need communication to be enabled to reply to GuC */
+ GEM_BUG_ON(guc->handler == intel_guc_to_host_event_handler_nop);
+
+ if (!guc->mmio_msg)
+ return;
+
+ spin_lock_irq(&i915->irq_lock);
+ intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
+ spin_unlock_irq(&i915->irq_lock);
+
+ guc->mmio_msg = 0;
+}
+
+static void guc_reset_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.reset(guc);
+}
+
+static void guc_enable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.enable(guc);
+}
+
+static void guc_disable_interrupts(struct intel_guc *guc)
+{
+ guc->interrupts.disable(guc);
+}
+
+static int guc_enable_communication(struct intel_guc *guc)
+{
+ struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ int ret;
+
+ ret = intel_guc_ct_enable(&guc->ct);
+ if (ret)
+ return ret;
+
+ guc->send = intel_guc_send_ct;
+ guc->handler = intel_guc_to_host_event_handler_ct;
+
+ /* check for mmio messages received before/during the CT enable */
+ guc_get_mmio_msg(guc);
+ guc_handle_mmio_msg(guc);
+
+ guc_enable_interrupts(guc);
+
+ /* check for CT messages received before we enabled interrupts */
+ spin_lock_irq(&i915->irq_lock);
+ intel_guc_to_host_event_handler_ct(guc);
+ spin_unlock_irq(&i915->irq_lock);
+
+ DRM_INFO("GuC communication enabled\n");
+
+ return 0;
+}
+
+static void guc_stop_communication(struct intel_guc *guc)
+{
+ intel_guc_ct_stop(&guc->ct);
+
+ guc->send = intel_guc_send_nop;
+ guc->handler = intel_guc_to_host_event_handler_nop;
+
+ guc_clear_mmio_msg(guc);
+}
+
+static void guc_disable_communication(struct intel_guc *guc)
+{
+ /*
+ * Events generated during or after CT disable are logged by GuC
+ * via mmio. Make sure the register is clear before disabling CT since
+ * all events we cared about have already been processed via CT.
+ */
+ guc_clear_mmio_msg(guc);
+
+ guc_disable_interrupts(guc);
+
+ guc->send = intel_guc_send_nop;
+ guc->handler = intel_guc_to_host_event_handler_nop;
+
+ intel_guc_ct_disable(&guc->ct);
+
+ /*
+ * Check for messages received during/after the CT disable. We do not
+ * expect any messages to have arrived via CT between the interrupt
+ * disable and the CT disable because GuC should've been idle until we
+ * triggered the CT disable protocol.
+ */
+ guc_get_mmio_msg(guc);
+
+ DRM_INFO("GuC communication disabled\n");
+}
+
+void intel_uc_fetch_firmwares(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+
+ if (!intel_uc_is_using_guc(uc))
+ return;
+
+ intel_uc_fw_fetch(&uc->guc.fw, i915);
+
+ if (intel_uc_is_using_huc(uc))
+ intel_uc_fw_fetch(&uc->huc.fw, i915);
+}
+
+void intel_uc_cleanup_firmwares(struct intel_uc *uc)
+{
+ if (!intel_uc_is_using_guc(uc))
+ return;
+
+ if (intel_uc_is_using_huc(uc))
+ intel_uc_fw_cleanup_fetch(&uc->huc.fw);
+
+ intel_uc_fw_cleanup_fetch(&uc->guc.fw);
+}
+
+int intel_uc_init(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret;
+
+ if (!intel_uc_is_using_guc(uc))
+ return 0;
+
+ if (!intel_uc_fw_supported(&guc->fw))
+ return -ENODEV;
+
+ /* XXX: GuC submission is unavailable for now */
+ GEM_BUG_ON(intel_uc_is_using_guc_submission(uc));
+
+ ret = intel_guc_init(guc);
+ if (ret)
+ return ret;
+
+ if (intel_uc_is_using_huc(uc)) {
+ ret = intel_huc_init(huc);
+ if (ret)
+ goto err_guc;
+ }
+
+ return 0;
+
+err_guc:
+ intel_guc_fini(guc);
+ return ret;
+}
+
+void intel_uc_fini(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_uc_is_using_guc(uc))
+ return;
+
+ GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
+
+ if (intel_uc_is_using_huc(uc))
+ intel_huc_fini(&uc->huc);
+
+ intel_guc_fini(guc);
+}
+
+static void __uc_sanitize(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+
+ GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
+
+ intel_huc_sanitize(huc);
+ intel_guc_sanitize(guc);
+
+ __intel_uc_reset_hw(uc);
+}
+
+void intel_uc_sanitize(struct intel_uc *uc)
+{
+ if (!intel_uc_is_using_guc(uc))
+ return;
+
+ __uc_sanitize(uc);
+}
+
+int intel_uc_init_hw(struct intel_uc *uc)
+{
+ struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_guc *guc = &uc->guc;
+ struct intel_huc *huc = &uc->huc;
+ int ret, attempts;
+
+ if (!intel_uc_is_using_guc(uc))
+ return 0;
+
+ GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
+
+ guc_reset_interrupts(guc);
+
+ /* WaEnableuKernelHeaderValidFix:skl */
+ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
+ if (IS_GEN(i915, 9))
+ attempts = 3;
+ else
+ attempts = 1;
+
+ while (attempts--) {
+ /*
+ * Always reset the GuC just before (re)loading, so
+ * that the state and timing are fairly predictable
+ */
+ ret = __intel_uc_reset_hw(uc);
+ if (ret)
+ goto err_out;
+
+ if (intel_uc_is_using_huc(uc)) {
+ ret = intel_huc_fw_upload(huc);
+ if (ret && intel_uc_fw_is_overridden(&huc->fw))
+ goto err_out;
+ }
+
+ intel_guc_ads_reset(guc);
+ intel_guc_write_params(guc);
+ ret = intel_guc_fw_upload(guc);
+ if (ret == 0)
+ break;
+
+ DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
+ "retry %d more time(s)\n", ret, attempts);
+ }
+
+ /* Did we succeed or run out of retries? */
+ if (ret)
+ goto err_log_capture;
+
+ ret = guc_enable_communication(guc);
+ if (ret)
+ goto err_log_capture;
+
+ if (intel_uc_fw_is_loaded(&huc->fw)) {
+ ret = intel_huc_auth(huc);
+ if (ret && intel_uc_fw_is_overridden(&huc->fw))
+ goto err_communication;
+ }
+
+ ret = intel_guc_sample_forcewake(guc);
+ if (ret)
+ goto err_communication;
+
+ if (intel_uc_is_using_guc_submission(uc)) {
+ ret = intel_guc_submission_enable(guc);
+ if (ret)
+ goto err_communication;
+ }
+
+ dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
+ guc->fw.major_ver_found, guc->fw.minor_ver_found);
+ dev_info(i915->drm.dev, "GuC submission %s\n",
+ enableddisabled(intel_uc_is_using_guc_submission(uc)));
+ dev_info(i915->drm.dev, "HuC %s\n",
+ enableddisabled(intel_huc_is_authenticated(huc)));
+
+ return 0;
+
+ /*
+ * We've failed to load the firmware :(
+ */
+err_communication:
+ guc_disable_communication(guc);
+err_log_capture:
+ guc_capture_load_err_log(guc);
+err_out:
+ __uc_sanitize(uc);
+
+ /*
+ * Note that there is no fallback as either user explicitly asked for
+ * the GuC or driver default option was to run with the GuC enabled.
+ */
+ if (GEM_WARN_ON(ret == -EIO))
+ ret = -EINVAL;
+
+ dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
+ return ret;
+}
+
+void intel_uc_fini_hw(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ GEM_BUG_ON(!intel_uc_fw_supported(&guc->fw));
+
+ if (intel_uc_is_using_guc_submission(uc))
+ intel_guc_submission_disable(guc);
+
+ guc_disable_communication(guc);
+ __uc_sanitize(uc);
+}
+
+/**
+ * intel_uc_reset_prepare - Prepare for reset
+ * @uc: the intel_uc structure
+ *
+ * Prepares for a full GPU reset.
+ */
+void intel_uc_reset_prepare(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ guc_stop_communication(guc);
+ __uc_sanitize(uc);
+}
+
+void intel_uc_runtime_suspend(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ int err;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ err = intel_guc_suspend(guc);
+ if (err)
+ DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+
+ guc_disable_communication(guc);
+}
+
+void intel_uc_suspend(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ intel_wakeref_t wakeref;
+
+ if (!intel_guc_is_running(guc))
+ return;
+
+ with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
+ intel_uc_runtime_suspend(uc);
+}
+
+int intel_uc_resume(struct intel_uc *uc)
+{
+ struct intel_guc *guc = &uc->guc;
+ int err;
+
+ if (!intel_guc_is_running(guc))
+ return 0;
+
+ guc_enable_communication(guc);
+
+ err = intel_guc_resume(guc);
+ if (err) {
+ DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
+ return err;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 3ea06c87dfcd..fe3362fd7706 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -28,34 +28,39 @@
#include "intel_huc.h"
#include "i915_params.h"
-void intel_uc_init_early(struct drm_i915_private *dev_priv);
-void intel_uc_cleanup_early(struct drm_i915_private *dev_priv);
-void intel_uc_init_mmio(struct drm_i915_private *dev_priv);
-int intel_uc_init_misc(struct drm_i915_private *dev_priv);
-void intel_uc_fini_misc(struct drm_i915_private *dev_priv);
-void intel_uc_sanitize(struct drm_i915_private *dev_priv);
-int intel_uc_init_hw(struct drm_i915_private *dev_priv);
-void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
-int intel_uc_init(struct drm_i915_private *dev_priv);
-void intel_uc_fini(struct drm_i915_private *dev_priv);
-void intel_uc_reset_prepare(struct drm_i915_private *i915);
-void intel_uc_suspend(struct drm_i915_private *i915);
-void intel_uc_runtime_suspend(struct drm_i915_private *i915);
-int intel_uc_resume(struct drm_i915_private *dev_priv);
+struct intel_uc {
+ struct intel_guc guc;
+ struct intel_huc huc;
+};
-static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
+void intel_uc_init_early(struct intel_uc *uc);
+void intel_uc_cleanup_early(struct intel_uc *uc);
+void intel_uc_init_mmio(struct intel_uc *uc);
+void intel_uc_fetch_firmwares(struct intel_uc *uc);
+void intel_uc_cleanup_firmwares(struct intel_uc *uc);
+void intel_uc_sanitize(struct intel_uc *uc);
+int intel_uc_init_hw(struct intel_uc *uc);
+void intel_uc_fini_hw(struct intel_uc *uc);
+int intel_uc_init(struct intel_uc *uc);
+void intel_uc_fini(struct intel_uc *uc);
+void intel_uc_reset_prepare(struct intel_uc *uc);
+void intel_uc_suspend(struct intel_uc *uc);
+void intel_uc_runtime_suspend(struct intel_uc *uc);
+int intel_uc_resume(struct intel_uc *uc);
+
+static inline bool intel_uc_is_using_guc(struct intel_uc *uc)
{
GEM_BUG_ON(i915_modparams.enable_guc < 0);
return i915_modparams.enable_guc > 0;
}
-static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915)
+static inline bool intel_uc_is_using_guc_submission(struct intel_uc *uc)
{
GEM_BUG_ON(i915_modparams.enable_guc < 0);
return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}
-static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915)
+static inline bool intel_uc_is_using_huc(struct intel_uc *uc)
{
GEM_BUG_ON(i915_modparams.enable_guc < 0);
return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
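
A worked example of how the three predicates above combine with the Gen11+ default computed by __get_platform_enable_guc() in intel_uc.c: enable_guc resolves to ENABLE_GUC_LOAD_HUC alone, so GuC is in use and HuC gets loaded, while submission stays off. The bit layout is an assumption taken from i915_params.h, which is not part of this diff.

#include <assert.h>

#define ENABLE_GUC_SUBMISSION (1 << 0)  /* assumed, from i915_params.h */
#define ENABLE_GUC_LOAD_HUC   (1 << 1)  /* assumed, from i915_params.h */

int main(void)
{
        int enable_guc = ENABLE_GUC_LOAD_HUC;           /* the Gen11+ default */

        assert(enable_guc > 0);                         /* intel_uc_is_using_guc() */
        assert(enable_guc & ENABLE_GUC_LOAD_HUC);       /* intel_uc_is_using_huc() */
        assert(!(enable_guc & ENABLE_GUC_SUBMISSION));  /* submission off */
        return 0;
}
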
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
new file mode 100644
index 000000000000..ac91e3efd02b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright © 2016-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <drm/drm_print.h>
+
+#include "intel_uc_fw.h"
+#include "intel_uc_fw_abi.h"
+#include "i915_drv.h"
+
+/*
+ * List of required GuC and HuC binaries per-platform.
+ * Must be ordered based on platform + revid, from newer to older.
+ */
+#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
+ fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \
+ fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
+ fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \
+ fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \
+ fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \
+ fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398))
+
+#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \
+ "i915/" \
+ __stringify(prefix_) name_ \
+ __stringify(major_) separator_ \
+ __stringify(minor_) separator_ \
+ __stringify(patch_) ".bin"
+
+#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
+ __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_)
+
+#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
+ __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_)
+
+/* All blobs need to be declared via MODULE_FIRMWARE() */
+#define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \
+ MODULE_FIRMWARE(guc_); \
+ MODULE_FIRMWARE(huc_);
+
+INTEL_UC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH, MAKE_HUC_FW_PATH)
+
+/* The below structs and macros are used to iterate across the list of blobs */
+struct __packed uc_fw_blob {
+ u8 major;
+ u8 minor;
+ const char *path;
+};
+
+#define UC_FW_BLOB(major_, minor_, path_) \
+ { .major = major_, .minor = minor_, .path = path_ }
+
+#define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
+ UC_FW_BLOB(major_, minor_, \
+ MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))
+
+#define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
+ UC_FW_BLOB(major_, minor_, \
+ MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
+
+struct __packed uc_fw_platform_requirement {
+ enum intel_platform p;
+ u8 rev; /* first platform rev using this FW */
+ const struct uc_fw_blob blobs[INTEL_UC_FW_NUM_TYPES];
+};
+
+#define MAKE_FW_LIST(platform_, revid_, guc_, huc_) \
+{ \
+ .p = INTEL_##platform_, \
+ .rev = revid_, \
+ .blobs[INTEL_UC_FW_TYPE_GUC] = guc_, \
+ .blobs[INTEL_UC_FW_TYPE_HUC] = huc_, \
+},
+
+static void
+__uc_fw_auto_select(struct intel_uc_fw *uc_fw, enum intel_platform p, u8 rev)
+{
+ static const struct uc_fw_platform_requirement fw_blobs[] = {
+ INTEL_UC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB, HUC_FW_BLOB)
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_blobs) && p <= fw_blobs[i].p; i++) {
+ if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
+ const struct uc_fw_blob *blob =
+ &fw_blobs[i].blobs[uc_fw->type];
+ uc_fw->path = blob->path;
+ uc_fw->major_ver_wanted = blob->major;
+ uc_fw->minor_ver_wanted = blob->minor;
+ break;
+ }
+ }
+
+ /* make sure the list is ordered as expected */
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
+ for (i = 1; i < ARRAY_SIZE(fw_blobs); i++) {
+ if (fw_blobs[i].p < fw_blobs[i - 1].p)
+ continue;
+
+ if (fw_blobs[i].p == fw_blobs[i - 1].p &&
+ fw_blobs[i].rev < fw_blobs[i - 1].rev)
+ continue;
+
+ pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
+ intel_platform_name(fw_blobs[i - 1].p),
+ fw_blobs[i - 1].rev,
+ intel_platform_name(fw_blobs[i].p),
+ fw_blobs[i].rev);
+
+ uc_fw->path = NULL;
+ }
+ }
+}
+
+static bool
+__uc_fw_override(struct intel_uc_fw *uc_fw)
+{
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ uc_fw->path = i915_modparams.guc_firmware_path;
+ break;
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->path = i915_modparams.huc_firmware_path;
+ break;
+ }
+
+ uc_fw->user_overridden = uc_fw->path;
+ return uc_fw->user_overridden;
+}
+
+/**
+ * intel_uc_fw_init_early - initialize the uC object and select the firmware
+ * @uc_fw: uC firmware
+ * @type: type of uC
+ * @i915: device private
+ *
+ * Initialize the state of our uC object and relevant tracking and select the
+ * firmware to fetch and load.
+ */
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type,
+ struct drm_i915_private *i915)
+{
+ /*
+ * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
+ * before we've looked at the HW caps to see if we have uc support
+ */
+ BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
+ GEM_BUG_ON(uc_fw->status);
+ GEM_BUG_ON(uc_fw->path);
+
+ uc_fw->type = type;
+
+ if (HAS_GT_UC(i915) && likely(!__uc_fw_override(uc_fw)))
+ __uc_fw_auto_select(uc_fw, INTEL_INFO(i915)->platform,
+ INTEL_REVID(i915));
+
+ if (uc_fw->path && *uc_fw->path)
+ uc_fw->status = INTEL_UC_FIRMWARE_SELECTED;
+ else
+ uc_fw->status = INTEL_UC_FIRMWARE_NOT_SUPPORTED;
+}
+
+/**
+ * intel_uc_fw_fetch - fetch uC firmware
+ *
+ * @uc_fw: uC firmware
+ * @i915: device private
+ *
+ * Fetch uC firmware into GEM obj.
+ */
+void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
+ size_t size;
+ int err;
+
+ GEM_BUG_ON(!intel_uc_fw_supported(uc_fw));
+
+ err = request_firmware(&fw, uc_fw->path, i915->drm.dev);
+ if (err)
+ goto fail;
+
+ DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n",
+ intel_uc_fw_type_repr(uc_fw->type), fw->size, fw);
+
+ /* Check the size of the blob before examining buffer contents */
+ if (fw->size < sizeof(struct uc_css_header)) {
+ DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ fw->size, sizeof(struct uc_css_header));
+ err = -ENODATA;
+ goto fail;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Check integrity of size values inside CSS header */
+ size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
+ css->exponent_size_dw) * sizeof(u32);
+ if (size != sizeof(struct uc_css_header)) {
+ DRM_WARN("%s: Mismatched firmware header definition\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ /* uCode size must be calculated from other sizes */
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* now RSA */
+ if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
+ DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
+ err = -ENOEXEC;
+ goto fail;
+ }
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* The blob must hold at least the header, uCode and the RSA key */
+ size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (fw->size < size) {
+ DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n",
+ intel_uc_fw_type_repr(uc_fw->type), fw->size, size);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ /* Get version numbers from the CSS header */
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
+ css->sw_version);
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
+ css->sw_version);
+ uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
+ css->sw_version);
+ break;
+
+ default:
+ MISSING_CASE(uc_fw->type);
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
+ if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
+ DRM_NOTE("%s: Skipping firmware version check\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+ uc_fw->status = INTEL_UC_FIRMWARE_AVAILABLE;
+
+ release_firmware(fw);
+ return;
+
+fail:
+ uc_fw->status = INTEL_UC_FIRMWARE_MISSING;
+
+ DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+ DRM_INFO("%s: Firmware can be downloaded from %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+}
+
+static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt)
+{
+ struct drm_mm_node *node = &ggtt->uc_fw;
+
+ GEM_BUG_ON(!node->allocated);
+ GEM_BUG_ON(upper_32_bits(node->start));
+ GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
+
+ return lower_32_bits(node->start);
+}
+
+static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw,
+ struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ struct i915_vma dummy = {
+ .node.start = uc_fw_ggtt_offset(uc_fw, ggtt),
+ .node.size = obj->base.size,
+ .pages = obj->mm.pages,
+ .vm = &ggtt->vm,
+ };
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
+
+ /* uc_fw->obj cache domains were not controlled across suspend */
+ drm_clflush_sg(dummy.pages);
+
+ ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+}
+
+static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw,
+ struct intel_gt *gt)
+{
+ struct drm_i915_gem_object *obj = uc_fw->obj;
+ struct i915_ggtt *ggtt = gt->ggtt;
+ u64 start = uc_fw_ggtt_offset(uc_fw, ggtt);
+
+ ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
+}
+
+static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
+ u32 wopcm_offset, u32 dma_flags)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ u64 offset;
+ int ret;
+
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ /* Set the source address for the uCode */
+ offset = uc_fw_ggtt_offset(uc_fw, gt->ggtt);
+ GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
+ intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
+ intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
+
+ /* Set the DMA destination */
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, wopcm_offset);
+ intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ /*
+ * Set the transfer size. The header plus uCode will be copied to WOPCM
+ * via DMA, excluding any other components
+ */
+ intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
+ sizeof(struct uc_css_header) + uc_fw->ucode_size);
+
+ /* Start the DMA */
+ intel_uncore_write_fw(uncore, DMA_CTRL,
+ _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+
+ /* Wait for DMA to finish */
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
+ if (ret)
+ dev_err(gt->i915->drm.dev, "DMA for %s fw failed, DMA_CTRL=%u\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_uncore_read_fw(uncore, DMA_CTRL));
+
+ /* Disable the bits once DMA is over */
+ intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+/**
+ * intel_uc_fw_upload - load uC firmware using custom loader
+ * @uc_fw: uC firmware
+ * @gt: the intel_gt structure
+ * @wopcm_offset: destination offset in wopcm
+ * @dma_flags: flags for dma ctrl
+ *
+ * Loads uC firmware and updates internal flags.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
+ u32 wopcm_offset, u32 dma_flags)
+{
+ int err;
+
+ DRM_DEBUG_DRIVER("%s fw load %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+
+ /* make sure the status was cleared the last time we reset the uc */
+ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
+
+ if (!intel_uc_fw_is_available(uc_fw))
+ return -ENOEXEC;
+ /* Call custom loader */
+ intel_uc_fw_ggtt_bind(uc_fw, gt);
+ err = uc_fw_xfer(uc_fw, gt, wopcm_offset, dma_flags);
+ intel_uc_fw_ggtt_unbind(uc_fw, gt);
+ if (err)
+ goto fail;
+
+ uc_fw->status = INTEL_UC_FIRMWARE_TRANSFERRED;
+ DRM_DEBUG_DRIVER("%s fw xfer completed\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+
+ DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->path,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found);
+
+ return 0;
+
+fail:
+ uc_fw->status = INTEL_UC_FIRMWARE_FAIL;
+ DRM_DEBUG_DRIVER("%s fw load failed\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+
+ DRM_WARN("%s: Failed to load firmware %s (error %d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
+
+ return err;
+}
+
+int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
+{
+ int err;
+
+ /* this should happen before the load! */
+ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
+
+ if (!intel_uc_fw_is_available(uc_fw))
+ return -ENOEXEC;
+
+ err = i915_gem_object_pin_pages(uc_fw->obj);
+ if (err)
+ DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
+ intel_uc_fw_type_repr(uc_fw->type), err);
+
+ return err;
+}
+
+void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ if (!intel_uc_fw_is_available(uc_fw))
+ return;
+
+ i915_gem_object_unpin_pages(uc_fw->obj);
+}
+
+/**
+ * intel_uc_fw_cleanup_fetch - cleanup uC firmware
+ *
+ * @uc_fw: uC firmware
+ *
+ * Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = fetch_and_zero(&uc_fw->obj);
+ if (obj)
+ i915_gem_object_put(obj);
+
+ uc_fw->status = INTEL_UC_FIRMWARE_SELECTED;
+}
+
+/**
+ * intel_uc_fw_copy_rsa - copy fw RSA to buffer
+ *
+ * @uc_fw: uC firmware
+ * @dst: dst buffer
+ * @max_len: max number of bytes to copy
+ *
+ * Return: number of copied bytes.
+ */
+size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
+{
+ struct sg_table *pages = uc_fw->obj->mm.pages;
+ u32 size = min_t(u32, uc_fw->rsa_size, max_len);
+ u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
+
+ GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
+
+ return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
+}
+
+/**
+ * intel_uc_fw_dump - dump information about uC firmware
+ * @uc_fw: uC firmware
+ * @p: the &drm_printer
+ *
+ * Pretty printer for uC firmware.
+ */
+void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
+{
+ drm_printf(p, "%s firmware: %s\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
+ drm_printf(p, "\tstatus: %s\n",
+ intel_uc_fw_status_repr(uc_fw->status));
+ drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
+ uc_fw->major_ver_found, uc_fw->minor_ver_found);
+ drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
+ drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
+}
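
To make the macro machinery above concrete, a standalone program that reuses the path macros (bodies copied from the patch, plus a local __stringify as in <linux/stringify.h>) and prints the firmware paths they produce for the SKYLAKE row of the table:

#include <stdio.h>

#define __stringify_1(x...) #x
#define __stringify(x...)   __stringify_1(x)

#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \
        "i915/" \
        __stringify(prefix_) name_ \
        __stringify(major_) separator_ \
        __stringify(minor_) separator_ \
        __stringify(patch_) ".bin"

#define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
        __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_)

#define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
        __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_)

int main(void)
{
        puts(MAKE_GUC_FW_PATH(skl, 33, 0, 0));     /* i915/skl_guc_33.0.0.bin */
        puts(MAKE_HUC_FW_PATH(skl, 01, 07, 1398)); /* i915/skl_huc_ver01_07_1398.bin */
        return 0;
}
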
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index ff98f8661d72..6b64b8073703 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -25,34 +25,45 @@
#ifndef _INTEL_UC_FW_H_
#define _INTEL_UC_FW_H_
+#include <linux/types.h>
+#include "intel_uc_fw_abi.h"
+#include "i915_gem.h"
+
struct drm_printer;
struct drm_i915_private;
+struct intel_gt;
/* Home of GuC, HuC and DMC firmwares */
#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git/tree/i915"
enum intel_uc_fw_status {
- INTEL_UC_FIRMWARE_FAIL = -1,
- INTEL_UC_FIRMWARE_NONE = 0,
- INTEL_UC_FIRMWARE_PENDING,
- INTEL_UC_FIRMWARE_SUCCESS
+ INTEL_UC_FIRMWARE_FAIL = -3, /* failed to xfer or init/auth the fw */
+ INTEL_UC_FIRMWARE_MISSING = -2, /* blob not found on the system */
+ INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1, /* no uc HW */
+ INTEL_UC_FIRMWARE_UNINITIALIZED = 0, /* used to catch checks done too early */
+ INTEL_UC_FIRMWARE_SELECTED, /* selected the blob we want to load */
+ INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
+ INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
};
enum intel_uc_fw_type {
- INTEL_UC_FW_TYPE_GUC,
+ INTEL_UC_FW_TYPE_GUC = 0,
INTEL_UC_FW_TYPE_HUC
};
+#define INTEL_UC_FW_NUM_TYPES 2
/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the uC.
*/
struct intel_uc_fw {
+ enum intel_uc_fw_type type;
+ enum intel_uc_fw_status status;
const char *path;
+ bool user_overridden;
size_t size;
struct drm_i915_gem_object *obj;
- enum intel_uc_fw_status fetch_status;
- enum intel_uc_fw_status load_status;
/*
* The firmware build process will generate a version header file with major and
@@ -64,13 +75,8 @@ struct intel_uc_fw {
u16 major_ver_found;
u16 minor_ver_found;
- enum intel_uc_fw_type type;
- u32 header_size;
- u32 header_offset;
u32 rsa_size;
- u32 rsa_offset;
u32 ucode_size;
- u32 ucode_offset;
};
static inline
@@ -79,12 +85,20 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
switch (status) {
case INTEL_UC_FIRMWARE_FAIL:
return "FAIL";
- case INTEL_UC_FIRMWARE_NONE:
- return "NONE";
- case INTEL_UC_FIRMWARE_PENDING:
- return "PENDING";
- case INTEL_UC_FIRMWARE_SUCCESS:
- return "SUCCESS";
+ case INTEL_UC_FIRMWARE_MISSING:
+ return "MISSING";
+ case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
+ return "N/A";
+ case INTEL_UC_FIRMWARE_UNINITIALIZED:
+ return "UNINITIALIZED";
+ case INTEL_UC_FIRMWARE_SELECTED:
+ return "SELECTED";
+ case INTEL_UC_FIRMWARE_AVAILABLE:
+ return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_TRANSFERRED:
+ return "TRANSFERRED";
+ case INTEL_UC_FIRMWARE_RUNNING:
+ return "RUNNING";
}
return "<invalid>";
}
@@ -100,30 +114,43 @@ static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
return "uC";
}
-static inline
-void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
- enum intel_uc_fw_type type)
+static inline enum intel_uc_fw_status
+__intel_uc_fw_status(struct intel_uc_fw *uc_fw)
{
- uc_fw->path = NULL;
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
- uc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
- uc_fw->type = type;
+ /* shouldn't call this before checking hw/blob availability */
+ GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
+ return uc_fw->status;
}
-static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
+static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
{
- return uc_fw->path != NULL;
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
}
static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
{
- return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED;
+}
+
+static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
+}
+
+static inline bool intel_uc_fw_supported(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) != INTEL_UC_FIRMWARE_NOT_SUPPORTED;
+}
+
+static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->user_overridden;
}
static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
{
if (intel_uc_fw_is_loaded(uc_fw))
- uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+ uc_fw->status = INTEL_UC_FIRMWARE_AVAILABLE;
}
/**
@@ -136,20 +163,23 @@ static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
*/
static inline u32 intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
{
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ if (!intel_uc_fw_is_available(uc_fw))
return 0;
- return uc_fw->header_size + uc_fw->ucode_size;
+ return sizeof(struct uc_css_header) + uc_fw->ucode_size;
}
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw);
+void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
+ enum intel_uc_fw_type type,
+ struct drm_i915_private *i915);
+void intel_uc_fw_fetch(struct intel_uc_fw *uc_fw,
+ struct drm_i915_private *i915);
void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw);
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw));
+int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt,
+ u32 wopcm_offset, u32 dma_flags);
int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
-u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw);
+size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
#endif
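
The new enum above is deliberately ordered so that one comparison answers "did the firmware get at least this far?". A standalone sketch (enum values copied from the patch) of how the >= based helpers and intel_uc_fw_sanitize() behave:

#include <assert.h>

enum intel_uc_fw_status {
        INTEL_UC_FIRMWARE_FAIL = -3,
        INTEL_UC_FIRMWARE_MISSING = -2,
        INTEL_UC_FIRMWARE_NOT_SUPPORTED = -1,
        INTEL_UC_FIRMWARE_UNINITIALIZED = 0,
        INTEL_UC_FIRMWARE_SELECTED,
        INTEL_UC_FIRMWARE_AVAILABLE,
        INTEL_UC_FIRMWARE_TRANSFERRED,
        INTEL_UC_FIRMWARE_RUNNING
};

int main(void)
{
        enum intel_uc_fw_status s = INTEL_UC_FIRMWARE_RUNNING;

        /* a RUNNING fw is also "loaded" and "available" */
        assert(s >= INTEL_UC_FIRMWARE_AVAILABLE);    /* is_available() */
        assert(s >= INTEL_UC_FIRMWARE_TRANSFERRED);  /* is_loaded() */

        /* sanitize() only ever steps a loaded fw back to AVAILABLE */
        s = INTEL_UC_FIRMWARE_AVAILABLE;
        assert(s < INTEL_UC_FIRMWARE_TRANSFERRED);   /* no longer loaded */
        return 0;
}
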
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
new file mode 100644
index 000000000000..ae58e8a8c53b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _INTEL_UC_FW_ABI_H
+#define _INTEL_UC_FW_ABI_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+/**
+ * DOC: Firmware Layout
+ *
+ * The GuC/HuC firmware layout looks like this::
+ *
+ * +======================================================================+
+ * | Firmware blob |
+ * +===============+===============+============+============+============+
+ * | CSS header | uCode | RSA key | modulus | exponent |
+ * +===============+===============+============+============+============+
+ * <-header size-> <---header size continued ----------->
+ * <--- size ----------------------------------------------------------->
+ * <-key size->
+ * <-mod size->
+ * <-exp size->
+ *
+ * The firmware may or may not have modulus key and exponent data. The header,
+ * uCode and RSA signature are must-have components that will be used by the
+ * driver. The length of each component, in dwords, can be found in the header.
+ * If modulus and exponent are not present in the fw, a.k.a. a truncated
+ * image, their length values still appear in the header.
+ *
+ * The driver does some basic fw size validation based on the following rules:
+ *
+ * 1. Header, uCode and RSA are must-have components.
+ * 2. All firmware components, if present, are in the sequence illustrated in
+ *    the layout table above.
+ * 3. Length info of each component can be found in the header, in dwords.
+ * 4. Modulus and exponent key are not required by the driver. They may not
+ *    appear in the fw, in which case the driver loads a truncated firmware.
+ *
+ * The only difference between GuC and HuC firmwares is how the version
+ * information is saved.
+ */
+
+struct uc_css_header {
+ u32 module_type;
+ /*
+ * header_size includes all non-uCode bits, including css_header, rsa
+ * key, modulus key and exponent data.
+ */
+ u32 header_size_dw;
+ u32 header_version;
+ u32 module_id;
+ u32 module_vendor;
+ u32 date;
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
+ u32 size_dw; /* uCode plus header_size_dw */
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_TIME_MIN (0xFF << 8)
+#define CSS_TIME_SEC (0xFFFF << 16)
+ char username[8];
+ char buildnumber[12];
+ u32 sw_version;
+#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0)
+#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16)
+#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0)
+ u32 reserved[14];
+ u32 header_info;
+} __packed;
+static_assert(sizeof(struct uc_css_header) == 128);
+
+#endif /* _INTEL_UC_FW_ABI_H */
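
A standalone sketch of how the CSS header fields translate into byte offsets, following the layout documented above and the arithmetic in intel_uc_fw_fetch() and intel_uc_fw_copy_rsa(). The dword counts are hypothetical, chosen so the header integrity check comes out to the asserted 128 bytes:

#include <stdio.h>

int main(void)
{
        const unsigned int css_bytes = 128;   /* sizeof(struct uc_css_header) */

        /* hypothetical: css (32 dw) + key (64) + modulus (64) + exponent (1) */
        unsigned int header_size_dw = 161;
        unsigned int key_size_dw = 64;        /* must equal UOS_RSA_SCRATCH_COUNT */
        unsigned int modulus_size_dw = 64;
        unsigned int exponent_size_dw = 1;
        unsigned int size_dw = header_size_dw + 4096;  /* plus 4096 dw of uCode */

        /* the integrity check in intel_uc_fw_fetch(): remainder must be the css */
        unsigned int css_check = (header_size_dw - key_size_dw -
                                  modulus_size_dw - exponent_size_dw) * 4;

        unsigned int ucode_bytes = (size_dw - header_size_dw) * 4;
        unsigned int rsa_bytes = key_size_dw * 4;

        printf("css check: %u bytes (must be 128)\n", css_check);
        printf("uCode: offset %u, %u bytes\n", css_bytes, ucode_bytes);
        /* intel_uc_fw_copy_rsa() reads the signature from exactly here */
        printf("RSA: offset %u, %u bytes\n", css_bytes + ucode_bytes, rsa_bytes);
        return 0;
}
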
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index 6ca8584cd64c..371f7a60c987 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -103,17 +103,9 @@ static int ring_doorbell_nop(struct intel_guc_client *client)
/*
* Basic client sanity check, handy to validate create_clients.
*/
-static int validate_client(struct intel_guc_client *client,
- int client_priority,
- bool is_preempt_client)
+static int validate_client(struct intel_guc_client *client, int client_priority)
{
- struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
- struct i915_gem_context *ctx_owner = is_preempt_client ?
- dev_priv->preempt_context : dev_priv->kernel_context;
-
- if (client->owner != ctx_owner ||
- client->engines != INTEL_INFO(dev_priv)->engine_mask ||
- client->priority != client_priority ||
+ if (client->priority != client_priority ||
client->doorbell_id == GUC_DOORBELL_INVALID)
return -EINVAL;
else
@@ -142,11 +134,11 @@ static int igt_guc_clients(void *args)
struct intel_guc *guc;
int err = 0;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GT_UC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- guc = &dev_priv->guc;
+ guc = &dev_priv->gt.uc.guc;
if (!guc) {
pr_err("No guc object!\n");
err = -EINVAL;
@@ -163,7 +155,7 @@ static int igt_guc_clients(void *args)
*/
guc_clients_disable(guc);
guc_clients_destroy(guc);
- if (guc->execbuf_client || guc->preempt_client) {
+ if (guc->execbuf_client) {
pr_err("guc_clients_destroy lied!\n");
err = -EINVAL;
goto unlock;
@@ -177,24 +169,14 @@ static int igt_guc_clients(void *args)
GEM_BUG_ON(!guc->execbuf_client);
err = validate_client(guc->execbuf_client,
- GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
+ GUC_CLIENT_PRIORITY_KMD_NORMAL);
if (err) {
pr_err("execbug client validation failed\n");
goto out;
}
- if (guc->preempt_client) {
- err = validate_client(guc->preempt_client,
- GUC_CLIENT_PRIORITY_KMD_HIGH, true);
- if (err) {
- pr_err("preempt client validation failed\n");
- goto out;
- }
- }
-
- /* each client should now have reserved a doorbell */
- if (!has_doorbell(guc->execbuf_client) ||
- (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
+ /* the client should now have reserved a doorbell */
+ if (!has_doorbell(guc->execbuf_client)) {
pr_err("guc_clients_create didn't reserve doorbells\n");
err = -EINVAL;
goto out;
@@ -204,8 +186,7 @@ static int igt_guc_clients(void *args)
guc_clients_enable(guc);
/* each client should now have received a doorbell */
- if (!client_doorbell_in_sync(guc->execbuf_client) ||
- !client_doorbell_in_sync(guc->preempt_client)) {
+ if (!client_doorbell_in_sync(guc->execbuf_client)) {
pr_err("failed to initialize the doorbells\n");
err = -EINVAL;
goto out;
@@ -245,11 +226,11 @@ static int igt_guc_doorbells(void *arg)
int i, err = 0;
u16 db_id;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GT_UC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- guc = &dev_priv->guc;
+ guc = &dev_priv->gt.uc.guc;
if (!guc) {
pr_err("No guc object!\n");
err = -EINVAL;
@@ -261,10 +242,7 @@ static int igt_guc_doorbells(void *arg)
goto unlock;
for (i = 0; i < ATTEMPTS; i++) {
- clients[i] = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- i % GUC_CLIENT_PRIORITY_NUM,
- dev_priv->kernel_context);
+ clients[i] = guc_client_alloc(guc, i % GUC_CLIENT_PRIORITY_NUM);
if (!clients[i]) {
pr_err("[%d] No guc client\n", i);
@@ -300,8 +278,7 @@ static int igt_guc_doorbells(void *arg)
goto out;
}
- err = validate_client(clients[i],
- i % GUC_CLIENT_PRIORITY_NUM, false);
+ err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM);
if (err) {
pr_err("[%d] client_alloc sanity check failed!\n", i);
err = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 41c8ebc60c63..13044c027f27 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -491,7 +491,7 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
obj->gvt_info = dmabuf_obj->info;
- dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
+ dmabuf = i915_gem_prime_export(&obj->base, DRM_CLOEXEC | DRM_RDWR);
if (IS_ERR(dmabuf)) {
gvt_vgpu_err("export dma-buf failed\n");
ret = PTR_ERR(dmabuf);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 42d0394f0de2..88789316807d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -205,17 +205,18 @@ struct intel_vgpu_gtt {
struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
};
-extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
-extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
-extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
-extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
+void intel_gvt_clean_gtt(struct intel_gvt *gvt);
-extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
- int page_table_level, void *root_entry);
+struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+ int page_table_level,
+ void *root_entry);
struct intel_vgpu_oos_page {
struct intel_vgpu_ppgtt_spt *spt;
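
The header change above is purely stylistic: function declarations at file scope have external linkage by default in C, so the extern keyword is redundant and dropping it changes nothing. For example, these two prototypes are equivalent:

extern int intel_gvt_init_gtt(struct intel_gvt *gvt);	/* before */
int intel_gvt_init_gtt(struct intel_gvt *gvt);		/* after, equivalent */
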
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9f3fd7d96a69..f40524b0e300 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1157,7 +1157,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->gem_context->vm));
+ i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
for_each_engine(engine, vgpu->gvt->dev_priv, id)
intel_context_unpin(s->shadow[id]);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 293e5bcc4b6c..d32db8a4db5c 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/debugobjects.h>
+
#include "gt/intel_engine_pm.h"
#include "i915_drv.h"
@@ -31,49 +33,108 @@ struct active_node {
u64 timeline;
};
-static void
-__active_park(struct i915_active *ref)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
+
+static void *active_debug_hint(void *addr)
{
- struct active_node *it, *n;
+ struct i915_active *ref = addr;
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- GEM_BUG_ON(i915_active_request_isset(&it->base));
- kmem_cache_free(global.slab_cache, it);
- }
- ref->tree = RB_ROOT;
+ return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
+}
+
+static struct debug_obj_descr active_debug_desc = {
+ .name = "i915_active",
+ .debug_hint = active_debug_hint,
+};
+
+static void debug_active_init(struct i915_active *ref)
+{
+ debug_object_init(ref, &active_debug_desc);
}
+static void debug_active_activate(struct i915_active *ref)
+{
+ debug_object_activate(ref, &active_debug_desc);
+}
+
+static void debug_active_deactivate(struct i915_active *ref)
+{
+ debug_object_deactivate(ref, &active_debug_desc);
+}
+
+static void debug_active_fini(struct i915_active *ref)
+{
+ debug_object_free(ref, &active_debug_desc);
+}
+
+static void debug_active_assert(struct i915_active *ref)
+{
+ debug_object_assert_init(ref, &active_debug_desc);
+}
+
+#else
+
+static inline void debug_active_init(struct i915_active *ref) { }
+static inline void debug_active_activate(struct i915_active *ref) { }
+static inline void debug_active_deactivate(struct i915_active *ref) { }
+static inline void debug_active_fini(struct i915_active *ref) { }
+static inline void debug_active_assert(struct i915_active *ref) { }
+
+#endif
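
The block above wires i915_active into the kernel's generic debugobjects checker. As a rough, self-contained sketch of the same lifecycle (struct widget and its helpers are hypothetical): an object must be initialised before activation and deactivated again before it may be freed, which lets the checker catch double-activation and use-after-free when CONFIG_DEBUG_OBJECTS is enabled.

#include <linux/debugobjects.h>
#include <linux/slab.h>

struct widget {
	int dummy;
};

static struct debug_obj_descr widget_debug_desc = {
	.name = "widget",
};

static void widget_init(struct widget *w)
{
	debug_object_init(w, &widget_debug_desc);	/* tracked, inactive */
}

static void widget_start(struct widget *w)
{
	debug_object_activate(w, &widget_debug_desc);	/* init -> active */
}

static void widget_stop(struct widget *w)
{
	debug_object_deactivate(w, &widget_debug_desc);	/* active -> init */
}

static void widget_destroy(struct widget *w)
{
	debug_object_free(w, &widget_debug_desc);	/* complains if active */
	kfree(w);
}
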
+
static void
__active_retire(struct i915_active *ref)
{
- GEM_BUG_ON(!ref->count);
- if (--ref->count)
- return;
+ struct active_node *it, *n;
+ struct rb_root root;
+ bool retire = false;
+
+ lockdep_assert_held(&ref->mutex);
+
+ /* return the unused nodes to our slabcache -- flushing the allocator */
+ if (atomic_dec_and_test(&ref->count)) {
+ debug_active_deactivate(ref);
+ root = ref->tree;
+ ref->tree = RB_ROOT;
+ ref->cache = NULL;
+ retire = true;
+ }
- /* return the unused nodes to our slabcache */
- __active_park(ref);
+ mutex_unlock(&ref->mutex);
+ if (!retire)
+ return;
ref->retire(ref);
+
+ rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
+ GEM_BUG_ON(i915_active_request_isset(&it->base));
+ kmem_cache_free(global.slab_cache, it);
+ }
}
static void
-node_retire(struct i915_active_request *base, struct i915_request *rq)
+active_retire(struct i915_active *ref)
{
- __active_retire(container_of(base, struct active_node, base)->ref);
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ if (atomic_add_unless(&ref->count, -1, 1))
+ return;
+
+ /* One active may be flushed from inside the acquire of another */
+ mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ __active_retire(ref);
}
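
active_retire() above is the classic "put that only takes the lock for the final reference" shape; the stock kref_put_mutex() helper encodes the same idea. A generic sketch, with struct obj and obj_retire() as hypothetical stand-ins:

struct obj {
	atomic_t count;
	struct mutex mutex;
};

static void obj_retire(struct obj *o);

static void obj_put(struct obj *o)
{
	/* Fast path: pure atomic decrement while count > 1. */
	if (atomic_add_unless(&o->count, -1, 1))
		return;

	/* Slow path: the 1 -> 0 transition is serialised by the mutex. */
	mutex_lock(&o->mutex);
	if (atomic_dec_and_test(&o->count))
		obj_retire(o);
	mutex_unlock(&o->mutex);
}
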
static void
-last_retire(struct i915_active_request *base, struct i915_request *rq)
+node_retire(struct i915_active_request *base, struct i915_request *rq)
{
- __active_retire(container_of(base, struct i915_active, last));
+ active_retire(container_of(base, struct active_node, base)->ref);
}
static struct i915_active_request *
active_instance(struct i915_active *ref, u64 idx)
{
- struct active_node *node;
+ struct active_node *node, *prealloc;
struct rb_node **p, *parent;
- struct i915_request *old;
/*
* We track the most recently used timeline to skip a rbtree search
@@ -81,20 +142,18 @@ active_instance(struct i915_active *ref, u64 idx)
* at all. We can reuse the last slot if it is empty, that is
* after the previous activity has been retired, or if it matches the
* current timeline.
- *
- * Note that we allow the timeline to be active simultaneously in
- * the rbtree and the last cache. We do this to avoid having
- * to search and replace the rbtree element for a new timeline, with
- * the cost being that we must be aware that the ref may be retired
- * twice for the same timeline (as the older rbtree element will be
- * retired before the new request added to last).
*/
- old = i915_active_request_raw(&ref->last, BKL(ref));
- if (!old || old->fence.context == idx)
- goto out;
+ node = READ_ONCE(ref->cache);
+ if (node && node->timeline == idx)
+ return &node->base;
- /* Move the currently active fence into the rbtree */
- idx = old->fence.context;
+ /* Preallocate a replacement, just in case */
+ prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+ if (!prealloc)
+ return NULL;
+
+ mutex_lock(&ref->mutex);
+ GEM_BUG_ON(i915_active_is_idle(ref));
parent = NULL;
p = &ref->tree.rb_node;
@@ -102,8 +161,10 @@ active_instance(struct i915_active *ref, u64 idx)
parent = *p;
node = rb_entry(parent, struct active_node, node);
- if (node->timeline == idx)
- goto replace;
+ if (node->timeline == idx) {
+ kmem_cache_free(global.slab_cache, prealloc);
+ goto out;
+ }
if (node->timeline < idx)
p = &parent->rb_right;
@@ -111,17 +172,7 @@ active_instance(struct i915_active *ref, u64 idx)
p = &parent->rb_left;
}
- node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
-
- /* kmalloc may retire the ref->last (thanks shrinker)! */
- if (unlikely(!i915_active_request_raw(&ref->last, BKL(ref)))) {
- kmem_cache_free(global.slab_cache, node);
- goto out;
- }
-
- if (unlikely(!node))
- return ERR_PTR(-ENOMEM);
-
+ node = prealloc;
i915_active_request_init(&node->base, NULL, node_retire);
node->ref = ref;
node->timeline = idx;
@@ -129,38 +180,30 @@ active_instance(struct i915_active *ref, u64 idx)
rb_link_node(&node->node, parent, p);
rb_insert_color(&node->node, &ref->tree);
-replace:
- /*
- * Overwrite the previous active slot in the rbtree with last,
- * leaving last zeroed. If the previous slot is still active,
- * we must be careful as we now only expect to receive one retire
- * callback not two, and so must undo the active counting for the
- * overwritten slot.
- */
- if (i915_active_request_isset(&node->base)) {
- /* Retire ourselves from the old rq->active_list */
- __list_del_entry(&node->base.link);
- ref->count--;
- GEM_BUG_ON(!ref->count);
- }
- GEM_BUG_ON(list_empty(&ref->last.link));
- list_replace_init(&ref->last.link, &node->base.link);
- node->base.request = fetch_and_zero(&ref->last.request);
-
out:
- return &ref->last;
+ ref->cache = node;
+ mutex_unlock(&ref->mutex);
+
+ return &node->base;
}
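
active_instance() also demonstrates the preallocate-outside-the-lock idiom: a GFP_KERNEL allocation may recurse into the shrinker, which must never find ref->mutex held, so the node is allocated first and simply discarded if the locked lookup finds an existing entry. A generic sketch of the same shape; struct cache, cache_lookup_locked() and cache_insert_locked() are hypothetical:

static struct cache_node *cache_get(struct cache *c, u64 key)
{
	struct cache_node *node, *prealloc;

	/* Allocate before taking c->mutex; GFP_KERNEL may reclaim. */
	prealloc = kmem_cache_alloc(c->slab, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	mutex_lock(&c->mutex);
	node = cache_lookup_locked(c, key);
	if (node) {
		kmem_cache_free(c->slab, prealloc); /* lost the race */
	} else {
		node = prealloc;
		node->key = key;
		cache_insert_locked(c, node);
	}
	mutex_unlock(&c->mutex);

	return node;
}
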
-void i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
- void (*retire)(struct i915_active *ref))
+void __i915_active_init(struct drm_i915_private *i915,
+ struct i915_active *ref,
+ int (*active)(struct i915_active *ref),
+ void (*retire)(struct i915_active *ref),
+ struct lock_class_key *key)
{
+ debug_active_init(ref);
+
ref->i915 = i915;
+ ref->flags = 0;
+ ref->active = active;
ref->retire = retire;
ref->tree = RB_ROOT;
- i915_active_request_init(&ref->last, NULL, last_retire);
+ ref->cache = NULL;
init_llist_head(&ref->barriers);
- ref->count = 0;
+ atomic_set(&ref->count, 0);
+ __mutex_init(&ref->mutex, "i915_active", key);
}
int i915_active_ref(struct i915_active *ref,
@@ -168,60 +211,123 @@ int i915_active_ref(struct i915_active *ref,
struct i915_request *rq)
{
struct i915_active_request *active;
- int err = 0;
+ int err;
/* Prevent reaping in case we malloc/wait while building the tree */
- i915_active_acquire(ref);
+ err = i915_active_acquire(ref);
+ if (err)
+ return err;
active = active_instance(ref, timeline);
- if (IS_ERR(active)) {
- err = PTR_ERR(active);
+ if (!active) {
+ err = -ENOMEM;
goto out;
}
if (!i915_active_request_isset(active))
- ref->count++;
+ atomic_inc(&ref->count);
__i915_active_request_set(active, rq);
- GEM_BUG_ON(!ref->count);
out:
i915_active_release(ref);
return err;
}
-bool i915_active_acquire(struct i915_active *ref)
+int i915_active_acquire(struct i915_active *ref)
{
- lockdep_assert_held(BKL(ref));
- return !ref->count++;
+ int err;
+
+ debug_active_assert(ref);
+ if (atomic_add_unless(&ref->count, 1, 0))
+ return 0;
+
+ err = mutex_lock_interruptible(&ref->mutex);
+ if (err)
+ return err;
+
+ if (!atomic_read(&ref->count) && ref->active)
+ err = ref->active(ref);
+ if (!err) {
+ debug_active_activate(ref);
+ atomic_inc(&ref->count);
+ }
+
+ mutex_unlock(&ref->mutex);
+
+ return err;
}
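
The acquire side mirrors the put: atomic_add_unless(&ref->count, 1, 0) is the lock-free "get unless already idle" fast path (compare kref_get_unless_zero()), and only the 0 -> 1 transition takes the mutex so the optional ->active() hook runs exactly once per activation. A sketch, reusing the hypothetical struct obj from above plus an assumed activate callback member:

static int obj_get(struct obj *o)
{
	int err;

	/* Fast path: object already live, just take another reference. */
	if (atomic_add_unless(&o->count, 1, 0))
		return 0;

	err = mutex_lock_interruptible(&o->mutex);
	if (err)
		return err;

	/* First reference (or we raced); run the hook only from idle. */
	err = 0;
	if (!atomic_read(&o->count))
		err = o->activate(o);
	if (!err)
		atomic_inc(&o->count);

	mutex_unlock(&o->mutex);
	return err;
}
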
void i915_active_release(struct i915_active *ref)
{
- lockdep_assert_held(BKL(ref));
- __active_retire(ref);
+ debug_active_assert(ref);
+ active_retire(ref);
+}
+
+static void __active_ungrab(struct i915_active *ref)
+{
+ clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags);
+}
+
+bool i915_active_trygrab(struct i915_active *ref)
+{
+ debug_active_assert(ref);
+
+ if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags))
+ return false;
+
+ if (!atomic_add_unless(&ref->count, 1, 0)) {
+ __active_ungrab(ref);
+ return false;
+ }
+
+ return true;
+}
+
+void i915_active_ungrab(struct i915_active *ref)
+{
+ GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));
+
+ active_retire(ref);
+ __active_ungrab(ref);
}
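
trygrab/ungrab pair a flag bit with the reference count: the bit makes grabbing mutually exclusive, while waiters (i915_active_wait() just below) sleep on that same bit until clear_and_wake_up_bit() releases them. A stripped-down sketch, extending the hypothetical struct obj with an unsigned long flags word:

#define OBJ_GRAB_BIT 0

static bool obj_trygrab(struct obj *o)
{
	if (test_and_set_bit(OBJ_GRAB_BIT, &o->flags))
		return false;		/* someone else holds the grab */

	if (!atomic_add_unless(&o->count, 1, 0)) {
		/* Object already idle: undo and wake any waiters. */
		clear_and_wake_up_bit(OBJ_GRAB_BIT, &o->flags);
		return false;
	}

	return true;
}

static int obj_wait_ungrabbed(struct obj *o)
{
	/* Sleep until the grabber drops its reference and clears the bit. */
	return wait_on_bit(&o->flags, OBJ_GRAB_BIT, TASK_KILLABLE) ?
	       -EINTR : 0;
}
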
int i915_active_wait(struct i915_active *ref)
{
struct active_node *it, *n;
- int ret = 0;
+ int err;
- if (i915_active_acquire(ref))
- goto out_release;
+ might_sleep();
+ might_lock(&ref->mutex);
+
+ if (i915_active_is_idle(ref))
+ return 0;
+
+ err = mutex_lock_interruptible(&ref->mutex);
+ if (err)
+ return err;
- ret = i915_active_request_retire(&ref->last, BKL(ref));
- if (ret)
- goto out_release;
+ if (!atomic_add_unless(&ref->count, 1, 0)) {
+ mutex_unlock(&ref->mutex);
+ return 0;
+ }
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- ret = i915_active_request_retire(&it->base, BKL(ref));
- if (ret)
+ err = i915_active_request_retire(&it->base, BKL(ref));
+ if (err)
break;
}
-out_release:
- i915_active_release(ref);
- return ret;
+ __active_retire(ref);
+ if (err)
+ return err;
+
+ if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
+ return -EINTR;
+
+ if (!i915_active_is_idle(ref))
+ return -EBUSY;
+
+ return 0;
}
int i915_request_await_active_request(struct i915_request *rq,
@@ -236,23 +342,24 @@ int i915_request_await_active_request(struct i915_request *rq,
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
struct active_node *it, *n;
- int err = 0;
+ int err;
- /* await allocates and so we need to avoid hitting the shrinker */
- if (i915_active_acquire(ref))
- goto out; /* was idle */
+ if (RB_EMPTY_ROOT(&ref->tree))
+ return 0;
- err = i915_request_await_active_request(rq, &ref->last);
+ /* await allocates and so we need to avoid hitting the shrinker */
+ err = i915_active_acquire(ref);
if (err)
- goto out;
+ return err;
+ mutex_lock(&ref->mutex);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
err = i915_request_await_active_request(rq, &it->base);
if (err)
- goto out;
+ break;
}
+ mutex_unlock(&ref->mutex);
-out:
i915_active_release(ref);
return err;
}
@@ -260,9 +367,10 @@ out:
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
- GEM_BUG_ON(i915_active_request_isset(&ref->last));
+ debug_active_fini(ref);
GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
- GEM_BUG_ON(ref->count);
+ GEM_BUG_ON(atomic_read(&ref->count));
+ mutex_destroy(&ref->mutex);
}
#endif
@@ -270,12 +378,12 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
+ intel_engine_mask_t tmp, mask = engine->mask;
struct llist_node *pos, *next;
- unsigned long tmp;
int err;
- GEM_BUG_ON(!engine->mask);
- for_each_engine_masked(engine, i915, engine->mask, tmp) {
+ GEM_BUG_ON(!mask);
+ for_each_engine_masked(engine, i915, mask, tmp) {
struct intel_context *kctx = engine->kernel_context;
struct active_node *node;
@@ -289,7 +397,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
(void *)engine, node_retire);
node->timeline = kctx->ring->timeline->fence_context;
node->ref = ref;
- ref->count++;
+ atomic_inc(&ref->count);
intel_engine_pm_get(engine);
llist_add((struct llist_node *)&node->base.link,
@@ -316,8 +424,9 @@ void i915_active_acquire_barrier(struct i915_active *ref)
{
struct llist_node *pos, *next;
- i915_active_acquire(ref);
+ GEM_BUG_ON(i915_active_is_idle(ref));
+ mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
llist_for_each_safe(pos, next, llist_del_all(&ref->barriers)) {
struct intel_engine_cs *engine;
struct active_node *node;
@@ -347,7 +456,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
&engine->barrier_tasks);
intel_engine_pm_put(engine);
}
- i915_active_release(ref);
+ mutex_unlock(&ref->mutex);
}
void i915_request_add_barriers(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index c14eebf6d074..ba68b077ec6c 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -369,9 +369,16 @@ i915_active_request_retire(struct i915_active_request *active,
* synchronisation.
*/
-void i915_active_init(struct drm_i915_private *i915,
- struct i915_active *ref,
- void (*retire)(struct i915_active *ref));
+void __i915_active_init(struct drm_i915_private *i915,
+ struct i915_active *ref,
+ int (*active)(struct i915_active *ref),
+ void (*retire)(struct i915_active *ref),
+ struct lock_class_key *key);
+#define i915_active_init(i915, ref, active, retire) do { \
+ static struct lock_class_key __key; \
+ \
+ __i915_active_init(i915, ref, active, retire, &__key); \
+} while (0)
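
The wrapper macro exists so every static call site of i915_active_init() gets its own lockdep class for ref->mutex, which avoids false-positive deadlock reports between unrelated i915_active instances. A hypothetical caller of the reworked API, illustrative only (struct frame and its callbacks are invented for the example):

struct frame {
	struct i915_active active;
};

static int frame_activate(struct i915_active *ref) { return 0; }
static void frame_retire(struct i915_active *ref) { }

static int frame_track(struct drm_i915_private *i915, struct frame *f,
		       struct i915_request *rq, u64 timeline)
{
	int err;

	/* Expands to __i915_active_init() with a per-site lock class. */
	i915_active_init(i915, &f->active, frame_activate, frame_retire);

	err = i915_active_acquire(&f->active); /* may call frame_activate() */
	if (err)
		return err;

	err = i915_active_ref(&f->active, timeline, rq);

	i915_active_release(&f->active); /* last ref runs frame_retire() */
	return err;
}
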
int i915_active_ref(struct i915_active *ref,
u64 timeline,
@@ -384,20 +391,17 @@ int i915_request_await_active(struct i915_request *rq,
int i915_request_await_active_request(struct i915_request *rq,
struct i915_active_request *active);
-bool i915_active_acquire(struct i915_active *ref);
-
-static inline void i915_active_cancel(struct i915_active *ref)
-{
- GEM_BUG_ON(ref->count != 1);
- ref->count = 0;
-}
-
+int i915_active_acquire(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
+void __i915_active_release_nested(struct i915_active *ref, int subclass);
+
+bool i915_active_trygrab(struct i915_active *ref);
+void i915_active_ungrab(struct i915_active *ref);
static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
- return !ref->count;
+ return !atomic_read(&ref->count);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index c025991b9233..74743dd0d5f0 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -7,7 +7,9 @@
#ifndef _I915_ACTIVE_TYPES_H_
#define _I915_ACTIVE_TYPES_H_
+#include <linux/atomic.h>
#include <linux/llist.h>
+#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
@@ -24,13 +26,20 @@ struct i915_active_request {
i915_active_retire_fn retire;
};
+struct active_node;
+
struct i915_active {
struct drm_i915_private *i915;
+ struct active_node *cache;
struct rb_root tree;
- struct i915_active_request last;
- unsigned int count;
+ struct mutex mutex;
+ atomic_t count;
+
+ unsigned long flags;
+#define I915_ACTIVE_GRAB_BIT 0
+ int (*active)(struct i915_active *ref);
void (*retire)(struct i915_active *ref);
struct llist_head barriers;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 62cf34db9280..24787bb48c9f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -40,12 +40,12 @@
#include "gem/i915_gem_context.h"
#include "gt/intel_reset.h"
+#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
#include "i915_irq.h"
#include "intel_csr.h"
#include "intel_drv.h"
-#include "intel_guc_submission.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -75,11 +75,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static char get_active_flag(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_is_active(obj) ? '*' : ' ';
-}
-
static char get_pin_flag(struct drm_i915_gem_object *obj)
{
return obj->pin_global ? 'p' : ' ';
@@ -144,9 +139,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
unsigned int frontbuffer_bits;
int pin_count = 0;
- seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
+ seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
&obj->base,
- get_active_flag(obj),
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
@@ -1080,17 +1074,16 @@ static void i915_instdone_info(struct drm_i915_private *dev_priv,
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_gt *gt = &i915->gt;
struct intel_engine_cs *engine;
- u64 acthd[I915_NUM_ENGINES];
- struct intel_instdone instdone;
intel_wakeref_t wakeref;
enum intel_engine_id id;
- seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
- if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
+ seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
seq_puts(m, "\tWedged\n");
- if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
+ if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
seq_puts(m, "\tDevice (global) reset in progress\n");
if (!i915_modparams.enable_hangcheck) {
@@ -1098,42 +1091,37 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- for_each_engine(engine, dev_priv, id)
- acthd[id] = intel_engine_get_active_head(engine);
-
- intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
- }
-
- if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
+ if (timer_pending(&gt->hangcheck.work.timer))
seq_printf(m, "Hangcheck active, timer fires in %dms\n",
- jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
+ jiffies_to_msecs(gt->hangcheck.work.timer.expires -
jiffies));
- else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
+ else if (delayed_work_pending(&gt->hangcheck.work))
seq_puts(m, "Hangcheck active, work pending\n");
else
seq_puts(m, "Hangcheck inactive\n");
- seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
+ seq_printf(m, "GT active? %s\n", yesno(gt->awake));
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s: %d ms ago\n",
- engine->name,
- jiffies_to_msecs(jiffies -
- engine->hangcheck.action_timestamp));
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+ for_each_engine(engine, i915, id) {
+ struct intel_instdone instdone;
- seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
- (long long)engine->hangcheck.acthd,
- (long long)acthd[id]);
+ seq_printf(m, "%s: %d ms ago\n",
+ engine->name,
+ jiffies_to_msecs(jiffies -
+ engine->hangcheck.action_timestamp));
- if (engine->id == RCS0) {
- seq_puts(m, "\tinstdone read =\n");
+ seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
+ (long long)engine->hangcheck.acthd,
+ intel_engine_get_active_head(engine));
- i915_instdone_info(dev_priv, m, &instdone);
+ intel_engine_get_instdone(engine, &instdone);
- seq_puts(m, "\tinstdone accu =\n");
+ seq_puts(m, "\tinstdone read =\n");
+ i915_instdone_info(i915, m, &instdone);
- i915_instdone_info(dev_priv, m,
+ seq_puts(m, "\tinstdone accu =\n");
+ i915_instdone_info(i915, m,
&engine->hangcheck.instdone);
}
}
@@ -1141,23 +1129,6 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_reset_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_gpu_error *error = &dev_priv->gpu_error;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
-
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s = %u\n", engine->name,
- i915_reset_engine_count(error, engine));
- }
-
- return 0;
-}
-
static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -1894,11 +1865,11 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
struct drm_printer p;
- if (!HAS_HUC(dev_priv))
+ if (!HAS_GT_UC(dev_priv))
return -ENODEV;
p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->huc.fw, &p);
+ intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
@@ -1912,11 +1883,11 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
intel_wakeref_t wakeref;
struct drm_printer p;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GT_UC(dev_priv))
return -ENODEV;
p = drm_seq_file_printer(m);
- intel_uc_fw_dump(&dev_priv->guc.fw, &p);
+ intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 tmp = I915_READ(GUC_STATUS);
@@ -1959,7 +1930,7 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
static void i915_guc_log_info(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
- struct intel_guc_log *log = &dev_priv->guc.log;
+ struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
if (!intel_guc_log_relay_enabled(log)) {
@@ -2005,7 +1976,7 @@ static void i915_guc_client_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
+ const struct intel_guc *guc = &dev_priv->gt.uc.guc;
if (!USES_GUC(dev_priv))
return -ENODEV;
@@ -2023,11 +1994,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
i915_guc_client_info(m, dev_priv, guc->execbuf_client);
- if (guc->preempt_client) {
- seq_printf(m, "\nGuC preempt client @ %p:\n",
- guc->preempt_client);
- i915_guc_client_info(m, dev_priv, guc->preempt_client);
- }
/* Add more as required ... */
@@ -2037,9 +2003,8 @@ static int i915_guc_info(struct seq_file *m, void *data)
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->guc;
+ const struct intel_guc *guc = &dev_priv->gt.uc.guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
- struct intel_guc_client *client = guc->execbuf_client;
intel_engine_mask_t tmp;
int index;
@@ -2069,7 +2034,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
desc->wq_addr, desc->wq_size);
seq_putc(m, '\n');
- for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
+ for_each_engine(engine, dev_priv, tmp) {
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc =
&desc->lrc[guc_engine_id];
@@ -2097,13 +2062,13 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
u32 *log;
int i = 0;
- if (!HAS_GUC(dev_priv))
+ if (!HAS_GT_UC(dev_priv))
return -ENODEV;
if (dump_load_err)
- obj = dev_priv->guc.load_err_log;
- else if (dev_priv->guc.log.vma)
- obj = dev_priv->guc.log.vma->obj;
+ obj = dev_priv->gt.uc.guc.load_err_log;
+ else if (dev_priv->gt.uc.guc.log.vma)
+ obj = dev_priv->gt.uc.guc.log.vma->obj;
if (!obj)
return 0;
@@ -2134,7 +2099,7 @@ static int i915_guc_log_level_get(void *data, u64 *val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- *val = intel_guc_log_get_level(&dev_priv->guc.log);
+ *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
return 0;
}
@@ -2146,7 +2111,7 @@ static int i915_guc_log_level_set(void *data, u64 val)
if (!USES_GUC(dev_priv))
return -ENODEV;
- return intel_guc_log_set_level(&dev_priv->guc.log, val);
+ return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -2160,9 +2125,9 @@ static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
if (!USES_GUC(dev_priv))
return -ENODEV;
- file->private_data = &dev_priv->guc.log;
+ file->private_data = &dev_priv->gt.uc.guc.log;
- return intel_guc_log_relay_open(&dev_priv->guc.log);
+ return intel_guc_log_relay_open(&dev_priv->gt.uc.guc.log);
}
static ssize_t
@@ -2182,7 +2147,7 @@ static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *dev_priv = inode->i_private;
- intel_guc_log_relay_close(&dev_priv->guc.log);
+ intel_guc_log_relay_close(&dev_priv->gt.uc.guc.log);
return 0;
}
@@ -2485,7 +2450,8 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
for_each_power_domain(power_domain, power_well->desc->domains)
seq_printf(m, " %-23s %d\n",
- intel_display_power_domain_str(power_domain),
+ intel_display_power_domain_str(dev_priv,
+ power_domain),
power_domains->domain_use_count[power_domain]);
}
@@ -2603,6 +2569,25 @@ static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
intel_seq_print_mode(m, 2, mode);
}
+static void intel_hdcp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ bool hdcp_cap, hdcp2_cap;
+
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
+
+ seq_puts(m, "\n");
+}
+
static void intel_dp_info(struct seq_file *m,
struct intel_connector *intel_connector)
{
@@ -2616,6 +2601,10 @@ static void intel_dp_info(struct seq_file *m,
drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
&intel_dp->aux);
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
}
static void intel_dp_mst_info(struct seq_file *m,
@@ -2639,6 +2628,10 @@ static void intel_hdmi_info(struct seq_file *m,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
}
static void intel_lvds_info(struct seq_file *m,
@@ -2966,14 +2959,28 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
- struct i915_wa *wa;
- unsigned int i;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- seq_printf(m, "Workarounds applied: %u\n", wal->count);
- for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
+ for_each_engine(engine, i915, id) {
+ const struct i915_wa_list *wal = &engine->ctx_wa_list;
+ const struct i915_wa *wa;
+ unsigned int count;
+
+ count = wal->count;
+ if (!count)
+ continue;
+
+ seq_printf(m, "%s: Workarounds applied: %u\n",
+ engine->name, count);
+
+ for (wa = wal->list; count--; wa++)
+ seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
+ i915_mmio_reg_offset(wa->reg),
+ wa->val, wa->mask);
+
+ seq_printf(m, "\n");
+ }
return 0;
}
@@ -3620,7 +3627,8 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int
i915_wedged_get(void *data, u64 *val)
{
- int ret = i915_terminally_wedged(data);
+ struct drm_i915_private *i915 = data;
+ int ret = intel_gt_terminally_wedged(&i915->gt);
switch (ret) {
case -EIO:
@@ -3640,11 +3648,11 @@ i915_wedged_set(void *data, u64 val)
struct drm_i915_private *i915 = data;
/* Flush any previous reset before applying for a new one */
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
+ wait_event(i915->gt.reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
- i915_handle_error(i915, val, I915_ERROR_CAPTURE,
- "Manually set wedged engine mask = %llx", val);
+ intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
+ "Manually set wedged engine mask = %llx", val);
return 0;
}
@@ -3687,8 +3695,9 @@ i915_drop_caches_set(void *data, u64 val)
val, val & DROP_ALL);
if (val & DROP_RESET_ACTIVE &&
- wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
- i915_gem_set_wedged(i915);
+ wait_for(intel_engines_are_idle(&i915->gt),
+ I915_IDLE_ENGINES_TIMEOUT))
+ intel_gt_set_wedged(&i915->gt);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
@@ -3723,8 +3732,8 @@ i915_drop_caches_set(void *data, u64 val)
mutex_unlock(&i915->drm.struct_mutex);
}
- if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
- i915_handle_error(i915, ALL_ENGINES, 0, NULL);
+ if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
+ intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
fs_reclaim_acquire(GFP_KERNEL);
if (val & DROP_BOUND)
@@ -4087,9 +4096,9 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
/* Synchronize with everything first in case there's been an HPD
* storm, but we haven't finished handling it in the kernel yet
*/
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
flush_work(&dev_priv->hotplug.dig_port_work);
- flush_work(&dev_priv->hotplug.hotplug_work);
+ flush_delayed_work(&dev_priv->hotplug.hotplug_work);
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
@@ -4379,7 +4388,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
{"i915_hangcheck_info", i915_hangcheck_info, 0},
- {"i915_reset_info", i915_reset_info, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
@@ -4547,7 +4555,6 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct intel_connector *intel_connector = to_intel_connector(connector);
- bool hdcp_cap, hdcp2_cap;
if (connector->status != connector_status_connected)
return -ENODEV;
@@ -4558,17 +4565,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
seq_printf(m, "%s:%d HDCP version: ", connector->name,
connector->base.id);
- hdcp_cap = intel_hdcp_capable(intel_connector);
- hdcp2_cap = intel_hdcp2_capable(intel_connector);
-
- if (hdcp_cap)
- seq_puts(m, "HDCP1.4 ");
- if (hdcp2_cap)
- seq_puts(m, "HDCP2.2 ");
-
- if (!hdcp_cap && !hdcp2_cap)
- seq_puts(m, "None");
- seq_puts(m, "\n");
+ intel_hdcp_info(m, intel_connector);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f62e3397d936..f2d3d754af37 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -61,9 +61,11 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"
+#include "gt/uc/intel_uc.h"
#include "i915_debugfs.h"
#include "i915_drv.h"
@@ -75,19 +77,18 @@
#include "intel_csr.h"
#include "intel_drv.h"
#include "intel_pm.h"
-#include "intel_uc.h"
static struct drm_driver driver;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-static unsigned int i915_load_fail_count;
+static unsigned int i915_probe_fail_count;
-bool __i915_inject_load_failure(const char *func, int line)
+bool __i915_inject_probe_failure(const char *func, int line)
{
- if (i915_load_fail_count >= i915_modparams.inject_load_failure)
+ if (i915_probe_fail_count >= i915_modparams.inject_load_failure)
return false;
- if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
+ if (++i915_probe_fail_count == i915_modparams.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
i915_modparams.inject_load_failure, func, line);
i915_modparams.inject_load_failure = 0;
@@ -99,7 +100,7 @@ bool __i915_inject_load_failure(const char *func, int line)
bool i915_error_injected(void)
{
- return i915_load_fail_count && !i915_modparams.inject_load_failure;
+ return i915_probe_fail_count && !i915_modparams.inject_load_failure;
}
#endif
@@ -219,9 +220,14 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
WARN_ON(!IS_ICELAKE(dev_priv));
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
+ case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
WARN_ON(!IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
+ case INTEL_PCH_TGP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
+ WARN_ON(!IS_TIGERLAKE(dev_priv));
+ return PCH_TGP;
default:
return PCH_NONE;
}
@@ -249,7 +255,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_ELKHARTLAKE(dev_priv))
+ if (IS_TIGERLAKE(dev_priv))
+ id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
+ else if (IS_ELKHARTLAKE(dev_priv))
id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
else if (IS_ICELAKE(dev_priv))
id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
@@ -418,7 +426,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = sseu->min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
- value = intel_huc_check_status(&dev_priv->huc);
+ value = intel_huc_check_status(&dev_priv->gt.uc.huc);
if (value < 0)
return value;
break;
@@ -673,13 +681,13 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
.can_switch = i915_switcheroo_can_switch,
};
-static int i915_load_modeset_init(struct drm_device *dev)
+static int i915_driver_modeset_probe(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure())
return -ENODEV;
if (HAS_DISPLAY(dev_priv)) {
@@ -749,16 +757,16 @@ static int i915_load_modeset_init(struct drm_device *dev)
cleanup_gem:
i915_gem_suspend(dev_priv);
- i915_gem_fini_hw(dev_priv);
- i915_gem_fini(dev_priv);
+ i915_gem_driver_remove(dev_priv);
+ i915_gem_driver_release(dev_priv);
cleanup_modeset:
- intel_modeset_cleanup(dev);
+ intel_modeset_driver_remove(dev);
cleanup_irq:
- drm_irq_uninstall(dev);
+ intel_irq_uninstall(dev_priv);
intel_gmbus_teardown(dev_priv);
cleanup_csr:
intel_csr_ucode_fini(dev_priv);
- intel_power_domains_fini_hw(dev_priv);
+ intel_power_domains_driver_remove(dev_priv);
vga_switcheroo_unregister_client(pdev);
cleanup_vga_client:
vga_client_register(pdev, NULL, NULL, NULL);
@@ -840,15 +848,6 @@ out_err:
return -ENOMEM;
}
-static void i915_engines_cleanup(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id)
- kfree(engine);
-}
-
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->hotplug.dp_wq);
@@ -882,7 +881,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
}
/**
- * i915_driver_init_early - setup state not requiring device access
+ * i915_driver_early_probe - setup state not requiring device access
* @dev_priv: device private
*
* Initialize everything that is a "SW-only" state, that is state not
@@ -891,16 +890,16 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
* system memory allocation, setting up device specific attributes and
* function hooks not requiring accessing the device.
*/
-static int i915_driver_init_early(struct drm_i915_private *dev_priv)
+static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
int ret = 0;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure())
return -ENODEV;
intel_device_info_subplatform_init(dev_priv);
- intel_uncore_init_early(&dev_priv->uncore);
+ intel_uncore_init_early(&dev_priv->uncore, dev_priv);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
@@ -920,7 +919,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
- goto err_engines;
+ return ret;
+
+ intel_gt_init_early(&dev_priv->gt, dev_priv);
ret = i915_gem_init_early(dev_priv);
if (ret < 0)
@@ -930,14 +931,13 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
intel_detect_pch(dev_priv);
intel_wopcm_init_early(&dev_priv->wopcm);
- intel_uc_init_early(dev_priv);
+ intel_uc_init_early(&dev_priv->gt.uc);
intel_pm_setup(dev_priv);
intel_init_dpio(dev_priv);
ret = intel_power_domains_init(dev_priv);
if (ret < 0)
goto err_uc;
intel_irq_init(dev_priv);
- intel_hangcheck_init(dev_priv);
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
@@ -948,34 +948,32 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
return 0;
err_uc:
- intel_uc_cleanup_early(dev_priv);
+ intel_uc_cleanup_early(&dev_priv->gt.uc);
i915_gem_cleanup_early(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
-err_engines:
- i915_engines_cleanup(dev_priv);
return ret;
}
/**
- * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
+ * i915_driver_late_release - cleanup the setup done in
+ * i915_driver_early_probe()
* @dev_priv: device private
*/
-static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
+static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
intel_irq_fini(dev_priv);
intel_power_domains_cleanup(dev_priv);
- intel_uc_cleanup_early(dev_priv);
+ intel_uc_cleanup_early(&dev_priv->gt.uc);
i915_gem_cleanup_early(dev_priv);
i915_workqueues_cleanup(dev_priv);
- i915_engines_cleanup(dev_priv);
pm_qos_remove_request(&dev_priv->sb_qos);
mutex_destroy(&dev_priv->sb_lock);
}
/**
- * i915_driver_init_mmio - setup device MMIO
+ * i915_driver_mmio_probe - setup device MMIO
* @dev_priv: device private
*
* Setup minimal device state necessary for MMIO accesses later in the
@@ -983,11 +981,11 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
* side effects or exposing the driver via kernel internal or user space
* interfaces.
*/
-static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
+static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure())
return -ENODEV;
if (i915_get_bridge_dev(dev_priv))
@@ -1004,7 +1002,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
intel_uncore_prune_mmio_domains(&dev_priv->uncore);
- intel_uc_init_mmio(dev_priv);
+ intel_uc_init_mmio(&dev_priv->gt.uc);
ret = intel_engines_init_mmio(dev_priv);
if (ret)
@@ -1024,11 +1022,12 @@ err_bridge:
}
/**
- * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
+ * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
* @dev_priv: device private
*/
-static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
+static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
+ intel_engines_cleanup(dev_priv);
intel_teardown_mchbar(dev_priv);
intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
@@ -1520,18 +1519,18 @@ static void edram_detect(struct drm_i915_private *dev_priv)
}
/**
- * i915_driver_init_hw - setup state requiring device access
+ * i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
*
* Setup state that requires accessing the device, but doesn't require
* exposing the driver via kernel internal or userspace interfaces.
*/
-static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
+static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure())
return -ENODEV;
intel_device_info_runtime_init(dev_priv);
@@ -1590,6 +1589,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto err_ggtt;
+ intel_gt_init_hw(dev_priv);
+
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
@@ -1629,7 +1630,8 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
- intel_uncore_sanitize(dev_priv);
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ intel_sanitize_gt_powersave(dev_priv);
intel_gt_init_workarounds(dev_priv);
@@ -1677,17 +1679,17 @@ err_msi:
pci_disable_msi(pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
- i915_ggtt_cleanup_hw(dev_priv);
+ i915_ggtt_driver_release(dev_priv);
err_perf:
i915_perf_fini(dev_priv);
return ret;
}
/**
- * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
+ * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
* @dev_priv: device private
*/
-static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
+static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -1863,17 +1865,17 @@ static void i915_driver_destroy(struct drm_i915_private *i915)
}
/**
- * i915_driver_load - setup chip and create an initial config
+ * i915_driver_probe - setup chip and create an initial config
* @pdev: PCI device
* @ent: matching PCI ID entry
*
- * The driver load routine has to do several things:
+ * The driver probe routine has to do several things:
* - drive output discovery via intel_modeset_init()
* - initialize the memory manager
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
*/
-int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
@@ -1892,21 +1894,23 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_fini;
- ret = i915_driver_init_early(dev_priv);
+ ret = i915_driver_early_probe(dev_priv);
if (ret < 0)
goto out_pci_disable;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- ret = i915_driver_init_mmio(dev_priv);
+ i915_detect_vgpu(dev_priv);
+
+ ret = i915_driver_mmio_probe(dev_priv);
if (ret < 0)
goto out_runtime_pm_put;
- ret = i915_driver_init_hw(dev_priv);
+ ret = i915_driver_hw_probe(dev_priv);
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_load_modeset_init(&dev_priv->drm);
+ ret = i915_driver_modeset_probe(&dev_priv->drm);
if (ret < 0)
goto out_cleanup_hw;
@@ -1919,22 +1923,25 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
out_cleanup_hw:
- i915_driver_cleanup_hw(dev_priv);
- i915_ggtt_cleanup_hw(dev_priv);
+ i915_driver_hw_remove(dev_priv);
+ i915_ggtt_driver_release(dev_priv);
+
+ /* Paranoia: make sure we have disabled everything before we exit. */
+ intel_sanitize_gt_powersave(dev_priv);
out_cleanup_mmio:
- i915_driver_cleanup_mmio(dev_priv);
+ i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- i915_driver_cleanup_early(dev_priv);
+ i915_driver_late_release(dev_priv);
out_pci_disable:
pci_disable_device(pdev);
out_fini:
- i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
+ i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
i915_driver_destroy(dev_priv);
return ret;
}
-void i915_driver_unload(struct drm_device *dev)
+void i915_driver_remove(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -1948,7 +1955,7 @@ void i915_driver_unload(struct drm_device *dev)
* all in-flight requests so that we can quickly unbind the active
* resources.
*/
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(&dev_priv->gt);
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
@@ -1957,11 +1964,11 @@ void i915_driver_unload(struct drm_device *dev)
drm_atomic_helper_shutdown(dev);
- intel_gvt_cleanup(dev_priv);
+ intel_gvt_driver_remove(dev_priv);
- intel_modeset_cleanup(dev);
+ intel_modeset_driver_remove(dev);
- intel_bios_cleanup(dev_priv);
+ intel_bios_driver_remove(dev_priv);
vga_switcheroo_unregister_client(pdev);
vga_client_register(pdev, NULL, NULL, NULL);
@@ -1969,14 +1976,14 @@ void i915_driver_unload(struct drm_device *dev)
intel_csr_ucode_fini(dev_priv);
/* Free error state after interrupts are fully disabled. */
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&dev_priv->gt.hangcheck.work);
i915_reset_error_state(dev_priv);
- i915_gem_fini_hw(dev_priv);
+ i915_gem_driver_remove(dev_priv);
- intel_power_domains_fini_hw(dev_priv);
+ intel_power_domains_driver_remove(dev_priv);
- i915_driver_cleanup_hw(dev_priv);
+ i915_driver_hw_remove(dev_priv);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
}
@@ -1988,15 +1995,19 @@ static void i915_driver_release(struct drm_device *dev)
disable_rpm_wakeref_asserts(rpm);
- i915_gem_fini(dev_priv);
+ i915_gem_driver_release(dev_priv);
+
+ i915_ggtt_driver_release(dev_priv);
- i915_ggtt_cleanup_hw(dev_priv);
- i915_driver_cleanup_mmio(dev_priv);
+ /* Paranoia: make sure we have disabled everything before we exit. */
+ intel_sanitize_gt_powersave(dev_priv);
+
+ i915_driver_mmio_release(dev_priv);
enable_rpm_wakeref_asserts(rpm);
- intel_runtime_pm_cleanup(rpm);
+ intel_runtime_pm_driver_release(rpm);
- i915_driver_cleanup_early(dev_priv);
+ i915_driver_late_release(dev_priv);
i915_driver_destroy(dev_priv);
}
@@ -2189,7 +2200,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
out:
enable_rpm_wakeref_asserts(rpm);
if (!dev_priv->uncore.user_forcewake.count)
- intel_runtime_pm_cleanup(rpm);
+ intel_runtime_pm_driver_release(rpm);
return ret;
}
@@ -2348,7 +2359,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(&dev_priv->uncore);
- i915_check_and_clear_faults(dev_priv);
+ intel_gt_check_and_clear_faults(&dev_priv->gt);
if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
gen9_sanitize_dc_state(dev_priv);
@@ -2357,11 +2368,11 @@ static int i915_drm_resume_early(struct drm_device *dev)
hsw_disable_pc8(dev_priv);
}
- intel_uncore_sanitize(dev_priv);
+ intel_sanitize_gt_powersave(dev_priv);
intel_power_domains_resume(dev_priv);
- intel_gt_sanitize(dev_priv, true);
+ intel_gt_sanitize(&dev_priv->gt, true);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -2384,8 +2395,7 @@ static int i915_resume_switcheroo(struct drm_device *dev)
static int i915_pm_prepare(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_device *dev = dev_get_drvdata(kdev);
if (!dev) {
dev_err(kdev, "DRM not initialized, aborting suspend.\n");
@@ -2400,8 +2410,7 @@ static int i915_pm_prepare(struct device *kdev)
static int i915_pm_suspend(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_device *dev = dev_get_drvdata(kdev);
if (!dev) {
dev_err(kdev, "DRM not initialized, aborting suspend.\n");
@@ -2895,8 +2904,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
static int intel_runtime_suspend(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_device *dev = dev_get_drvdata(kdev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret;
@@ -2917,7 +2925,7 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
- intel_uc_runtime_suspend(dev_priv);
+ intel_uc_runtime_suspend(&dev_priv->gt.uc);
intel_runtime_pm_disable_interrupts(dev_priv);
@@ -2942,9 +2950,9 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_uc_resume(dev_priv);
+ intel_uc_resume(&dev_priv->gt.uc);
- i915_gem_init_swizzling(dev_priv);
+ intel_gt_init_swizzling(&dev_priv->gt);
i915_gem_restore_fences(dev_priv);
enable_rpm_wakeref_asserts(rpm);
@@ -2953,7 +2961,7 @@ static int intel_runtime_suspend(struct device *kdev)
}
enable_rpm_wakeref_asserts(rpm);
- intel_runtime_pm_cleanup(rpm);
+ intel_runtime_pm_driver_release(rpm);
if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
@@ -2994,8 +3002,7 @@ static int intel_runtime_suspend(struct device *kdev)
static int intel_runtime_resume(struct device *kdev)
{
- struct pci_dev *pdev = to_pci_dev(kdev);
- struct drm_device *dev = pci_get_drvdata(pdev);
+ struct drm_device *dev = dev_get_drvdata(kdev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
int ret = 0;
@@ -3040,13 +3047,13 @@ static int intel_runtime_resume(struct device *kdev)
intel_runtime_pm_enable_interrupts(dev_priv);
- intel_uc_resume(dev_priv);
+ intel_uc_resume(&dev_priv->gt.uc);
/*
* No point in rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
*/
- i915_gem_init_swizzling(dev_priv);
+ intel_gt_init_swizzling(&dev_priv->gt);
i915_gem_restore_fences(dev_priv);
/*
@@ -3188,9 +3195,9 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
@@ -3200,7 +3207,7 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
.release = i915_driver_release,
.open = i915_driver_open,
@@ -3216,6 +3223,9 @@ static struct drm_driver driver = {
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
+ .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
+ .get_scanout_position = i915_get_crtc_scanoutpos,
+
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
.ioctls = i915_ioctls,
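
The repeated suspend/resume cleanup above relies on the drvdata being stored on the underlying struct device, so the pci_dev round-trip is unnecessary: for a PCI-bound DRM device, pci_get_drvdata(to_pci_dev(kdev)) and dev_get_drvdata(kdev) return the same pointer. A minimal sketch (example_pm_op() is hypothetical):

static int example_pm_op(struct device *kdev)
{
	/* Equivalent to pci_get_drvdata(to_pci_dev(kdev)). */
	struct drm_device *dev = dev_get_drvdata(kdev);

	if (!dev)
		return -ENODEV;

	return 0;
}
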
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fe7a6ec2c199..2e13ecc9cbb6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -72,11 +72,12 @@
#include "gt/intel_lrc.h"
#include "gt/intel_engine.h"
+#include "gt/intel_gt_types.h"
#include "gt/intel_workarounds.h"
+#include "gt/uc/intel_uc.h"
#include "intel_device_info.h"
#include "intel_runtime_pm.h"
-#include "intel_uc.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"
@@ -88,7 +89,7 @@
#include "i915_gpu_error.h"
#include "i915_request.h"
#include "i915_scheduler.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "intel_gvt.h"
@@ -98,8 +99,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190619"
-#define DRIVER_TIMESTAMP 1560947544
+#define DRIVER_DATE "20190730"
+#define DRIVER_TIMESTAMP 1564512624
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -121,20 +122,20 @@
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-bool __i915_inject_load_failure(const char *func, int line);
-#define i915_inject_load_failure() \
- __i915_inject_load_failure(__func__, __LINE__)
+bool __i915_inject_probe_failure(const char *func, int line);
+#define i915_inject_probe_failure() \
+ __i915_inject_probe_failure(__func__, __LINE__)
bool i915_error_injected(void);
#else
-#define i915_inject_load_failure() false
+#define i915_inject_probe_failure() false
#define i915_error_injected() false
#endif
-#define i915_load_error(i915, fmt, ...) \
+#define i915_probe_error(i915, fmt, ...) \
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
@@ -162,7 +163,7 @@ enum hpd_pin {
#define HPD_STORM_DEFAULT_THRESHOLD 50
struct i915_hotplug {
- struct work_struct hotplug_work;
+ struct delayed_work hotplug_work;
struct {
unsigned long last_jiffies;
@@ -174,6 +175,7 @@ struct i915_hotplug {
} state;
} stats[HPD_NUM_PINS];
u32 event_bits;
+ u32 retry_bits;
struct delayed_work reenable_work;
u32 long_port_mask;
@@ -286,14 +288,14 @@ struct drm_i915_display_funcs {
enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
- int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
- int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
+ int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
+ int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
void (*initial_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate);
+ struct intel_crtc_state *crtc_state);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate);
+ struct intel_crtc_state *crtc_state);
void (*optimize_watermarks)(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate);
+ struct intel_crtc_state *crtc_state);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
@@ -306,10 +308,10 @@ struct drm_i915_display_funcs {
int (*crtc_compute_clock)(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
void (*crtc_enable)(struct intel_crtc_state *pipe_config,
- struct drm_atomic_state *old_state);
+ struct intel_atomic_state *old_state);
void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
- struct drm_atomic_state *old_state);
- void (*update_crtcs)(struct drm_atomic_state *state);
+ struct intel_atomic_state *old_state);
+ void (*update_crtcs)(struct intel_atomic_state *state);
void (*audio_codec_enable)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -535,6 +537,7 @@ enum intel_pch {
PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
PCH_MCC, /* Mule Creek Canyon PCH */
+ PCH_TGP, /* Tiger Lake PCH */
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -781,9 +784,6 @@ struct i915_gem_mm {
*/
struct vfsmount *gemfs;
- /** PPGTT used for aliasing the PPGTT with the GTT */
- struct i915_ppgtt *aliasing_ppgtt;
-
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
struct shrinker shrinker;
@@ -1347,9 +1347,6 @@ struct drm_i915_private {
struct intel_wopcm wopcm;
- struct intel_huc huc;
- struct intel_guc guc;
-
struct intel_csr csr;
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
@@ -1377,8 +1374,6 @@ struct drm_i915_private {
struct intel_engine_cs *engine[I915_NUM_ENGINES];
/* Context used internally to idle the GPU and setup initial state */
struct i915_gem_context *kernel_context;
- /* Context only to be used for injecting preemption commands */
- struct i915_gem_context *preempt_context;
struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
[MAX_ENGINE_INSTANCE + 1];
@@ -1402,10 +1397,7 @@ struct drm_i915_private {
u32 de_irq_mask[I915_MAX_PIPES];
};
u32 gt_irq_mask;
- u32 pm_imr;
- u32 pm_ier;
u32 pm_rps_events;
- u32 pm_guc_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
@@ -1488,8 +1480,6 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
- struct intel_ppat ppat;
-
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -1645,7 +1635,7 @@ struct drm_i915_private {
/*
* Should be held around atomic WM register writing; also
 * protects intel_crtc->wm.active and
- * cstate->wm.need_postvbl_update.
+ * crtc_state->wm.need_postvbl_update.
*/
struct mutex wm_mutex;
@@ -1825,38 +1815,7 @@ struct drm_i915_private {
} perf;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
- struct {
- struct i915_gt_timelines {
- struct mutex mutex; /* protects list, tainted by GPU */
- struct list_head active_list;
-
- /* Pack multiple timelines' seqnos into the same page */
- spinlock_t hwsp_lock;
- struct list_head hwsp_free_list;
- } timelines;
-
- struct list_head active_rings;
-
- struct intel_wakeref wakeref;
-
- struct list_head closed_vma;
- spinlock_t closed_lock; /* guards the list of closed_vma */
-
- /**
- * Is the GPU currently considered idle, or busy executing
- * userspace requests? Whilst idle, we allow runtime power
- * management to power down the hardware and display clocks.
- * In order to reduce the effect on performance, there
- * is a slight delay before we do so.
- */
- intel_wakeref_t awake;
-
- struct blocking_notifier_head pm_notifications;
-
- ktime_t last_init_time;
-
- struct i915_vma *scratch;
- } gt;
+ struct intel_gt gt;
struct {
struct notifier_block pm_notifier;
@@ -1941,21 +1900,6 @@ static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
return container_of(wopcm, struct drm_i915_private, wopcm);
}
-static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
-{
- return container_of(guc, struct drm_i915_private, guc);
-}
-
-static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
-{
- return container_of(huc, struct drm_i915_private, huc);
-}
-
-static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
-{
- return container_of(uncore, struct drm_i915_private, uncore);
-}
-
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
@@ -2127,6 +2071,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
+#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
@@ -2323,23 +2268,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
-/*
- * For now, anything with a GuC requires uCode loading, and then supports
- * command submission once loaded. But these are logically independent
- * properties, so we have separate macros to test them.
- */
-#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
-#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
+#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-/* For now, anything with a GuC has also HuC */
-#define HAS_HUC(dev_priv) (HAS_GUC(dev_priv))
-#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
-
-/* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv)
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv)
-#define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv)
+/* Having GuC/HuC is not the same as using GuC/HuC */
+#define USES_GUC(dev_priv) intel_uc_is_using_guc(&(dev_priv)->gt.uc)
+#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(&(dev_priv)->gt.uc)
+#define USES_HUC(dev_priv) intel_uc_is_using_huc(&(dev_priv)->gt.uc)
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
@@ -2359,6 +2293,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00
+#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880
+#define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */
@@ -2366,6 +2302,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
+#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
@@ -2426,40 +2363,18 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
#ifdef CONFIG_COMPAT
-extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
+long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#else
#define i915_compat_ioctl NULL
#endif
extern const struct dev_pm_ops i915_pm_ops;
-extern int i915_driver_load(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-extern void i915_driver_unload(struct drm_device *dev);
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+void i915_driver_remove(struct drm_device *dev);
-extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
-extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
+void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
-
-static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
-{
- unsigned long delay;
-
- if (unlikely(!i915_modparams.enable_hangcheck))
- return;
-
- /* Don't continually defer the hangcheck so that it is always run at
- * least once after work has been scheduled on any ring. Otherwise,
- * we will ignore a hung ring if a second ring is kept busy.
- */
-
- delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
- queue_delayed_work(system_long_wq,
- &dev_priv->gpu_error.hangcheck_work, delay);
-}
-
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
@@ -2481,18 +2396,17 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
- if (!atomic_read(&i915->mm.free_count))
- return;
-
- /* A single pass should suffice to release all the freed objects (along
+ /*
+ * A single pass should suffice to release all the freed objects (along
 * most call paths), but be a little more paranoid in that freeing
 * the objects does take a small amount of time, during which the rcu
* callbacks could have added new objects into the freed list, and
* armed the work again.
*/
- do {
+ while (atomic_read(&i915->mm.free_count)) {
+ flush_work(&i915->mm.free_work);
rcu_barrier();
- } while (flush_work(&i915->mm.free_work));
+ }
}
static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
@@ -2510,6 +2424,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
*/
int pass = 3;
do {
+ flush_workqueue(i915->wq);
rcu_barrier();
i915_gem_drain_freed_objects(i915);
} while (--pass);
@@ -2523,7 +2438,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 alignment,
u64 flags);
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags);
+#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
@@ -2546,36 +2463,22 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-static inline bool __i915_wedged(struct i915_gpu_error *error)
-{
- return unlikely(test_bit(I915_WEDGED, &error->flags));
-}
-
-static inline bool i915_reset_failed(struct drm_i915_private *i915)
-{
- return __i915_wedged(&i915->gpu_error);
-}
-
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
- return READ_ONCE(error->reset_count);
+ return atomic_read(&error->reset_count);
}
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
struct intel_engine_cs *engine)
{
- return READ_ONCE(error->reset_engine_count[engine->id]);
+ return atomic_read(&error->reset_engine_count[engine->uabi_class]);
}
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
-
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
-void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
-void i915_gem_fini(struct drm_i915_private *dev_priv);
+void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
+void i915_gem_driver_release(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
@@ -2592,8 +2495,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
-struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gem_obj, int flags);
+struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
@@ -2636,16 +2538,6 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
-
-/* belongs in i915_gem_gtt.h */
-static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
-{
- wmb();
- if (INTEL_GEN(dev_priv) < 6)
- intel_gtt_chipset_flush();
-}
-
/* i915_gem_stolen.c */
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
@@ -2717,14 +2609,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
bool is_master);
/* i915_perf.c */
-extern void i915_perf_init(struct drm_i915_private *dev_priv);
-extern void i915_perf_fini(struct drm_i915_private *dev_priv);
-extern void i915_perf_register(struct drm_i915_private *dev_priv);
-extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
+void i915_perf_init(struct drm_i915_private *dev_priv);
+void i915_perf_fini(struct drm_i915_private *dev_priv);
+void i915_perf_register(struct drm_i915_private *dev_priv);
+void i915_perf_unregister(struct drm_i915_private *dev_priv);
/* i915_suspend.c */
-extern int i915_save_state(struct drm_i915_private *dev_priv);
-extern int i915_restore_state(struct drm_i915_private *dev_priv);
+int i915_save_state(struct drm_i915_private *dev_priv);
+int i915_restore_state(struct drm_i915_private *dev_priv);
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
@@ -2738,23 +2630,22 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
}
/* modesetting */
-extern void intel_modeset_init_hw(struct drm_device *dev);
-extern int intel_modeset_init(struct drm_device *dev);
-extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
- bool state);
-extern void intel_display_resume(struct drm_device *dev);
-extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
-extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
-extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
+void intel_modeset_init_hw(struct drm_device *dev);
+int intel_modeset_init(struct drm_device *dev);
+void intel_modeset_driver_remove(struct drm_device *dev);
+int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
+void intel_display_resume(struct drm_device *dev);
+void i915_redisable_vga(struct drm_i915_private *dev_priv);
+void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
+void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-extern struct intel_display_error_state *
+struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
-extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct intel_display_error_state *error);
+void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
+ struct intel_display_error_state *error);
#define __I915_REG_OP(op__, dev_priv__, ...) \
intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
@@ -2830,11 +2721,6 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
-static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
-{
- return i915_ggtt_offset(i915->gt.scratch);
-}
-
static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
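
Aside: the inline helpers above (wopcm_to_i915() stays, while guc_to_i915(), huc_to_i915() and uncore_to_i915() are dropped as those members move under the new struct intel_gt) all rely on the kernel's container_of() pattern. A minimal, self-contained sketch of that technique follows; the struct names are hypothetical, only the shape matches the kernel code.

#include <stddef.h>

/* Same shape as the kernel macro: recover the enclosing object from a
 * pointer to one of its members by subtracting the member's offset. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
	int id;
	struct inner member;
};

static struct outer *inner_to_outer(struct inner *p)
{
	return container_of(p, struct outer, member);
}

int main(void)
{
	struct outer o = { .id = 42 };

	return inner_to_outer(&o.member)->id == 42 ? 0 : 1;
}
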
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
index 6621595fe74c..a327094de2bd 100644
--- a/drivers/gpu/drm/i915/i915_fixed.h
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -6,6 +6,11 @@
#ifndef _I915_FIXED_H_
#define _I915_FIXED_H_
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
typedef struct {
u32 val;
} uint_fixed_16_16_t;
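
The context above shows i915_fixed.h's u32-backed 16.16 fixed-point type; the new <linux/math64.h> include supports the 64-bit intermediates such arithmetic needs. A rough sketch of the representation, with made-up helper names rather than the header's actual API:

#include <stdint.h>
#include <stdio.h>

/* val stores x * 2^16, matching the uint_fixed_16_16_t layout above. */
typedef struct { uint32_t val; } fixed16;

static fixed16 fx_from_u32(uint32_t x)
{
	return (fixed16){ .val = x << 16 };
}

static fixed16 fx_mul(fixed16 a, fixed16 b)
{
	/* Widen to 64 bits before shifting back down; this is the
	 * overflow-safe step that <linux/math64.h> helpers provide
	 * in-kernel. */
	uint64_t t = (uint64_t)a.val * b.val;

	return (fixed16){ .val = (uint32_t)(t >> 16) };
}

int main(void)
{
	fixed16 half = { .val = 1u << 15 };       /* 0.5 in 16.16 */
	fixed16 r = fx_mul(fx_from_u32(3), half); /* expect 1.5 */

	printf("integer part %u, fraction 0x%04x\n",
	       r.val >> 16, r.val & 0xffff);
	return 0;
}
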
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8a659d3d7435..65863e955f40 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,9 +46,11 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
+#include "gt/intel_renderstate.h"
#include "gt/intel_workarounds.h"
#include "i915_drv.h"
@@ -100,7 +102,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}
-int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ unsigned long flags)
{
struct i915_vma *vma;
LIST_HEAD(still_in_list);
@@ -115,7 +118,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
list_move_tail(&vma->obj_link, &still_in_list);
spin_unlock(&obj->vma.lock);
- ret = i915_vma_unbind(vma);
+ ret = -EBUSY;
+ if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
+ !i915_vma_is_active(vma))
+ ret = i915_vma_unbind(vma);
spin_lock(&obj->vma.lock);
}
@@ -141,7 +147,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(to_i915(obj->base.dev));
+ intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
intel_fb_obj_flush(obj, ORIGIN_CPU);
return 0;
@@ -232,46 +238,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
&args->size, &args->handle);
}
-void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
-{
- intel_wakeref_t wakeref;
-
- /*
- * No actual flushing is required for the GTT write domain for reads
- * from the GTT domain. Writes to it "immediately" go to main memory
- * as far as we know, so there's no chipset flush. It also doesn't
- * land in the GPU render cache.
- *
- * However, we do have to enforce the order so that all writes through
- * the GTT land before any writes to the device, such as updates to
- * the GATT itself.
- *
- * We also have to wait a bit for the writes to land from the GTT.
- * An uncached read (i.e. mmio) seems to be ideal for the round-trip
- * timing. This issue has only been observed when switching quickly
- * between GTT writes and CPU reads from inside the kernel on recent hw,
- * and it appears to only affect discrete GTT blocks (i.e. on LLC
- * system agents we cannot reproduce this behaviour, until Cannonlake
- * that was!).
- */
-
- wmb();
-
- if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
- return;
-
- i915_gem_chipset_flush(dev_priv);
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- spin_lock_irq(&uncore->lock);
- intel_uncore_posting_read_fw(uncore,
- RING_HEAD(RENDER_RING_BASE));
- spin_unlock_irq(&uncore->lock);
- }
-}
-
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush)
@@ -430,11 +396,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
- wmb();
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
- wmb();
} else {
page_base += offset & PAGE_MASK;
}
@@ -454,7 +418,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin:
mutex_lock(&i915->drm.struct_mutex);
if (node.allocated) {
- wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
@@ -648,7 +611,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (node.allocated) {
- wmb(); /* flush the write before we modify the GGTT */
+ /* flush the write before we modify the GGTT */
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
@@ -677,8 +641,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
i915_gem_object_unlock_fence(obj, fence);
out_unpin:
mutex_lock(&i915->drm.struct_mutex);
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
if (node.allocated) {
- wmb();
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(&node);
} else {
@@ -929,13 +893,13 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
}
}
-static int wait_for_engines(struct drm_i915_private *i915)
+static int wait_for_engines(struct intel_gt *gt)
{
- if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
- dev_err(i915->drm.dev,
+ if (wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) {
+ dev_err(gt->i915->drm.dev,
"Failed to idle engines, declaring wedged!\n");
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
return -EIO;
}
@@ -946,8 +910,8 @@ static long
wait_for_timelines(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
- struct i915_gt_timelines *gt = &i915->gt.timelines;
- struct i915_timeline *tl;
+ struct intel_gt_timelines *gt = &i915->gt.timelines;
+ struct intel_timeline *tl;
mutex_lock(&gt->mutex);
list_for_each_entry(tl, &gt->active_list, link) {
@@ -988,15 +952,15 @@ wait_for_timelines(struct drm_i915_private *i915,
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
unsigned int flags, long timeout)
{
+ /* If the device is asleep, we have no requests outstanding */
+ if (!READ_ONCE(i915->gt.awake))
+ return 0;
+
GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
yesno(i915->gt.awake));
- /* If the device is asleep, we have no requests outstanding */
- if (!READ_ONCE(i915->gt.awake))
- return 0;
-
timeout = wait_for_timelines(i915, flags, timeout);
if (timeout < 0)
return timeout;
@@ -1006,7 +970,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
lockdep_assert_held(&i915->drm.struct_mutex);
- err = wait_for_engines(i915);
+ err = wait_for_engines(&i915->gt);
if (err)
return err;
@@ -1184,8 +1148,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* back to defaults, recovering from whatever wedged state we left it
* in and so worth trying to use the device once more.
*/
- if (i915_terminally_wedged(i915))
- i915_gem_unset_wedged(i915);
+ if (intel_gt_is_wedged(&i915->gt))
+ intel_gt_unset_wedged(&i915->gt);
/*
* If we inherit context state from the BIOS or earlier occupants
@@ -1195,82 +1159,72 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(&i915->gt, false);
intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
-void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
+static void init_unused_ring(struct intel_gt *gt, u32 base)
{
- if (INTEL_GEN(dev_priv) < 5 ||
- dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
- return;
-
- I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
- DISP_TILE_SURFACE_SWIZZLING);
-
- if (IS_GEN(dev_priv, 5))
- return;
-
- I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN(dev_priv, 6))
- I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN(dev_priv, 7))
- I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN(dev_priv, 8))
- I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
- else
- BUG();
-}
+ struct intel_uncore *uncore = gt->uncore;
-static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
-{
- I915_WRITE(RING_CTL(base), 0);
- I915_WRITE(RING_HEAD(base), 0);
- I915_WRITE(RING_TAIL(base), 0);
- I915_WRITE(RING_START(base), 0);
+ intel_uncore_write(uncore, RING_CTL(base), 0);
+ intel_uncore_write(uncore, RING_HEAD(base), 0);
+ intel_uncore_write(uncore, RING_TAIL(base), 0);
+ intel_uncore_write(uncore, RING_START(base), 0);
}
-static void init_unused_rings(struct drm_i915_private *dev_priv)
+static void init_unused_rings(struct intel_gt *gt)
{
- if (IS_I830(dev_priv)) {
- init_unused_ring(dev_priv, PRB1_BASE);
- init_unused_ring(dev_priv, SRB0_BASE);
- init_unused_ring(dev_priv, SRB1_BASE);
- init_unused_ring(dev_priv, SRB2_BASE);
- init_unused_ring(dev_priv, SRB3_BASE);
- } else if (IS_GEN(dev_priv, 2)) {
- init_unused_ring(dev_priv, SRB0_BASE);
- init_unused_ring(dev_priv, SRB1_BASE);
- } else if (IS_GEN(dev_priv, 3)) {
- init_unused_ring(dev_priv, PRB1_BASE);
- init_unused_ring(dev_priv, PRB2_BASE);
+ struct drm_i915_private *i915 = gt->i915;
+
+ if (IS_I830(i915)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ init_unused_ring(gt, SRB2_BASE);
+ init_unused_ring(gt, SRB3_BASE);
+ } else if (IS_GEN(i915, 2)) {
+ init_unused_ring(gt, SRB0_BASE);
+ init_unused_ring(gt, SRB1_BASE);
+ } else if (IS_GEN(i915, 3)) {
+ init_unused_ring(gt, PRB1_BASE);
+ init_unused_ring(gt, PRB2_BASE);
}
}
-int i915_gem_init_hw(struct drm_i915_private *dev_priv)
+int i915_gem_init_hw(struct drm_i915_private *i915)
{
+ struct intel_uncore *uncore = &i915->uncore;
+ struct intel_gt *gt = &i915->gt;
int ret;
- dev_priv->gt.last_init_time = ktime_get();
+ BUG_ON(!i915->kernel_context);
+ ret = intel_gt_terminally_wedged(gt);
+ if (ret)
+ return ret;
+
+ gt->last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
- I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
+ if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
+ intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));
- if (IS_HASWELL(dev_priv))
- I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
- LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
+ if (IS_HASWELL(i915))
+ intel_uncore_write(uncore,
+ MI_PREDICATE_RESULT_2,
+ IS_HSW_GT3(i915) ?
+ LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
/* Apply the GT workarounds... */
- intel_gt_apply_workarounds(dev_priv);
+ intel_gt_apply_workarounds(gt);
/* ...and determine whether they are sticking. */
- intel_gt_verify_workarounds(dev_priv, "init");
+ intel_gt_verify_workarounds(gt, "init");
- i915_gem_init_swizzling(dev_priv);
+ intel_gt_init_swizzling(gt);
/*
* At least 830 can leave some of the unused rings
@@ -1278,41 +1232,33 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
* will prevent c3 entry. Makes sure all unused rings
* are totally idle.
*/
- init_unused_rings(dev_priv);
-
- BUG_ON(!dev_priv->kernel_context);
- ret = i915_terminally_wedged(dev_priv);
- if (ret)
- goto out;
+ init_unused_rings(gt);
- ret = i915_ppgtt_init_hw(dev_priv);
+ ret = i915_ppgtt_init_hw(gt);
if (ret) {
DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
goto out;
}
- ret = intel_wopcm_init_hw(&dev_priv->wopcm);
+ ret = intel_wopcm_init_hw(&i915->wopcm, gt);
if (ret) {
DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
goto out;
}
/* We can't enable contexts until all firmware is loaded */
- ret = intel_uc_init_hw(dev_priv);
+ ret = intel_uc_init_hw(&i915->gt.uc);
if (ret) {
DRM_ERROR("Enabling uc failed (%d)\n", ret);
goto out;
}
- intel_mocs_init_l3cc_table(dev_priv);
+ intel_mocs_init_l3cc_table(gt);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- intel_engines_set_scheduler_caps(dev_priv);
- return 0;
+ intel_engines_set_scheduler_caps(i915);
out:
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
@@ -1349,10 +1295,24 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
}
- err = 0;
- if (rq->engine->init_context)
- err = rq->engine->init_context(rq);
+ err = intel_engine_emit_ctx_wa(rq);
+ if (err)
+ goto err_rq;
+
+ /*
+	 * Failing to program the MOCS is non-fatal. The system will not
+ * run at peak performance. So warn the user and carry on.
+ */
+ err = intel_mocs_emit(rq);
+ if (err)
+ dev_notice(i915->drm.dev,
+ "Failed to program MOCS registers; expect performance issues.\n");
+ err = intel_renderstate_emit(rq);
+ if (err)
+ goto err_rq;
+
+err_rq:
i915_request_add(rq);
if (err)
goto err_active;
@@ -1437,46 +1397,19 @@ err_active:
* and ready to be torn-down. The quickest way we can accomplish
* this is by declaring ourselves wedged.
*/
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
goto out_ctx;
}
static int
i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- obj = i915_gem_object_create_stolen(i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- i915->gt.scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
+ return intel_gt_init_scratch(&i915->gt, size);
}
static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
- i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+ intel_gt_fini_scratch(&i915->gt);
}
static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
@@ -1507,19 +1440,17 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- i915_timelines_init(dev_priv);
+ intel_timelines_init(dev_priv);
ret = i915_gem_init_userptr(dev_priv);
if (ret)
return ret;
- ret = intel_uc_init_misc(dev_priv);
- if (ret)
- return ret;
+ intel_uc_fetch_firmwares(&dev_priv->gt.uc);
ret = intel_wopcm_init(&dev_priv->wopcm);
if (ret)
- goto err_uc_misc;
+ goto err_uc_fw;
/* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -1530,7 +1461,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- ret = i915_gem_init_ggtt(dev_priv);
+ ret = i915_init_ggtt(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_unlock;
@@ -1563,7 +1494,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
intel_init_gt_powersave(dev_priv);
- ret = intel_uc_init(dev_priv);
+ ret = intel_uc_init(&dev_priv->gt.uc);
if (ret)
goto err_pm;
@@ -1572,7 +1503,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_uc_init;
/* Only when the HW is re-initialised, can we replay the requests */
- ret = intel_gt_resume(dev_priv);
+ ret = intel_gt_resume(&dev_priv->gt);
if (ret)
goto err_init_hw;
@@ -1595,12 +1526,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto err_gt;
- if (i915_inject_load_failure()) {
+ if (i915_inject_probe_failure()) {
ret = -ENODEV;
goto err_gt;
}
- if (i915_inject_load_failure()) {
+ if (i915_inject_probe_failure()) {
ret = -EIO;
goto err_gt;
}
@@ -1619,7 +1550,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_gt:
mutex_unlock(&dev_priv->drm.struct_mutex);
- i915_gem_set_wedged(dev_priv);
+ intel_gt_set_wedged(&dev_priv->gt);
i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
@@ -1627,9 +1558,9 @@ err_gt:
mutex_lock(&dev_priv->drm.struct_mutex);
err_init_hw:
- intel_uc_fini_hw(dev_priv);
+ intel_uc_fini_hw(&dev_priv->gt.uc);
err_uc_init:
- intel_uc_fini(dev_priv);
+ intel_uc_fini(&dev_priv->gt.uc);
err_pm:
if (ret != -EIO) {
intel_cleanup_gt_powersave(dev_priv);
@@ -1645,12 +1576,12 @@ err_unlock:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
-err_uc_misc:
- intel_uc_fini_misc(dev_priv);
+err_uc_fw:
+ intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
if (ret != -EIO) {
i915_gem_cleanup_userptr(dev_priv);
- i915_timelines_fini(dev_priv);
+ intel_timelines_fini(dev_priv);
}
if (ret == -EIO) {
@@ -1661,10 +1592,10 @@ err_uc_misc:
* wedged. But we only want to do this where the GPU is angry,
	 * for all other failures, such as an allocation failure, bail.
*/
- if (!i915_reset_failed(dev_priv)) {
- i915_load_error(dev_priv,
- "Failed to initialize GPU, declaring it wedged!\n");
- i915_gem_set_wedged(dev_priv);
+ if (!intel_gt_is_wedged(&dev_priv->gt)) {
+ i915_probe_error(dev_priv,
+ "Failed to initialize GPU, declaring it wedged!\n");
+ intel_gt_set_wedged(&dev_priv->gt);
}
/* Minimal basic recovery for KMS */
@@ -1680,7 +1611,7 @@ err_uc_misc:
return ret;
}
-void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
+void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
GEM_BUG_ON(dev_priv->gt.awake);
@@ -1693,14 +1624,14 @@ void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uc_fini_hw(dev_priv);
- intel_uc_fini(dev_priv);
+ intel_uc_fini_hw(&dev_priv->gt.uc);
+ intel_uc_fini(&dev_priv->gt.uc);
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_drain_freed_objects(dev_priv);
}
-void i915_gem_fini(struct drm_i915_private *dev_priv)
+void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->drm.struct_mutex);
intel_engines_cleanup(dev_priv);
@@ -1712,9 +1643,9 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_cleanup_gt_powersave(dev_priv);
- intel_uc_fini_misc(dev_priv);
+ intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
- i915_timelines_fini(dev_priv);
+ intel_timelines_fini(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
@@ -1743,20 +1674,9 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
int err;
- intel_gt_pm_init(dev_priv);
-
- INIT_LIST_HEAD(&dev_priv->gt.active_rings);
- INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
- spin_lock_init(&dev_priv->gt.closed_lock);
-
i915_gem_init__mm(dev_priv);
i915_gem_init__pm(dev_priv);
- init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
- init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- mutex_init(&dev_priv->gpu_error.wedge_mutex);
- init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
-
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
spin_lock_init(&dev_priv->fb_tracking.lock);
@@ -1775,7 +1695,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.shrink_count);
- cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
+ intel_gt_cleanup_early(&dev_priv->gt);
i915_gemfs_fini(dev_priv);
}
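
Several hunks in this file replace open-coded I915_READ()/I915_WRITE() sequences with intel_uncore_rmw(uncore, reg, clear, set), e.g. the HSW_IDICR and DISP_ARB_CTL updates. A sketch of the presumed read-modify-write semantics, with plain pointer accessors standing in for the uncore/forcewake machinery:

#include <stdint.h>

static uint32_t mmio_read(volatile uint32_t *reg)
{
	return *reg;
}

static void mmio_write(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;
}

static void rmw(volatile uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t val = mmio_read(reg);

	/* Drop the bits in @clear, then turn on the bits in @set. */
	val &= ~clear;
	val |= set;
	mmio_write(reg, val);
}

int main(void)
{
	uint32_t fake_reg = 0x0f;

	/* Like intel_uncore_rmw(uncore, HSW_IDICR, 0, mask) above:
	 * clear nothing, OR in the new bits. */
	rmw(&fake_reg, 0, 0xf0);
	return fake_reg == 0xff ? 0 : 1;
}
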
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 25a3e4d09a2f..b17f23991253 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -94,34 +94,26 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
list = &pool->cache_list[n];
list_for_each_entry(obj, list, batch_pool_link) {
+ struct reservation_object *resv = obj->base.resv;
+
/* The batches are strictly LRU ordered */
- if (i915_gem_object_is_active(obj)) {
- struct reservation_object *resv = obj->base.resv;
-
- if (!reservation_object_test_signaled_rcu(resv, true))
- break;
-
- i915_retire_requests(pool->engine->i915);
- GEM_BUG_ON(i915_gem_object_is_active(obj));
-
- /*
- * The object is now idle, clear the array of shared
- * fences before we add a new request. Although, we
- * remain on the same engine, we may be on a different
- * timeline and so may continually grow the array,
- * trapping a reference to all the old fences, rather
- * than replace the existing fence.
- */
- if (rcu_access_pointer(resv->fence)) {
- reservation_object_lock(resv, NULL);
- reservation_object_add_excl_fence(resv, NULL);
- reservation_object_unlock(resv);
- }
+ if (!reservation_object_test_signaled_rcu(resv, true))
+ break;
+
+ /*
+ * The object is now idle, clear the array of shared
+		 * fences before we add a new request. Although we
+ * remain on the same engine, we may be on a different
+ * timeline and so may continually grow the array,
+ * trapping a reference to all the old fences, rather
+ * than replace the existing fence.
+ */
+ if (rcu_access_pointer(resv->fence)) {
+ reservation_object_lock(resv, NULL);
+ reservation_object_add_excl_fence(resv, NULL);
+ reservation_object_unlock(resv);
}
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
- true));
-
if (obj->base.size >= size)
goto found;
}
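
The simplified scan above leans on the "strictly LRU ordered" invariant: entries sit oldest-first, so the first object whose fences have not signaled implies everything after it is busy too, and the loop can break instead of walking on. A reduced sketch of that invariant with hypothetical types; is_idle is replaced here by a plain flag standing in for the reservation-object test:

#include <stdbool.h>
#include <stddef.h>

struct pooled_buf {
	struct pooled_buf *next; /* oldest ... newest */
	size_t size;
	bool busy;
};

static struct pooled_buf *pool_get(struct pooled_buf *head, size_t size)
{
	for (struct pooled_buf *b = head; b; b = b->next) {
		/* LRU invariant: once one entry is busy, every newer
		 * entry behind it is busy as well. */
		if (b->busy)
			break;
		if (b->size >= size)
			return b; /* idle and big enough: reuse */
	}
	return NULL; /* caller allocates a fresh buffer */
}

int main(void)
{
	struct pooled_buf b1 = { .size = 4096 };

	return pool_get(&b1, 1024) == &b1 ? 0 : 1;
}
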
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 0bf53ac1c835..bcac359ec661 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -834,3 +834,35 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
i915_gem_restore_fences(i915);
}
+
+void intel_gt_init_swizzling(struct intel_gt *gt)
+{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
+ if (INTEL_GEN(i915) < 5 ||
+ i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
+ return;
+
+ intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING);
+
+ if (IS_GEN(i915, 5))
+ return;
+
+ intel_uncore_rmw(uncore, TILECTL, 0, TILECTL_SWZCTL);
+
+ if (IS_GEN(i915, 6))
+ intel_uncore_write(uncore,
+ ARB_MODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ else if (IS_GEN(i915, 7))
+ intel_uncore_write(uncore,
+ ARB_MODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ else if (IS_GEN(i915, 8))
+ intel_uncore_write(uncore,
+ GAMTARBMODE,
+ _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+ else
+ MISSING_CASE(INTEL_GEN(i915));
+}
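
The ARB_MODE/GAMTARBMODE writes above go through _MASKED_BIT_ENABLE(): on these registers the upper 16 bits of the written value select which of the lower 16 bits take effect, so a single bit can be flipped without a read-modify-write. A sketch of composing such values; the macro bodies here are illustrative, not copied from i915_reg.h:

#include <stdint.h>

#define MASKED_FIELD(mask, value) (((uint32_t)(mask) << 16) | (value))
#define MASKED_BIT_ENABLE(bit)    MASKED_FIELD((bit), (bit))
#define MASKED_BIT_DISABLE(bit)   MASKED_FIELD((bit), 0)

int main(void)
{
	/* Enable bit 2: mask 0x0004 in the high half, value 0x0004 low. */
	uint32_t on  = MASKED_BIT_ENABLE(1u << 2);  /* 0x00040004 */
	/* Disable bit 2: same mask, value 0 in the low half. */
	uint32_t off = MASKED_BIT_DISABLE(1u << 2); /* 0x00040000 */

	return (on == 0x00040004 && off == 0x00040000) ? 0 : 1;
}
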
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
index d2da98828179..37e4f104f7c0 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h
@@ -32,6 +32,7 @@ struct drm_i915_gem_object;
struct drm_i915_private;
struct i915_ggtt;
struct i915_vma;
+struct intel_gt;
struct sg_table;
#define I965_FENCE_PAGE 4096UL
@@ -66,4 +67,6 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_ggtt_init_fences(struct i915_ggtt *ggtt);
+void intel_gt_init_swizzling(struct intel_gt *gt);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7015a97b1097..c3028722d4e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -36,6 +36,7 @@
#include <drm/i915_drm.h>
#include "display/intel_frontbuffer.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -45,6 +46,12 @@
#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GTT)
+#define DBG(...) trace_printk(__VA_ARGS__)
+#else
+#define DBG(...)
+#endif
+
/**
* DOC: Global GTT views
*
@@ -106,12 +113,14 @@
*
*/
+#define as_pd(x) container_of((x), typeof(struct i915_page_directory), pt)
+
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
-static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
+static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
/*
* Note that as an uncached mmio write, this will flush the
@@ -120,24 +129,19 @@ static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}
-static void guc_ggtt_invalidate(struct drm_i915_private *i915)
+static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &i915->uncore;
+ struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
- gen6_ggtt_invalidate(i915);
+ gen6_ggtt_invalidate(ggtt);
intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
-static void gmch_ggtt_invalidate(struct drm_i915_private *i915)
+static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
intel_gtt_chipset_flush();
}
-static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
-{
- i915->ggtt.invalidate(i915);
-}
-
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
@@ -215,10 +219,10 @@ static u64 gen8_pte_encode(dma_addr_t addr,
return pte;
}
-static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
- const enum i915_cache_level level)
+static u64 gen8_pde_encode(const dma_addr_t addr,
+ const enum i915_cache_level level)
{
- gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+ u64 pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE;
@@ -227,9 +231,6 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
return pde;
}
-#define gen8_pdpe_encode gen8_pde_encode
-#define gen8_pml4e_encode gen8_pde_encode
-
static u64 snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags)
@@ -482,9 +483,69 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
spin_unlock(&vm->free_pages.lock);
}
+static void i915_address_space_fini(struct i915_address_space *vm)
+{
+ spin_lock(&vm->free_pages.lock);
+ if (pagevec_count(&vm->free_pages.pvec))
+ vm_free_pages_release(vm, true);
+ GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
+ spin_unlock(&vm->free_pages.lock);
+
+ drm_mm_takedown(&vm->mm);
+
+ mutex_destroy(&vm->mutex);
+}
+
+static void ppgtt_destroy_vma(struct i915_address_space *vm)
+{
+ struct list_head *phases[] = {
+ &vm->bound_list,
+ &vm->unbound_list,
+ NULL,
+ }, **phase;
+
+ mutex_lock(&vm->i915->drm.struct_mutex);
+ for (phase = phases; *phase; phase++) {
+ struct i915_vma *vma, *vn;
+
+ list_for_each_entry_safe(vma, vn, *phase, vm_link)
+ i915_vma_destroy(vma);
+ }
+ mutex_unlock(&vm->i915->drm.struct_mutex);
+}
+
+static void __i915_vm_release(struct work_struct *work)
+{
+ struct i915_address_space *vm =
+ container_of(work, struct i915_address_space, rcu.work);
+
+ ppgtt_destroy_vma(vm);
+
+ GEM_BUG_ON(!list_empty(&vm->bound_list));
+ GEM_BUG_ON(!list_empty(&vm->unbound_list));
+
+ vm->cleanup(vm);
+ i915_address_space_fini(vm);
+
+ kfree(vm);
+}
+
+void i915_vm_release(struct kref *kref)
+{
+ struct i915_address_space *vm =
+ container_of(kref, struct i915_address_space, ref);
+
+ GEM_BUG_ON(i915_is_ggtt(vm));
+ trace_i915_ppgtt_release(vm);
+
+ vm->closed = true;
+ queue_rcu_work(vm->i915->wq, &vm->rcu);
+}
+
static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
kref_init(&vm->ref);
+ INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -505,19 +566,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass)
INIT_LIST_HEAD(&vm->bound_list);
}
-static void i915_address_space_fini(struct i915_address_space *vm)
-{
- spin_lock(&vm->free_pages.lock);
- if (pagevec_count(&vm->free_pages.pvec))
- vm_free_pages_release(vm, true);
- GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
- spin_unlock(&vm->free_pages.lock);
-
- drm_mm_takedown(&vm->mm);
-
- mutex_destroy(&vm->mutex);
-}
-
static int __setup_page_dma(struct i915_address_space *vm,
struct i915_page_dma *p,
gfp_t gfp)
@@ -554,28 +602,17 @@ static void cleanup_page_dma(struct i915_address_space *vm,
#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
-#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
-#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
-#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
-#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
-
-static void fill_page_dma(struct i915_address_space *vm,
- struct i915_page_dma *p,
- const u64 val)
+static void
+fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
{
- u64 * const vaddr = kmap_atomic(p->page);
-
- memset64(vaddr, val, PAGE_SIZE / sizeof(val));
-
- kunmap_atomic(vaddr);
+ kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
}
-static void fill_page_dma_32(struct i915_address_space *vm,
- struct i915_page_dma *p,
- const u32 v)
-{
- fill_page_dma(vm, p, (u64)v << 32 | v);
-}
+#define fill_px(px, v) fill_page_dma(px_base(px), (v), PAGE_SIZE / sizeof(u64))
+#define fill32_px(px, v) do { \
+ u64 v__ = lower_32_bits(v); \
+ fill_px((px), v__ << 32 | v__); \
+} while (0)
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
@@ -602,7 +639,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
do {
- int order = get_order(size);
+ unsigned int order = get_order(size);
struct page *page;
dma_addr_t addr;
@@ -621,8 +658,8 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!IS_ALIGNED(addr, size)))
goto unmap_page;
- vm->scratch_page.page = page;
- vm->scratch_page.daddr = addr;
+ vm->scratch[0].base.page = page;
+ vm->scratch[0].base.daddr = addr;
vm->scratch_order = order;
return 0;
@@ -641,14 +678,30 @@ skip:
static void cleanup_scratch_page(struct i915_address_space *vm)
{
- struct i915_page_dma *p = &vm->scratch_page;
- int order = vm->scratch_order;
+ struct i915_page_dma *p = px_base(&vm->scratch[0]);
+ unsigned int order = vm->scratch_order;
dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
PCI_DMA_BIDIRECTIONAL);
__free_pages(p->page, order);
}
+static void free_scratch(struct i915_address_space *vm)
+{
+ int i;
+
+ if (!px_dma(&vm->scratch[0])) /* set to 0 on clones */
+ return;
+
+ for (i = 1; i <= vm->top; i++) {
+ if (!px_dma(&vm->scratch[i]))
+ break;
+ cleanup_page_dma(vm, px_base(&vm->scratch[i]));
+ }
+
+ cleanup_scratch_page(vm);
+}
+
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
struct i915_page_table *pt;
@@ -657,50 +710,24 @@ static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_px(vm, pt))) {
+ if (unlikely(setup_page_dma(vm, &pt->base))) {
kfree(pt);
return ERR_PTR(-ENOMEM);
}
atomic_set(&pt->used, 0);
-
return pt;
}
-static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
-{
- cleanup_px(vm, pt);
- kfree(pt);
-}
-
-static void gen8_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
-{
- fill_px(vm, pt, vm->scratch_pte);
-}
-
-static void gen6_initialize_pt(struct i915_address_space *vm,
- struct i915_page_table *pt)
-{
- fill32_px(vm, pt, vm->scratch_pte);
-}
-
-static struct i915_page_directory *__alloc_pd(void)
+static struct i915_page_directory *__alloc_pd(size_t sz)
{
struct i915_page_directory *pd;
- pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
-
+ pd = kzalloc(sz, I915_GFP_ALLOW_FAIL);
if (unlikely(!pd))
return NULL;
- memset(&pd->base, 0, sizeof(pd->base));
- atomic_set(&pd->used, 0);
spin_lock_init(&pd->lock);
-
- /* for safety */
- pd->entry[0] = NULL;
-
return pd;
}
@@ -708,11 +735,11 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
- pd = __alloc_pd();
+ pd = __alloc_pd(sizeof(*pd));
if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
- if (unlikely(setup_px(vm, pd))) {
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
kfree(pd);
return ERR_PTR(-ENOMEM);
}
@@ -720,36 +747,72 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
return pd;
}
-static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
+static void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
{
- return pd->base.page;
+ cleanup_page_dma(vm, pd);
+ kfree(pd);
}
-static void free_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd)
+#define free_px(vm, px) free_pd(vm, px_base(px))
+
+static inline void
+write_dma_entry(struct i915_page_dma * const pdma,
+ const unsigned short idx,
+ const u64 encoded_entry)
{
- if (likely(pd_has_phys_page(pd)))
- cleanup_px(vm, pd);
+ u64 * const vaddr = kmap_atomic(pdma->page);
- kfree(pd);
+ vaddr[idx] = encoded_entry;
+ kunmap_atomic(vaddr);
}
-static void init_pd_with_page(struct i915_address_space *vm,
- struct i915_page_directory * const pd,
- struct i915_page_table *pt)
+static inline void
+__set_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_dma * const to,
+ u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
+{
+ GEM_BUG_ON(atomic_read(px_used(pd)) > ARRAY_SIZE(pd->entry));
+
+ atomic_inc(px_used(pd));
+ pd->entry[idx] = to;
+ write_dma_entry(px_base(pd), idx, encode(to->daddr, I915_CACHE_LLC));
+}
+
+#define set_pd_entry(pd, idx, to) \
+ __set_pd_entry((pd), (idx), px_base(to), gen8_pde_encode)
+
+static inline void
+clear_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ const struct i915_page_scratch * const scratch)
{
- fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC));
- memset_p(pd->entry, pt, 512);
+ GEM_BUG_ON(atomic_read(px_used(pd)) == 0);
+
+ write_dma_entry(px_base(pd), idx, scratch->encode);
+ pd->entry[idx] = NULL;
+ atomic_dec(px_used(pd));
}
-static void init_pd(struct i915_address_space *vm,
- struct i915_page_directory * const pd,
- struct i915_page_directory * const to)
+static bool
+release_pd_entry(struct i915_page_directory * const pd,
+ const unsigned short idx,
+ struct i915_page_table * const pt,
+ const struct i915_page_scratch * const scratch)
{
- GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
+ bool free = false;
+
+ if (atomic_add_unless(&pt->used, -1, 1))
+ return false;
+
+ spin_lock(&pd->lock);
+ if (atomic_dec_and_test(&pt->used)) {
+ clear_pd_entry(pd, idx, scratch);
+ free = true;
+ }
+ spin_unlock(&pd->lock);
- fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
- memset_p(pd->entry, to, 512);
+ return free;
}
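
release_pd_entry() above is an instance of a common lockless-fastpath refcount drop: atomic_add_unless(&pt->used, -1, 1) decrements only while the count stays above one, so the lock is taken just for the final put, where the entry is actually torn down. A reduced single-object sketch, using C11 atomics and pthreads in place of the kernel primitives; all types here are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

struct entry {
	atomic_int used;
	pthread_mutex_t *lock; /* protects the owning table */
	void (*clear)(struct entry *);
};

static bool put_entry(struct entry *e)
{
	int old = atomic_load(&e->used);

	/* Fast path: drop one reference unless we would be the last. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(&e->used, &old, old - 1))
			return false; /* still in use, nothing freed */
	}

	/* Slow path: serialize against concurrent lookups, re-check. */
	pthread_mutex_lock(e->lock);
	bool last = atomic_fetch_sub(&e->used, 1) == 1;
	if (last)
		e->clear(e); /* last reference: tear down the entry */
	pthread_mutex_unlock(e->lock);

	return last;
}
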
/*
@@ -763,165 +826,305 @@ static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
ppgtt->pd_dirty_engines = ALL_ENGINES;
}
-/* Removes entries from a single page table, releasing it if it's empty.
- * Caller can use the return value to update higher-level entries.
- */
-static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
- struct i915_page_table *pt,
- u64 start, u64 length)
+static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
- unsigned int num_entries = gen8_pte_count(start, length);
- gen8_pte_t *vaddr;
+ struct i915_address_space *vm = &ppgtt->vm;
+ struct drm_i915_private *dev_priv = vm->i915;
+ enum vgt_g2v_type msg;
+ int i;
- vaddr = kmap_atomic_px(pt);
- memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
- kunmap_atomic(vaddr);
+ if (create)
+ atomic_inc(px_used(ppgtt->pd)); /* never remove */
+ else
+ atomic_dec(px_used(ppgtt->pd));
+
+ if (i915_vm_is_4lvl(vm)) {
+ const u64 daddr = px_dma(ppgtt->pd);
- GEM_BUG_ON(num_entries > atomic_read(&pt->used));
- return !atomic_sub_return(num_entries, &pt->used);
+ I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
+ I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
+
+ msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
+ } else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
+ I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
+ }
+
+ msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
+ VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
+ }
+
+ I915_WRITE(vgtif_reg(g2v_notify), msg);
+
+ return 0;
}
-static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- struct i915_page_table *pt,
- unsigned int pde)
+/* Index shifts into the pagetable are offset by GEN8_PTE_SHIFT [12] */
+#define GEN8_PAGE_SIZE (SZ_4K) /* page and page-directory sizes are the same */
+#define GEN8_PTE_SHIFT (ilog2(GEN8_PAGE_SIZE))
+#define GEN8_PDES (GEN8_PAGE_SIZE / sizeof(u64))
+#define gen8_pd_shift(lvl) ((lvl) * ilog2(GEN8_PDES))
+#define gen8_pd_index(i, lvl) i915_pde_index((i), gen8_pd_shift(lvl))
+#define __gen8_pte_shift(lvl) (GEN8_PTE_SHIFT + gen8_pd_shift(lvl))
+#define __gen8_pte_index(a, lvl) i915_pde_index((a), __gen8_pte_shift(lvl))
+
+static inline unsigned int
+gen8_pd_range(u64 start, u64 end, int lvl, unsigned int *idx)
{
- gen8_pde_t *vaddr;
+ const int shift = gen8_pd_shift(lvl);
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
- vaddr = kmap_atomic_px(pd);
- vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
+ GEM_BUG_ON(start >= end);
+ end += ~mask >> gen8_pd_shift(1);
+
+ *idx = i915_pde_index(start, shift);
+ if ((start ^ end) & mask)
+ return GEN8_PDES - *idx;
+ else
+ return i915_pde_index(end, shift) - *idx;
}
-static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- u64 start, u64 length)
+static inline bool gen8_pd_contains(u64 start, u64 end, int lvl)
{
- struct i915_page_table *pt;
- u32 pde;
+ const u64 mask = ~0ull << gen8_pd_shift(lvl + 1);
- gen8_for_each_pde(pt, pd, start, length, pde) {
- bool free = false;
+ GEM_BUG_ON(start >= end);
+ return (start ^ end) & mask && (start & ~mask) == 0;
+}
- GEM_BUG_ON(pt == vm->scratch_pt);
+static inline unsigned int gen8_pt_count(u64 start, u64 end)
+{
+ GEM_BUG_ON(start >= end);
+ if ((start ^ end) >> gen8_pd_shift(1))
+ return GEN8_PDES - (start & (GEN8_PDES - 1));
+ else
+ return end - start;
+}
- if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
- continue;
+static inline unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
+{
+ unsigned int shift = __gen8_pte_shift(vm->top);
+ return (vm->total + (1ull << shift) - 1) >> shift;
+}
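/*
 * Worked example of the index math above (an annotation sketch, not
 * part of the patch): addresses are pre-shifted right by GEN8_PTE_SHIFT
 * (12), so each remaining 9-bit slice selects one of the GEN8_PDES
 * (512) entries at a level. With vm->top == 3 (4-level paging):
 *
 *   gen8_pd_shift(0) =  0  -> PTE index   = addr & 511
 *   gen8_pd_shift(1) =  9  -> PDE index   = (addr >> 9) & 511
 *   gen8_pd_shift(2) = 18  -> PDPE index  = (addr >> 18) & 511
 *   gen8_pd_shift(3) = 27  -> PML4E index = (addr >> 27) & 511
 *
 * e.g. the page at GTT offset 0x80401000 is page number 0x80401,
 * giving indices { pml4e = 0, pdpe = 2, pde = 2, pte = 1 }.
 */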
- spin_lock(&pd->lock);
- if (!atomic_read(&pt->used)) {
- gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
- pd->entry[pde] = vm->scratch_pt;
+static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ int count, int lvl)
+{
+ if (lvl) {
+ void **pde = pd->entry;
- GEM_BUG_ON(!atomic_read(&pd->used));
- atomic_dec(&pd->used);
- free = true;
- }
- spin_unlock(&pd->lock);
- if (free)
- free_pt(vm, pt);
+ do {
+ if (!*pde)
+ continue;
+
+ __gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
+ } while (pde++, --count);
}
- return !atomic_read(&pd->used);
+ free_px(vm, pd);
}
-static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
- struct i915_page_directory *pd,
- unsigned int pdpe)
+static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
- gen8_ppgtt_pdpe_t *vaddr;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- if (!pd_has_phys_page(pdp))
- return;
+ if (intel_vgpu_active(vm->i915))
+ gen8_ppgtt_notify_vgt(ppgtt, false);
- vaddr = kmap_atomic_px(pdp);
- vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
+ __gen8_ppgtt_cleanup(vm, ppgtt->pd, gen8_pd_top_count(vm), vm->top);
+ free_scratch(vm);
}
-/* Removes entries from a single page dir pointer, releasing it if it's empty.
- * Caller can use the return value to update higher-level entries
- */
-static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
- struct i915_page_directory * const pdp,
- u64 start, u64 length)
+static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 start, const u64 end, int lvl)
{
- struct i915_page_directory *pd;
- unsigned int pdpe;
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ unsigned int idx, len;
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- bool free = false;
+ len = gen8_pd_range(start, end, lvl--, &idx);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d}\n",
+ __func__, vm, lvl + 1, start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
- GEM_BUG_ON(pd == vm->scratch_pd);
-
- if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
+ gen8_pd_contains(start, end, lvl)) {
+ DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
+ __func__, vm, lvl + 1, idx, start, end);
+ clear_pd_entry(pd, idx, scratch);
+ __gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
+ start += (u64)I915_PDES << gen8_pd_shift(lvl);
continue;
+ }
+
+ if (lvl) {
+ start = __gen8_ppgtt_clear(vm, as_pd(pt),
+ start, end, lvl);
+ } else {
+ unsigned int count;
+ u64 *vaddr;
- spin_lock(&pdp->lock);
- if (!atomic_read(&pd->used)) {
- gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
- pdp->entry[pdpe] = vm->scratch_pd;
+ count = gen8_pt_count(start, end);
+ DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d} removing pte\n",
+ __func__, vm, lvl, start, end,
+ gen8_pd_index(start, 0), count,
+ atomic_read(&pt->used));
+ GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
+
+ vaddr = kmap_atomic_px(pt);
+ memset64(vaddr + gen8_pd_index(start, 0),
+ vm->scratch[0].encode,
+ count);
+ kunmap_atomic(vaddr);
- GEM_BUG_ON(!atomic_read(&pdp->used));
- atomic_dec(&pdp->used);
- free = true;
+ atomic_sub(count, &pt->used);
+ start += count;
}
- spin_unlock(&pdp->lock);
- if (free)
- free_pd(vm, pd);
- }
- return !atomic_read(&pdp->used);
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ } while (idx++, --len);
+
+ return start;
}
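This recursive clear replaces the per-level gen8_ppgtt_clear_pt/pd/pdp/4lvl functions with a single walk parameterised by lvl. A minimal sketch of that shape, with illustrative types in place of the kernel's:

#define ENTRIES 512

struct node {
        struct node *child[ENTRIES];
};

/* Illustrative only: one routine covers every level; lvl > 0 recurses
 * into populated children, lvl == 0 is where the leaf work happens. */
static void walk(struct node *n, int lvl)
{
        int i;

        if (!lvl)
                return; /* leaf: operate on the page-table entries */

        for (i = 0; i < ENTRIES; i++)
                if (n->child[i])
                        walk(n->child[i], lvl - 1);
}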
-static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
- u64 start, u64 length)
+static void gen8_ppgtt_clear(struct i915_address_space *vm,
+ u64 start, u64 length)
{
- gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length);
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
+
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ start, start + length, vm->top);
}
-static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4,
- struct i915_page_directory *pdp,
- unsigned int pml4e)
+static int __gen8_ppgtt_alloc(struct i915_address_space * const vm,
+ struct i915_page_directory * const pd,
+ u64 * const start, u64 end, int lvl)
{
- gen8_ppgtt_pml4e_t *vaddr;
+ const struct i915_page_scratch * const scratch = &vm->scratch[lvl];
+ struct i915_page_table *alloc = NULL;
+ unsigned int idx, len;
+ int ret = 0;
- vaddr = kmap_atomic_px(pml4);
- vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
- kunmap_atomic(vaddr);
+ len = gen8_pd_range(*start, end, lvl--, &idx);
+ DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d}\n",
+ __func__, vm, lvl + 1, *start, end,
+ idx, len, atomic_read(px_used(pd)));
+ GEM_BUG_ON(!len || (idx + len - 1) >> gen8_pd_shift(1));
+
+ spin_lock(&pd->lock);
+ GEM_BUG_ON(!atomic_read(px_used(pd))); /* Must be pinned! */
+ do {
+ struct i915_page_table *pt = pd->entry[idx];
+
+ if (!pt) {
+ spin_unlock(&pd->lock);
+
+ DBG("%s(%p):{ lvl:%d, idx:%d } allocating new tree\n",
+ __func__, vm, lvl + 1, idx);
+
+ pt = fetch_and_zero(&alloc);
+ if (lvl) {
+ if (!pt) {
+ pt = &alloc_pd(vm)->pt;
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ fill_px(pt, vm->scratch[lvl].encode);
+ } else {
+ if (!pt) {
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt)) {
+ ret = PTR_ERR(pt);
+ goto out;
+ }
+ }
+
+ if (intel_vgpu_active(vm->i915) ||
+ gen8_pt_count(*start, end) < I915_PDES)
+ fill_px(pt, vm->scratch[lvl].encode);
+ }
+
+ spin_lock(&pd->lock);
+ if (likely(!pd->entry[idx]))
+ set_pd_entry(pd, idx, pt);
+ else
+ alloc = pt, pt = pd->entry[idx];
+ }
+
+ if (lvl) {
+ atomic_inc(&pt->used);
+ spin_unlock(&pd->lock);
+
+ ret = __gen8_ppgtt_alloc(vm, as_pd(pt),
+ start, end, lvl);
+ if (unlikely(ret)) {
+ if (release_pd_entry(pd, idx, pt, scratch))
+ free_px(vm, pt);
+ goto out;
+ }
+
+ spin_lock(&pd->lock);
+ atomic_dec(&pt->used);
+ GEM_BUG_ON(!atomic_read(&pt->used));
+ } else {
+ unsigned int count = gen8_pt_count(*start, end);
+
+ DBG("%s(%p):{lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d} inserting pte\n",
+ __func__, vm, lvl, *start, end,
+ gen8_pd_index(*start, 0), count,
+ atomic_read(&pt->used));
+
+ atomic_add(count, &pt->used);
+ GEM_BUG_ON(atomic_read(&pt->used) > I915_PDES);
+ *start += count;
+ }
+ } while (idx++, --len);
+ spin_unlock(&pd->lock);
+out:
+ if (alloc)
+ free_px(vm, alloc);
+ return ret;
}
-/* Removes entries from a single pml4.
- * This is the top-level structure in 4-level page tables used on gen8+.
- * Empty entries are always scratch pml4e.
- */
-static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
- u64 start, u64 length)
+static int gen8_ppgtt_alloc(struct i915_address_space *vm,
+ u64 start, u64 length)
{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory * const pml4 = ppgtt->pd;
- struct i915_page_directory *pdp;
- unsigned int pml4e;
+ u64 from;
+ int err;
- GEM_BUG_ON(!i915_vm_is_4lvl(vm));
+ GEM_BUG_ON(!IS_ALIGNED(start, BIT_ULL(GEN8_PTE_SHIFT)));
+ GEM_BUG_ON(!IS_ALIGNED(length, BIT_ULL(GEN8_PTE_SHIFT)));
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- bool free = false;
- GEM_BUG_ON(pdp == vm->scratch_pdp);
+ start >>= GEN8_PTE_SHIFT;
+ length >>= GEN8_PTE_SHIFT;
+ GEM_BUG_ON(length == 0);
+ from = start;
- if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
- continue;
+ err = __gen8_ppgtt_alloc(vm, i915_vm_to_ppgtt(vm)->pd,
+ &start, start + length, vm->top);
+ if (unlikely(err && from != start))
+ __gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
+ from, start, vm->top);
- spin_lock(&pml4->lock);
- if (!atomic_read(&pdp->used)) {
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- pml4->entry[pml4e] = vm->scratch_pdp;
- free = true;
- }
- spin_unlock(&pml4->lock);
- if (free)
- free_pd(vm, pdp);
- }
+ return err;
}
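On failure, only the populated part of the range is torn down: from records the original start, the allocator advances start as it goes, and the clear runs over [from, start). The same idiom in isolation, with hypothetical alloc_range()/clear_range() helpers:

typedef unsigned long long u64;

/* Illustrative only: alloc_range() advances *start past whatever it
 * managed to populate; clear_range() tears down [start, end). */
int alloc_range(u64 *start, u64 end);
void clear_range(u64 start, u64 end);

int alloc_with_unwind(u64 start, u64 length)
{
        u64 from = start, end = start + length;
        int err = alloc_range(&start, end);

        if (err && from != start)
                clear_range(from, start); /* undo only what succeeded */
        return err;
}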
static inline struct sgt_dma {
@@ -933,47 +1136,28 @@ static inline struct sgt_dma {
return (struct sgt_dma) { sg, addr, addr + sg->length };
}
-struct gen8_insert_pte {
- u16 pml4e;
- u16 pdpe;
- u16 pde;
- u16 pte;
-};
-
-static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
-{
- return (struct gen8_insert_pte) {
- gen8_pml4e_index(start),
- gen8_pdpe_index(start),
- gen8_pde_index(start),
- gen8_pte_index(start),
- };
-}
-
-static __always_inline bool
+static __always_inline u64
gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
struct i915_page_directory *pdp,
struct sgt_dma *iter,
- struct gen8_insert_pte *idx,
+ u64 idx,
enum i915_cache_level cache_level,
u32 flags)
{
struct i915_page_directory *pd;
const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
gen8_pte_t *vaddr;
- bool ret;
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = i915_pd_entry(pdp, idx->pdpe);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
+ pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
- vaddr[idx->pte] = pte_encode | iter->dma;
+ vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
iter->dma += I915_GTT_PAGE_SIZE;
if (iter->dma >= iter->max) {
iter->sg = __sg_next(iter->sg);
if (!iter->sg) {
- ret = false;
+ idx = 0;
break;
}
@@ -981,30 +1165,22 @@ gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
iter->max = iter->dma + iter->sg->length;
}
- if (++idx->pte == GEN8_PTES) {
- idx->pte = 0;
-
- if (++idx->pde == I915_PDES) {
- idx->pde = 0;
-
+ if (gen8_pd_index(++idx, 0) == 0) {
+ if (gen8_pd_index(idx, 1) == 0) {
/* Limited by sg length for 3lvl */
- if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
- idx->pdpe = 0;
- ret = true;
+ if (gen8_pd_index(idx, 2) == 0)
break;
- }
- GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
- pd = pdp->entry[idx->pdpe];
+ pd = pdp->entry[gen8_pd_index(idx, 2)];
}
kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
kunmap_atomic(vaddr);
- return ret;
+ return idx;
}
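The four u16 cursors of gen8_insert_pte are replaced by a single u64 page index; gen8_pd_index(idx, lvl) masks out the nine bits belonging to each level, so ++idx carries naturally into the higher levels. A sketch of the extraction, assuming the nine-bits-per-level layout used here:

/* Illustrative only: nine index bits per page-table level, matching
 * 512 entries per table. */
static unsigned int pd_index(unsigned long long idx, int lvl)
{
        return (unsigned int)(idx >> (9 * lvl)) & 0x1ff;
}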
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
@@ -1014,9 +1190,9 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct sgt_dma iter = sgt_dma(vma);
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
- gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, &idx,
+ gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter,
+ vma->node.start >> GEN8_PTE_SHIFT,
cache_level, flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -1033,39 +1209,38 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
dma_addr_t rem = iter->sg->length;
do {
- struct gen8_insert_pte idx = gen8_insert_pte(start);
struct i915_page_directory *pdp =
- i915_pdp_entry(pml4, idx.pml4e);
- struct i915_page_directory *pd = i915_pd_entry(pdp, idx.pdpe);
- unsigned int page_size;
- bool maybe_64K = false;
+ i915_pd_entry(pml4, __gen8_pte_index(start, 3));
+ struct i915_page_directory *pd =
+ i915_pd_entry(pdp, __gen8_pte_index(start, 2));
gen8_pte_t encode = pte_encode;
+ unsigned int maybe_64K = -1;
+ unsigned int page_size;
gen8_pte_t *vaddr;
- u16 index, max;
+ u16 index;
if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
- rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
- index = idx.pde;
- max = I915_PDES;
- page_size = I915_GTT_PAGE_SIZE_2M;
-
+ rem >= I915_GTT_PAGE_SIZE_2M &&
+ !__gen8_pte_index(start, 0)) {
+ index = __gen8_pte_index(start, 1);
encode |= GEN8_PDE_PS_2M;
+ page_size = I915_GTT_PAGE_SIZE_2M;
vaddr = kmap_atomic_px(pd);
} else {
- struct i915_page_table *pt = i915_pt_entry(pd, idx.pde);
+ struct i915_page_table *pt =
+ i915_pt_entry(pd, __gen8_pte_index(start, 1));
- index = idx.pte;
- max = GEN8_PTES;
+ index = __gen8_pte_index(start, 0);
page_size = I915_GTT_PAGE_SIZE;
if (!index &&
vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (max - index) * I915_GTT_PAGE_SIZE))
- maybe_64K = true;
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
+ maybe_64K = __gen8_pte_index(start, 1);
vaddr = kmap_atomic_px(pt);
}
@@ -1086,16 +1261,16 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
iter->dma = sg_dma_address(iter->sg);
iter->max = iter->dma + rem;
- if (maybe_64K && index < max &&
+ if (maybe_64K != -1 && index < I915_PDES &&
!(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
(IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
- rem >= (max - index) * I915_GTT_PAGE_SIZE)))
- maybe_64K = false;
+ rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
+ maybe_64K = -1;
if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
break;
}
- } while (rem >= page_size && index < max);
+ } while (rem >= page_size && index < I915_PDES);
kunmap_atomic(vaddr);
@@ -1105,14 +1280,14 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
* it and have reached the end of the sg table and we have
* enough padding.
*/
- if (maybe_64K &&
- (index == max ||
+ if (maybe_64K != -1 &&
+ (index == I915_PDES ||
(i915_vm_has_scratch_64K(vma->vm) &&
!iter->sg && IS_ALIGNED(vma->node.start +
vma->node.size,
I915_GTT_PAGE_SIZE_2M)))) {
vaddr = kmap_atomic_px(pd);
- vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
+ vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
kunmap_atomic(vaddr);
page_size = I915_GTT_PAGE_SIZE_64K;
@@ -1128,9 +1303,8 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
u16 i;
- encode = vma->vm->scratch_pte;
- vaddr = kmap_atomic_px(i915_pt_entry(pd,
- idx.pde));
+ encode = vma->vm->scratch[0].encode;
+ vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
@@ -1156,32 +1330,22 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level,
flags);
} else {
- struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
+ u64 idx = vma->node.start >> GEN8_PTE_SHIFT;
- while (gen8_ppgtt_insert_pte_entries(ppgtt,
- i915_pdp_entry(pml4, idx.pml4e++),
- &iter, &idx, cache_level,
- flags))
- GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
+ while ((idx = gen8_ppgtt_insert_pte_entries(ppgtt,
+ i915_pd_entry(pml4, gen8_pd_index(idx, 3)),
+ &iter, idx, cache_level,
+ flags)))
+ ;
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
}
-static void gen8_free_page_tables(struct i915_address_space *vm,
- struct i915_page_directory *pd)
-{
- int i;
-
- for (i = 0; i < I915_PDES; i++) {
- if (pd->entry[i] != vm->scratch_pt)
- free_pt(vm, pd->entry[i]);
- }
-}
-
static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
+ int i;
/*
* If everybody agrees not to write into the scratch page,
@@ -1195,10 +1359,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
GEM_BUG_ON(!clone->has_read_only);
vm->scratch_order = clone->scratch_order;
- vm->scratch_pte = clone->scratch_pte;
- vm->scratch_pt = clone->scratch_pt;
- vm->scratch_pd = clone->scratch_pd;
- vm->scratch_pdp = clone->scratch_pdp;
+ memcpy(vm->scratch, clone->scratch, sizeof(vm->scratch));
+ px_dma(&vm->scratch[0]) = 0; /* no xfer of ownership */
return 0;
}
@@ -1206,377 +1368,88 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (ret)
return ret;
- vm->scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC,
- vm->has_read_only);
+ vm->scratch[0].encode =
+ gen8_pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_LLC, vm->has_read_only);
- vm->scratch_pt = alloc_pt(vm);
- if (IS_ERR(vm->scratch_pt)) {
- ret = PTR_ERR(vm->scratch_pt);
- goto free_scratch_page;
- }
+ for (i = 1; i <= vm->top; i++) {
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[i]))))
+ goto free_scratch;
- vm->scratch_pd = alloc_pd(vm);
- if (IS_ERR(vm->scratch_pd)) {
- ret = PTR_ERR(vm->scratch_pd);
- goto free_pt;
- }
-
- if (i915_vm_is_4lvl(vm)) {
- vm->scratch_pdp = alloc_pd(vm);
- if (IS_ERR(vm->scratch_pdp)) {
- ret = PTR_ERR(vm->scratch_pdp);
- goto free_pd;
- }
+ fill_px(&vm->scratch[i], vm->scratch[i - 1].encode);
+ vm->scratch[i].encode =
+ gen8_pde_encode(px_dma(&vm->scratch[i]),
+ I915_CACHE_LLC);
}
- gen8_initialize_pt(vm, vm->scratch_pt);
- init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt);
- if (i915_vm_is_4lvl(vm))
- init_pd(vm, vm->scratch_pdp, vm->scratch_pd);
-
return 0;
-free_pd:
- free_pd(vm, vm->scratch_pd);
-free_pt:
- free_pt(vm, vm->scratch_pt);
-free_scratch_page:
- cleanup_scratch_page(vm);
-
- return ret;
+free_scratch:
+ free_scratch(vm);
+ return -ENOMEM;
}
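Scratch becomes an array indexed by level: scratch[0] is the scratch page, and each higher scratch[i] is filled with scratch[i - 1]'s encoding, so a walk through unpopulated entries always bottoms out at the scratch page. The chain construction in isolation (hypothetical encode()/fill() helpers standing in for gen8_pde_encode()/fill_px()):

typedef unsigned long long u64;

/* Illustrative only. */
u64 encode(void *page);           /* PDE encoding of a page */
void fill(void *page, u64 value); /* write value to every entry */

static void init_scratch_chain(void *scratch[], int top)
{
        int i;

        for (i = 1; i <= top; i++)
                fill(scratch[i], encode(scratch[i - 1]));
}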
-static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
+static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->vm;
- struct drm_i915_private *dev_priv = vm->i915;
- enum vgt_g2v_type msg;
- int i;
+ struct i915_page_directory *pd = ppgtt->pd;
+ unsigned int idx;
- if (i915_vm_is_4lvl(vm)) {
- const u64 daddr = px_dma(ppgtt->pd);
+ GEM_BUG_ON(vm->top != 2);
+ GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
- I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
+ for (idx = 0; idx < GEN8_3LVL_PDPES; idx++) {
+ struct i915_page_directory *pde;
- msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
- } else {
- for (i = 0; i < GEN8_3LVL_PDPES; i++) {
- const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+ pde = alloc_pd(vm);
+ if (IS_ERR(pde))
+ return PTR_ERR(pde);
- I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
- I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
- }
-
- msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
- VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
+ fill_px(pde, vm->scratch[1].encode);
+ set_pd_entry(pd, idx, pde);
+ atomic_inc(px_used(pde)); /* keep pinned */
}
- I915_WRITE(vgtif_reg(g2v_notify), msg);
-
return 0;
}
-static void gen8_free_scratch(struct i915_address_space *vm)
-{
- if (!vm->scratch_page.daddr)
- return;
-
- if (i915_vm_is_4lvl(vm))
- free_pd(vm, vm->scratch_pdp);
- free_pd(vm, vm->scratch_pd);
- free_pt(vm, vm->scratch_pt);
- cleanup_scratch_page(vm);
-}
-
-static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
- struct i915_page_directory *pdp)
+static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
{
- const unsigned int pdpes = i915_pdpes_per_pdp(vm);
- int i;
-
- for (i = 0; i < pdpes; i++) {
- if (pdp->entry[i] == vm->scratch_pd)
- continue;
-
- gen8_free_page_tables(vm, pdp->entry[i]);
- free_pd(vm, pdp->entry[i]);
- }
-
- free_pd(vm, pdp);
-}
-
-static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
-{
- struct i915_page_directory * const pml4 = ppgtt->pd;
- int i;
-
- for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
- struct i915_page_directory *pdp = i915_pdp_entry(pml4, i);
-
- if (pdp == ppgtt->vm.scratch_pdp)
- continue;
-
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp);
- }
-
- free_pd(&ppgtt->vm, pml4);
-}
-
-static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
-{
- struct drm_i915_private *i915 = vm->i915;
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct drm_i915_private *i915 = gt->i915;
- if (intel_vgpu_active(i915))
- gen8_ppgtt_notify_vgt(ppgtt, false);
-
- if (i915_vm_is_4lvl(vm))
- gen8_ppgtt_cleanup_4lvl(ppgtt);
- else
- gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd);
-
- gen8_free_scratch(vm);
-}
-
-static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- u64 start, u64 length)
-{
- struct i915_page_table *pt, *alloc = NULL;
- u64 from = start;
- unsigned int pde;
- int ret = 0;
-
- spin_lock(&pd->lock);
- gen8_for_each_pde(pt, pd, start, length, pde) {
- const int count = gen8_pte_count(start, length);
-
- if (pt == vm->scratch_pt) {
- spin_unlock(&pd->lock);
-
- pt = fetch_and_zero(&alloc);
- if (!pt)
- pt = alloc_pt(vm);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto unwind;
- }
-
- if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
- gen8_initialize_pt(vm, pt);
-
- spin_lock(&pd->lock);
- if (pd->entry[pde] == vm->scratch_pt) {
- gen8_ppgtt_set_pde(vm, pd, pt, pde);
- pd->entry[pde] = pt;
- atomic_inc(&pd->used);
- } else {
- alloc = pt;
- pt = pd->entry[pde];
- }
- }
-
- atomic_add(count, &pt->used);
- }
- spin_unlock(&pd->lock);
- goto out;
-
-unwind:
- gen8_ppgtt_clear_pd(vm, pd, from, start - from);
-out:
- if (alloc)
- free_pt(vm, alloc);
- return ret;
-}
-
-static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
- struct i915_page_directory *pdp,
- u64 start, u64 length)
-{
- struct i915_page_directory *pd, *alloc = NULL;
- u64 from = start;
- unsigned int pdpe;
- int ret = 0;
-
- spin_lock(&pdp->lock);
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- if (pd == vm->scratch_pd) {
- spin_unlock(&pdp->lock);
-
- pd = fetch_and_zero(&alloc);
- if (!pd)
- pd = alloc_pd(vm);
- if (IS_ERR(pd)) {
- ret = PTR_ERR(pd);
- goto unwind;
- }
-
- init_pd_with_page(vm, pd, vm->scratch_pt);
-
- spin_lock(&pdp->lock);
- if (pdp->entry[pdpe] == vm->scratch_pd) {
- gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
- pdp->entry[pdpe] = pd;
- atomic_inc(&pdp->used);
- } else {
- alloc = pd;
- pd = pdp->entry[pdpe];
- }
- }
- atomic_inc(&pd->used);
- spin_unlock(&pdp->lock);
-
- ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
- if (unlikely(ret))
- goto unwind_pd;
-
- spin_lock(&pdp->lock);
- atomic_dec(&pd->used);
- }
- spin_unlock(&pdp->lock);
- goto out;
-
-unwind_pd:
- spin_lock(&pdp->lock);
- if (atomic_dec_and_test(&pd->used)) {
- gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
- pdp->entry[pdpe] = vm->scratch_pd;
- GEM_BUG_ON(!atomic_read(&pdp->used));
- atomic_dec(&pdp->used);
- GEM_BUG_ON(alloc);
- alloc = pd; /* defer the free to after the lock */
- }
- spin_unlock(&pdp->lock);
-unwind:
- gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
-out:
- if (alloc)
- free_pd(vm, alloc);
- return ret;
-}
-
-static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- return gen8_ppgtt_alloc_pdp(vm,
- i915_vm_to_ppgtt(vm)->pd, start, length);
-}
-
-static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct i915_page_directory * const pml4 = ppgtt->pd;
- struct i915_page_directory *pdp, *alloc = NULL;
- u64 from = start;
- int ret = 0;
- u32 pml4e;
-
- spin_lock(&pml4->lock);
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pdp == vm->scratch_pdp) {
- spin_unlock(&pml4->lock);
-
- pdp = fetch_and_zero(&alloc);
- if (!pdp)
- pdp = alloc_pd(vm);
- if (IS_ERR(pdp)) {
- ret = PTR_ERR(pdp);
- goto unwind;
- }
-
- init_pd(vm, pdp, vm->scratch_pd);
-
- spin_lock(&pml4->lock);
- if (pml4->entry[pml4e] == vm->scratch_pdp) {
- gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
- pml4->entry[pml4e] = pdp;
- } else {
- alloc = pdp;
- pdp = pml4->entry[pml4e];
- }
- }
- atomic_inc(&pdp->used);
- spin_unlock(&pml4->lock);
-
- ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
- if (unlikely(ret))
- goto unwind_pdp;
+ ppgtt->vm.gt = gt;
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
- spin_lock(&pml4->lock);
- atomic_dec(&pdp->used);
- }
- spin_unlock(&pml4->lock);
- goto out;
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
-unwind_pdp:
- spin_lock(&pml4->lock);
- if (atomic_dec_and_test(&pdp->used)) {
- gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- pml4->entry[pml4e] = vm->scratch_pdp;
- GEM_BUG_ON(alloc);
- alloc = pdp; /* defer the free until after the lock */
- }
- spin_unlock(&pml4->lock);
-unwind:
- gen8_ppgtt_clear_4lvl(vm, from, start - from);
-out:
- if (alloc)
- free_pd(vm, alloc);
- return ret;
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
}
-static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
+static struct i915_page_directory *
+gen8_alloc_top_pd(struct i915_address_space *vm)
{
- struct i915_address_space *vm = &ppgtt->vm;
- struct i915_page_directory *pdp = ppgtt->pd;
+ const unsigned int count = gen8_pd_top_count(vm);
struct i915_page_directory *pd;
- u64 start = 0, length = ppgtt->vm.total;
- u64 from = start;
- unsigned int pdpe;
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- pd = alloc_pd(vm);
- if (IS_ERR(pd))
- goto unwind;
+ GEM_BUG_ON(count > ARRAY_SIZE(pd->entry));
- init_pd_with_page(vm, pd, vm->scratch_pt);
- gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
-
- atomic_inc(&pdp->used);
- }
-
- atomic_inc(&pdp->used); /* never remove */
-
- return 0;
+ pd = __alloc_pd(offsetof(typeof(*pd), entry[count]));
+ if (unlikely(!pd))
+ return ERR_PTR(-ENOMEM);
-unwind:
- start -= from;
- gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
- gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
- free_pd(vm, pd);
+ if (unlikely(setup_page_dma(vm, px_base(pd)))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
}
- atomic_set(&pdp->used, 0);
- return -ENOMEM;
-}
-static void ppgtt_init(struct drm_i915_private *i915,
- struct i915_ppgtt *ppgtt)
-{
- ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
-
- i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
-
- ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
+ fill_page_dma(px_base(pd), vm->scratch[vm->top].encode, count);
+ atomic_inc(px_used(pd)); /* mark as pinned */
+ return pd;
}
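__alloc_pd() now takes an explicit size, letting the top-level directory carry exactly gen8_pd_top_count() slots; offsetof(typeof(*pd), entry[count]) is the usual trailing-array sizing idiom. In isolation:

#include <stddef.h>
#include <stdlib.h>

/* Illustrative only: size a struct with a trailing array for exactly
 * n entries by measuring up to &dir->entry[n]. */
struct dir {
        int used;
        void *entry[];
};

static struct dir *alloc_dir(size_t n)
{
        return calloc(1, offsetof(struct dir, entry[n]));
}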
/*
@@ -1595,7 +1468,8 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ppgtt_init(i915, ppgtt);
+ ppgtt_init(ppgtt, &i915->gt);
+ ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
/*
* From bdw, there is hw support for read-only pages in the PPGTT.
@@ -1615,41 +1489,27 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
- ppgtt->pd = __alloc_pd();
- if (!ppgtt->pd) {
- err = -ENOMEM;
+ ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(ppgtt->pd)) {
+ err = PTR_ERR(ppgtt->pd);
goto err_free_scratch;
}
if (i915_vm_is_4lvl(&ppgtt->vm)) {
- err = setup_px(&ppgtt->vm, ppgtt->pd);
- if (err)
- goto err_free_pdp;
-
- init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
-
- ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
- ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- /*
- * We don't need to setup dma for top level pdp, only
- * for entries. So point entries to scratch.
- */
- memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd,
- GEN8_3LVL_PDPES);
-
if (intel_vgpu_active(i915)) {
err = gen8_preallocate_top_level_pdp(ppgtt);
if (err)
- goto err_free_pdp;
+ goto err_free_pd;
}
- ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
- ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
}
+ ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
+ ppgtt->vm.clear_range = gen8_ppgtt_clear;
+
if (intel_vgpu_active(i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
@@ -1657,10 +1517,11 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
-err_free_pdp:
- free_pd(&ppgtt->vm, ppgtt->pd);
+err_free_pd:
+ __gen8_ppgtt_cleanup(&ppgtt->vm, ppgtt->pd,
+ gen8_pd_top_count(&ppgtt->vm), ppgtt->vm.top);
err_free_scratch:
- gen8_free_scratch(&ppgtt->vm);
+ free_scratch(&ppgtt->vm);
err_free:
kfree(ppgtt);
return ERR_PTR(err);
@@ -1676,25 +1537,26 @@ static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
ppgtt->pd_addr + pde);
}
-static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
+static void gen7_ppgtt_enable(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
struct intel_engine_cs *engine;
- u32 ecochk, ecobits;
enum intel_engine_id id;
+ u32 ecochk;
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+ intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);
- ecochk = I915_READ(GAM_ECOCHK);
- if (IS_HASWELL(dev_priv)) {
+ ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ if (IS_HASWELL(i915)) {
ecochk |= ECOCHK_PPGTT_WB_HSW;
} else {
ecochk |= ECOCHK_PPGTT_LLC_IVB;
ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
}
- I915_WRITE(GAM_ECOCHK, ecochk);
+ intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, i915, id) {
/* GFX_MODE is per-ring on gen7+ */
ENGINE_WRITE(engine,
RING_MODE_GEN7,
@@ -1702,22 +1564,29 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
}
}
-static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
+static void gen6_ppgtt_enable(struct intel_gt *gt)
{
- u32 ecochk, gab_ctl, ecobits;
+ struct intel_uncore *uncore = gt->uncore;
- ecobits = I915_READ(GAC_ECO_BITS);
- I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
- ECOBITS_PPGTT_CACHE64B);
+ intel_uncore_rmw(uncore,
+ GAC_ECO_BITS,
+ 0,
+ ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);
- gab_ctl = I915_READ(GAB_CTL);
- I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+ intel_uncore_rmw(uncore,
+ GAB_CTL,
+ 0,
+ GAB_CTL_CONT_AFTER_PAGEFAULT);
- ecochk = I915_READ(GAM_ECOCHK);
- I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
+ intel_uncore_rmw(uncore,
+ GAM_ECOCHK,
+ 0,
+ ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
- if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
+ intel_uncore_write(uncore,
+ GFX_MODE,
+ _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
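intel_uncore_rmw(uncore, reg, clear, set) condenses each read/OR/write sequence; with clear == 0 it just ORs the set bits into the register. The combination it is assumed to perform:

/* Illustrative only: the read-modify-write applied to the register. */
static unsigned int rmw(unsigned int old, unsigned int clear,
                        unsigned int set)
{
        return (old & ~clear) | set;
}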
/* PPGTT support for Sandybridge/Gen6 and later */
@@ -1726,7 +1595,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
{
struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = vm->scratch_pte;
+ const gen6_pte_t scratch_pte = vm->scratch[0].encode;
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
@@ -1737,7 +1606,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
- GEM_BUG_ON(pt == vm->scratch_pt);
+ GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
num_entries -= count;
@@ -1774,7 +1643,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
- GEM_BUG_ON(i915_pt_entry(pd, act_pt) == vm->scratch_pt);
+ GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
do {
@@ -1819,7 +1688,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
gen6_for_each_pde(pt, pd, start, length, pde) {
const unsigned int count = gen6_pte_count(start, length);
- if (pt == vm->scratch_pt) {
+ if (px_base(pt) == px_base(&vm->scratch[1])) {
spin_unlock(&pd->lock);
pt = fetch_and_zero(&alloc);
@@ -1830,10 +1699,10 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
goto unwind_out;
}
- gen6_initialize_pt(vm, pt);
+ fill32_px(pt, vm->scratch[0].encode);
spin_lock(&pd->lock);
- if (pd->entry[pde] == vm->scratch_pt) {
+ if (pd->entry[pde] == &vm->scratch[1]) {
pd->entry[pde] = pt;
if (i915_vma_is_bound(ppgtt->vma,
I915_VMA_GLOBAL_BIND)) {
@@ -1852,7 +1721,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
if (flush) {
mark_tlbs_dirty(&ppgtt->base);
- gen6_ggtt_invalidate(vm->i915);
+ gen6_ggtt_invalidate(vm->gt->ggtt);
}
goto out;
@@ -1861,7 +1730,7 @@ unwind_out:
gen6_ppgtt_clear_range(vm, from, start - from);
out:
if (alloc)
- free_pt(vm, alloc);
+ free_px(vm, alloc);
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
return ret;
}
@@ -1870,108 +1739,52 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
struct i915_address_space * const vm = &ppgtt->base.vm;
struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table *unused;
- u32 pde;
int ret;
ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
- vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_NONE,
- PTE_READ_ONLY);
+ vm->scratch[0].encode =
+ vm->pte_encode(px_dma(&vm->scratch[0]),
+ I915_CACHE_NONE, PTE_READ_ONLY);
- vm->scratch_pt = alloc_pt(vm);
- if (IS_ERR(vm->scratch_pt)) {
+ if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
cleanup_scratch_page(vm);
- return PTR_ERR(vm->scratch_pt);
+ return -ENOMEM;
}
- gen6_initialize_pt(vm, vm->scratch_pt);
-
- gen6_for_all_pdes(unused, pd, pde)
- pd->entry[pde] = vm->scratch_pt;
+ fill32_px(&vm->scratch[1], vm->scratch[0].encode);
+ memset_p(pd->entry, &vm->scratch[1], I915_PDES);
return 0;
}
-static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
-{
- free_pt(vm, vm->scratch_pt);
- cleanup_scratch_page(vm);
-}
-
static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
struct i915_page_directory * const pd = ppgtt->base.pd;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
struct i915_page_table *pt;
u32 pde;
gen6_for_all_pdes(pt, pd, pde)
- if (pt != ppgtt->base.vm.scratch_pt)
- free_pt(&ppgtt->base.vm, pt);
-}
-
-struct gen6_ppgtt_cleanup_work {
- struct work_struct base;
- struct i915_vma *vma;
-};
-
-static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
-{
- struct gen6_ppgtt_cleanup_work *work =
- container_of(wrk, typeof(*work), base);
- /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
- struct drm_i915_private *i915 = work->vma->vm->i915;
-
- mutex_lock(&i915->drm.struct_mutex);
- i915_vma_destroy(work->vma);
- mutex_unlock(&i915->drm.struct_mutex);
-
- kfree(work);
-}
-
-static int nop_set_pages(struct i915_vma *vma)
-{
- return -ENODEV;
+ if (px_base(pt) != scratch)
+ free_px(&ppgtt->base.vm, pt);
}
-static void nop_clear_pages(struct i915_vma *vma)
-{
-}
-
-static int nop_bind(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
-{
- return -ENODEV;
-}
-
-static void nop_unbind(struct i915_vma *vma)
-{
-}
-
-static const struct i915_vma_ops nop_vma_ops = {
- .set_pages = nop_set_pages,
- .clear_pages = nop_clear_pages,
- .bind_vma = nop_bind,
- .unbind_vma = nop_unbind,
-};
-
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
+ struct drm_i915_private *i915 = vm->i915;
/* FIXME remove the struct_mutex to bring the locking under control */
- INIT_WORK(&work->base, gen6_ppgtt_cleanup_work);
- work->vma = ppgtt->vma;
- work->vma->ops = &nop_vma_ops;
- schedule_work(&work->base);
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_vma_destroy(ppgtt->vma);
+ mutex_unlock(&i915->drm.struct_mutex);
gen6_ppgtt_free_pd(ppgtt);
- gen6_ppgtt_free_scratch(vm);
+ free_scratch(vm);
kfree(ppgtt->base.pd);
}
@@ -1998,14 +1811,14 @@ static int pd_vma_bind(struct i915_vma *vma,
struct i915_page_table *pt;
unsigned int pde;
- ppgtt->base.pd->base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
+ px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
gen6_write_pde(ppgtt, pde, pt);
mark_tlbs_dirty(&ppgtt->base);
- gen6_ggtt_invalidate(ppgtt->base.vm.i915);
+ gen6_ggtt_invalidate(ggtt);
return 0;
}
@@ -2014,7 +1827,8 @@ static void pd_vma_unbind(struct i915_vma *vma)
{
struct gen6_ppgtt *ppgtt = vma->private;
struct i915_page_directory * const pd = ppgtt->base.pd;
- struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
+ struct i915_page_dma * const scratch =
+ px_base(&ppgtt->base.vm.scratch[1]);
struct i915_page_table *pt;
unsigned int pde;
@@ -2023,11 +1837,11 @@ static void pd_vma_unbind(struct i915_vma *vma)
/* Free all no longer used page tables */
gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
- if (atomic_read(&pt->used) || pt == scratch_pt)
+ if (px_base(pt) == scratch || atomic_read(&pt->used))
continue;
- free_pt(&ppgtt->base.vm, pt);
- pd->entry[pde] = scratch_pt;
+ free_px(&ppgtt->base.vm, pt);
+ pd->entry[pde] = scratch;
}
ppgtt->scan_for_unused_pt = false;
@@ -2043,7 +1857,7 @@ static const struct i915_vma_ops pd_vma_ops = {
static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
struct drm_i915_private *i915 = ppgtt->base.vm.i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
struct i915_vma *vma;
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
@@ -2053,7 +1867,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
if (!vma)
return ERR_PTR(-ENOMEM);
- i915_active_init(i915, &vma->active, NULL);
+ i915_active_init(i915, &vma->active, NULL, NULL);
INIT_ACTIVE_REQUEST(&vma->last_fence);
vma->vm = &ggtt->vm;
@@ -2141,7 +1955,8 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ppgtt_init(i915, &ppgtt->base);
+ ppgtt_init(&ppgtt->base, &i915->gt);
+ ppgtt->base.vm.top = 1;
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
@@ -2150,16 +1965,10 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
- ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
- if (!ppgtt->work) {
- err = -ENOMEM;
- goto err_free;
- }
-
- ppgtt->base.pd = __alloc_pd();
+ ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
if (!ppgtt->base.pd) {
err = -ENOMEM;
- goto err_work;
+ goto err_free;
}
err = gen6_ppgtt_init_scratch(ppgtt);
@@ -2175,31 +1984,40 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
return &ppgtt->base;
err_scratch:
- gen6_ppgtt_free_scratch(&ppgtt->base.vm);
+ free_scratch(&ppgtt->base.vm);
err_pd:
kfree(ppgtt->base.pd);
-err_work:
- kfree(ppgtt->work);
err_free:
kfree(ppgtt);
return ERR_PTR(err);
}
-static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
+static void gtt_write_workarounds(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+
/* This function is for GTT-related workarounds. It is called on
 * driver load and after a GPU reset, so workarounds can be placed
 * here even if they get overwritten by a GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
- if (IS_BROADWELL(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
- else if (IS_CHERRYVIEW(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
- else if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+ if (IS_BROADWELL(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+ else if (IS_CHERRYVIEW(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+ else if (IS_GEN9_LP(i915))
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+ else if (INTEL_GEN(i915) >= 9)
+ intel_uncore_write(uncore,
+ GEN8_L3_LRA_1_GPGPU,
+ GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
/*
* To support 64K PTEs we need to first enable the use of the
@@ -2212,21 +2030,24 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* 32K pages, but we don't currently have any support for it in our
* driver.
*/
- if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
- INTEL_GEN(dev_priv) <= 10)
- I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
- I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
- GAMW_ECO_ENABLE_64K_IPS_FIELD);
+ if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
+ INTEL_GEN(i915) <= 10)
+ intel_uncore_rmw(uncore,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ 0,
+ GAMW_ECO_ENABLE_64K_IPS_FIELD);
}
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
+int i915_ppgtt_init_hw(struct intel_gt *gt)
{
- gtt_write_workarounds(dev_priv);
+ struct drm_i915_private *i915 = gt->i915;
- if (IS_GEN(dev_priv, 6))
- gen6_ppgtt_enable(dev_priv);
- else if (IS_GEN(dev_priv, 7))
- gen7_ppgtt_enable(dev_priv);
+ gtt_write_workarounds(gt);
+
+ if (IS_GEN(i915, 6))
+ gen6_ppgtt_enable(gt);
+ else if (IS_GEN(i915, 7))
+ gen7_ppgtt_enable(gt);
return 0;
}
@@ -2254,42 +2075,6 @@ i915_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
}
-static void ppgtt_destroy_vma(struct i915_address_space *vm)
-{
- struct list_head *phases[] = {
- &vm->bound_list,
- &vm->unbound_list,
- NULL,
- }, **phase;
-
- vm->closed = true;
- for (phase = phases; *phase; phase++) {
- struct i915_vma *vma, *vn;
-
- list_for_each_entry_safe(vma, vn, *phase, vm_link)
- i915_vma_destroy(vma);
- }
-}
-
-void i915_vm_release(struct kref *kref)
-{
- struct i915_address_space *vm =
- container_of(kref, struct i915_address_space, ref);
-
- GEM_BUG_ON(i915_is_ggtt(vm));
- trace_i915_ppgtt_release(vm);
-
- ppgtt_destroy_vma(vm);
-
- GEM_BUG_ON(!list_empty(&vm->bound_list));
- GEM_BUG_ON(!list_empty(&vm->unbound_list));
-
- vm->cleanup(vm);
- i915_address_space_fini(vm);
-
- kfree(vm);
-}
-
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
@@ -2301,21 +2086,26 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
+static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
/* Don't bother messing with faults pre-GEN6 as we have little
* documentation supporting that it's a good idea.
*/
- if (INTEL_GEN(dev_priv) < 6)
+ if (INTEL_GEN(i915) < 6)
return;
- i915_check_and_clear_faults(dev_priv);
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- i915_ggtt_invalidate(dev_priv);
+ ggtt->invalidate(ggtt);
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
+{
+ ggtt_suspend_mappings(&i915->ggtt);
}
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
@@ -2361,7 +2151,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
- ggtt->invalidate(vm->i915);
+ ggtt->invalidate(ggtt);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2389,7 +2179,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* We want to flush the TLBs only after we're certain all the PTE
* updates have finished.
*/
- ggtt->invalidate(vm->i915);
+ ggtt->invalidate(ggtt);
}
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
@@ -2404,7 +2194,7 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
iowrite32(vm->pte_encode(addr, level, flags), pte);
- ggtt->invalidate(vm->i915);
+ ggtt->invalidate(ggtt);
}
/*
@@ -2430,7 +2220,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
* We want to flush the TLBs only after we're certain all the PTE
* updates have finished.
*/
- ggtt->invalidate(vm->i915);
+ ggtt->invalidate(ggtt);
}
static void nop_clear_range(struct i915_address_space *vm,
@@ -2444,7 +2234,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start / I915_GTT_PAGE_SIZE;
unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch_pte;
+ const gen8_pte_t scratch_pte = vm->scratch[0].encode;
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2569,8 +2359,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->scratch_pte;
-
+ scratch_pte = vm->scratch[0].encode;
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
}
@@ -2657,18 +2446,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
+ struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
- ret = appgtt->vm.allocate_va_range(&appgtt->vm,
- vma->node.start,
- vma->size);
+ ret = alias->vm.allocate_va_range(&alias->vm,
+ vma->node.start,
+ vma->size);
if (ret)
return ret;
}
- appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
- pte_flags);
+ alias->vm.insert_entries(&alias->vm, vma,
+ cache_level, pte_flags);
}
if (flags & I915_VMA_GLOBAL_BIND) {
@@ -2696,7 +2485,8 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
- struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
+ struct i915_address_space *vm =
+ &i915_vm_to_ggtt(vma->vm)->alias->vm;
vm->clear_range(vm, vma->node.start, vma->size);
}
@@ -2753,13 +2543,12 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
*end -= I915_GTT_PAGE_SIZE;
}
-static int init_aliasing_ppgtt(struct drm_i915_private *i915)
+static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(i915);
+ ppgtt = i915_ppgtt_create(ggtt->vm.i915);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -2778,7 +2567,7 @@ static int init_aliasing_ppgtt(struct drm_i915_private *i915)
if (err)
goto err_ppgtt;
- i915->mm.aliasing_ppgtt = ppgtt;
+ ggtt->alias = ppgtt;
GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
@@ -2793,19 +2582,24 @@ err_ppgtt:
return err;
}
-static void fini_aliasing_ppgtt(struct drm_i915_private *i915)
+static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_ppgtt *ppgtt;
- ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ppgtt = fetch_and_zero(&ggtt->alias);
if (!ppgtt)
- return;
+ goto out;
i915_vm_put(&ppgtt->vm);
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
+
+out:
+ mutex_unlock(&i915->drm.struct_mutex);
}
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
@@ -2834,7 +2628,13 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
drm_mm_remove_node(&ggtt->uc_fw);
}
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
+static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
+{
+ ggtt_release_guc_top(ggtt);
+ drm_mm_remove_node(&ggtt->error_capture);
+}
+
+static int init_ggtt(struct i915_ggtt *ggtt)
{
/* Let GEM manage all of the aperture.
*
@@ -2845,7 +2645,6 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long hole_start, hole_end;
struct drm_mm_node *entry;
int ret;
@@ -2857,9 +2656,9 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
* why.
*/
ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
- intel_wopcm_guc_size(&dev_priv->wopcm));
+ intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
- ret = intel_vgt_balloon(dev_priv);
+ ret = intel_vgt_balloon(ggtt);
if (ret)
return ret;
@@ -2878,7 +2677,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
*/
ret = ggtt_reserve_guc_top(ggtt);
if (ret)
- goto err_reserve;
+ goto err;
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
@@ -2891,35 +2690,41 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
/* And finally clear the reserved guard page */
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
- if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
- ret = init_aliasing_ppgtt(dev_priv);
+ return 0;
+
+err:
+ cleanup_init_ggtt(ggtt);
+ return ret;
+}
+
+int i915_init_ggtt(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = init_ggtt(&i915->ggtt);
+ if (ret)
+ return ret;
+
+ if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
+ ret = init_aliasing_ppgtt(&i915->ggtt);
if (ret)
- goto err_appgtt;
+ cleanup_init_ggtt(&i915->ggtt);
}
return 0;
-
-err_appgtt:
- ggtt_release_guc_top(ggtt);
-err_reserve:
- drm_mm_remove_node(&ggtt->error_capture);
- return ret;
}
-/**
- * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
- * @dev_priv: i915 device
- */
-void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
+static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
struct i915_vma *vma, *vn;
- struct pagevec *pvec;
ggtt->vm.closed = true;
- mutex_lock(&dev_priv->drm.struct_mutex);
- fini_aliasing_ppgtt(dev_priv);
+ rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
+ flush_workqueue(i915->wq);
+
+ mutex_lock(&i915->drm.struct_mutex);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
WARN_ON(i915_vma_unbind(vma));
@@ -2930,24 +2735,37 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
ggtt_release_guc_top(ggtt);
if (drm_mm_initialized(&ggtt->vm.mm)) {
- intel_vgt_deballoon(dev_priv);
+ intel_vgt_deballoon(ggtt);
i915_address_space_fini(&ggtt->vm);
}
ggtt->vm.cleanup(&ggtt->vm);
- pvec = &dev_priv->mm.wc_stash.pvec;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ arch_phys_wc_del(ggtt->mtrr);
+ io_mapping_fini(&ggtt->iomap);
+}
+
+/**
+ * i915_ggtt_driver_release - Clean up GGTT hardware initialization
+ * @i915: i915 device
+ */
+void i915_ggtt_driver_release(struct drm_i915_private *i915)
+{
+ struct pagevec *pvec;
+
+ fini_aliasing_ppgtt(&i915->ggtt);
+
+ ggtt_cleanup_hw(&i915->ggtt);
+
+ pvec = &i915->mm.wc_stash.pvec;
if (pvec->nr) {
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- arch_phys_wc_del(ggtt->mtrr);
- io_mapping_fini(&ggtt->iomap);
-
- i915_gem_cleanup_stolen(dev_priv);
+ i915_gem_cleanup_stolen(i915);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3018,243 +2836,48 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
}
- ggtt->vm.scratch_pte =
- ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
+ ggtt->vm.scratch[0].encode =
+ ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
I915_CACHE_NONE, 0);
return 0;
}
-static struct intel_ppat_entry *
-__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
+static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
{
- struct intel_ppat_entry *entry = &ppat->entries[index];
-
- GEM_BUG_ON(index >= ppat->max_entries);
- GEM_BUG_ON(test_bit(index, ppat->used));
-
- entry->ppat = ppat;
- entry->value = value;
- kref_init(&entry->ref);
- set_bit(index, ppat->used);
- set_bit(index, ppat->dirty);
-
- return entry;
+ I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
+ I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
+ I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
+ I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
+ I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+ I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+ I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+ I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
-static void __free_ppat_entry(struct intel_ppat_entry *entry)
-{
- struct intel_ppat *ppat = entry->ppat;
- unsigned int index = entry - ppat->entries;
-
- GEM_BUG_ON(index >= ppat->max_entries);
- GEM_BUG_ON(!test_bit(index, ppat->used));
-
- entry->value = ppat->clear_value;
- clear_bit(index, ppat->used);
- set_bit(index, ppat->dirty);
-}
-
-/**
- * intel_ppat_get - get a usable PPAT entry
- * @i915: i915 device instance
- * @value: the PPAT value required by the caller
- *
- * The function tries to search if there is an existing PPAT entry which
- * matches with the required value. If perfectly matched, the existing PPAT
- * entry will be used. If only partially matched, it will try to check if
- * there is any available PPAT index. If yes, it will allocate a new PPAT
- * index for the required entry and update the HW. If not, the partially
- * matched entry will be used.
- */
-const struct intel_ppat_entry *
-intel_ppat_get(struct drm_i915_private *i915, u8 value)
-{
- struct intel_ppat *ppat = &i915->ppat;
- struct intel_ppat_entry *entry = NULL;
- unsigned int scanned, best_score;
- int i;
-
- GEM_BUG_ON(!ppat->max_entries);
-
- scanned = best_score = 0;
- for_each_set_bit(i, ppat->used, ppat->max_entries) {
- unsigned int score;
-
- score = ppat->match(ppat->entries[i].value, value);
- if (score > best_score) {
- entry = &ppat->entries[i];
- if (score == INTEL_PPAT_PERFECT_MATCH) {
- kref_get(&entry->ref);
- return entry;
- }
- best_score = score;
- }
- scanned++;
- }
-
- if (scanned == ppat->max_entries) {
- if (!entry)
- return ERR_PTR(-ENOSPC);
-
- kref_get(&entry->ref);
- return entry;
- }
-
- i = find_first_zero_bit(ppat->used, ppat->max_entries);
- entry = __alloc_ppat_entry(ppat, i, value);
- ppat->update_hw(i915);
- return entry;
-}
-
-static void release_ppat(struct kref *kref)
-{
- struct intel_ppat_entry *entry =
- container_of(kref, struct intel_ppat_entry, ref);
- struct drm_i915_private *i915 = entry->ppat->i915;
-
- __free_ppat_entry(entry);
- entry->ppat->update_hw(i915);
-}
-
-/**
- * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
- * @entry: an intel PPAT entry
- *
- * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the
- * entry is dynamically allocated, its reference count will be decreased. Once
- * the reference count becomes into zero, the PPAT index becomes free again.
- */
-void intel_ppat_put(const struct intel_ppat_entry *entry)
-{
- struct intel_ppat *ppat = entry->ppat;
- unsigned int index = entry - ppat->entries;
-
- GEM_BUG_ON(!ppat->max_entries);
-
- kref_put(&ppat->entries[index].ref, release_ppat);
-}
-
-static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
-{
- struct intel_ppat *ppat = &dev_priv->ppat;
- int i;
-
- for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
- I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
- clear_bit(i, ppat->dirty);
- }
-}
-
-static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
+/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
+ * bits. When using advanced contexts, each context stores its own PAT, but
+ * writing this data shouldn't be harmful even in those cases. */
+static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
- struct intel_ppat *ppat = &dev_priv->ppat;
- u64 pat = 0;
- int i;
-
- for (i = 0; i < ppat->max_entries; i++)
- pat |= GEN8_PPAT(i, ppat->entries[i].value);
+ u64 pat;
- bitmap_clear(ppat->dirty, 0, ppat->max_entries);
+ pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
+ GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
+ GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
+ GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
+ GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
+ GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
+ GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
+ GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
-static unsigned int bdw_private_pat_match(u8 src, u8 dst)
+static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
- unsigned int score = 0;
- enum {
- AGE_MATCH = BIT(0),
- TC_MATCH = BIT(1),
- CA_MATCH = BIT(2),
- };
-
- /* Cache attribute has to be matched. */
- if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
- return 0;
-
- score |= CA_MATCH;
-
- if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
- score |= TC_MATCH;
-
- if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
- score |= AGE_MATCH;
-
- if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
- return INTEL_PPAT_PERFECT_MATCH;
-
- return score;
-}
-
-static unsigned int chv_private_pat_match(u8 src, u8 dst)
-{
- return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
- INTEL_PPAT_PERFECT_MATCH : 0;
-}
-
-static void cnl_setup_private_ppat(struct intel_ppat *ppat)
-{
- ppat->max_entries = 8;
- ppat->update_hw = cnl_private_pat_update_hw;
- ppat->match = bdw_private_pat_match;
- ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
-
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
- __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
- __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
- __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
- __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-}
-
-/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
- * bits. When using advanced contexts each context stores its own PAT, but
- * writing this data shouldn't be harmful even in those cases. */
-static void bdw_setup_private_ppat(struct intel_ppat *ppat)
-{
- ppat->max_entries = 8;
- ppat->update_hw = bdw_private_pat_update_hw;
- ppat->match = bdw_private_pat_match;
- ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
-
- if (!HAS_PPGTT(ppat->i915)) {
- /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
- * so RTL will always use the value corresponding to
- * pat_sel = 000".
- * So let's disable cache for GGTT to avoid screen corruptions.
- * MOCS still can be used though.
- * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
- * before this patch, i.e. the same uncached + snooping access
- * like on gen6/7 seems to be in effect.
- * - So this just fixes blitter/render access. Again it looks
- * like it's not just uncached access, but uncached + snooping.
- * So we can still hold onto all our assumptions wrt cpu
- * clflushing on LLC machines.
- */
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
- return;
- }
-
- __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
- __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
- __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
- __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
- __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
- __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
- __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
- __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
-}
-
-static void chv_setup_private_ppat(struct intel_ppat *ppat)
-{
- ppat->max_entries = 8;
- ppat->update_hw = bdw_private_pat_update_hw;
- ppat->match = chv_private_pat_match;
- ppat->clear_value = CHV_PPAT_SNOOP;
+ u64 pat;
/*
* Map WB on BDW to snooped on CHV.
@@ -3275,14 +2898,17 @@ static void chv_setup_private_ppat(struct intel_ppat *ppat)
* in order to keep the global status page working.
*/
- __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 1, 0);
- __alloc_ppat_entry(ppat, 2, 0);
- __alloc_ppat_entry(ppat, 3, 0);
- __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
- __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
+ pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(1, 0) |
+ GEN8_PPAT(2, 0) |
+ GEN8_PPAT(3, 0) |
+ GEN8_PPAT(4, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(5, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(6, CHV_PPAT_SNOOP) |
+ GEN8_PPAT(7, CHV_PPAT_SNOOP);
+
+ I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
+ I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
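
For reference, GEN8_PPAT(i, x) — defined in i915_gem_gtt.h as ((u64)(x) << ((i) * 8)) — packs each one-byte PPAT entry into byte lane i of a 64-bit value, which the rewritten helper then splits across GEN8_PRIVATE_PAT_LO/HI. A minimal standalone sketch with illustrative entry values:

	#include <stdint.h>
	#include <stdio.h>

	#define GEN8_PPAT(i, x) ((uint64_t)(x) << ((i) * 8))

	int main(void)
	{
		/* Eight one-byte entries fill the 64-bit PPAT word. */
		uint64_t pat = GEN8_PPAT(0, 0x06) | GEN8_PPAT(4, 0x06);

		/* The driver writes the two halves to 32-bit registers. */
		printf("LO=%08x HI=%08x\n",
		       (uint32_t)(pat & 0xffffffff), (uint32_t)(pat >> 32));
		return 0;
	}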
static void gen6_gmch_remove(struct i915_address_space *vm)
@@ -3295,27 +2921,14 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
static void setup_private_pat(struct drm_i915_private *dev_priv)
{
- struct intel_ppat *ppat = &dev_priv->ppat;
- int i;
-
- ppat->i915 = dev_priv;
+ GEM_BUG_ON(INTEL_GEN(dev_priv) < 8);
if (INTEL_GEN(dev_priv) >= 10)
- cnl_setup_private_ppat(ppat);
+ cnl_setup_private_ppat(dev_priv);
else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- chv_setup_private_ppat(ppat);
+ chv_setup_private_ppat(dev_priv);
else
- bdw_setup_private_ppat(ppat);
-
- GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
-
- for_each_clear_bit(i, ppat->used, ppat->max_entries) {
- ppat->entries[i].value = ppat->clear_value;
- ppat->entries[i].ppat = ppat;
- set_bit(i, ppat->dirty);
- }
-
- ppat->update_hw(dev_priv);
+ bdw_setup_private_ppat(dev_priv);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3360,11 +2973,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
if (ggtt->vm.clear_range != nop_clear_range)
ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
-
- /* Prevent recursively calling stop_machine() and deadlocks. */
- dev_info(dev_priv->drm.dev,
- "Disabling error capture for VT-d workaround\n");
- i915_disable_error_state(dev_priv, -ENODEV);
}
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3482,21 +3090,18 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
return 0;
}
-/**
- * i915_ggtt_probe_hw - Probe GGTT hardware location
- * @dev_priv: i915 device
- */
-int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
+static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct drm_i915_private *i915 = gt->i915;
int ret;
- ggtt->vm.i915 = dev_priv;
- ggtt->vm.dma = &dev_priv->drm.pdev->dev;
+ ggtt->vm.gt = gt;
+ ggtt->vm.i915 = i915;
+ ggtt->vm.dma = &i915->drm.pdev->dev;
- if (INTEL_GEN(dev_priv) <= 5)
+ if (INTEL_GEN(i915) <= 5)
ret = i915_gmch_probe(ggtt);
- else if (INTEL_GEN(dev_priv) < 8)
+ else if (INTEL_GEN(i915) < 8)
ret = gen6_gmch_probe(ggtt);
else
ret = gen8_gmch_probe(ggtt);
@@ -3524,51 +3129,82 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
DRM_DEBUG_DRIVER("DSM size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
- if (intel_vtd_active())
- DRM_INFO("VT-d active for gfx access\n");
return 0;
}
/**
- * i915_ggtt_init_hw - Initialize GGTT hardware
- * @dev_priv: i915 device
+ * i915_ggtt_probe_hw - Probe GGTT hardware location
+ * @i915: i915 device
*/
-int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
- stash_init(&dev_priv->mm.wc_stash);
+ ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+ if (ret)
+ return ret;
+
+ if (intel_vtd_active())
+ DRM_INFO("VT-d active for gfx access\n");
+
+ return 0;
+}
+
+static int ggtt_init_hw(struct i915_ggtt *ggtt)
+{
+ struct drm_i915_private *i915 = ggtt->vm.i915;
+ int ret = 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
- /* Note that we use page colouring to enforce a guard page at the
- * end of the address space. This is required as the CS may prefetch
- * beyond the end of the batch buffer, across the page boundary,
- * and beyond the end of the GTT if we do not provide a guard.
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
ggtt->vm.is_ggtt = true;
/* Only VLV supports read-only GGTT mappings */
- ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+ ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
- if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
+ if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
- dev_priv->ggtt.gmadr.start,
- dev_priv->ggtt.mappable_end)) {
+ if (!io_mapping_init_wc(&ggtt->iomap,
+ ggtt->gmadr.start,
+ ggtt->mappable_end)) {
+ ggtt->vm.cleanup(&ggtt->vm);
ret = -EIO;
- goto out_gtt_cleanup;
+ goto out;
}
ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
i915_ggtt_init_fences(ggtt);
+out:
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return ret;
+}
+
+/**
+ * i915_ggtt_init_hw - Initialize GGTT hardware
+ * @dev_priv: i915 device
+ */
+int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ stash_init(&dev_priv->mm.wc_stash);
+
+ /* Note that we use page colouring to enforce a guard page at the
+ * end of the address space. This is required as the CS may prefetch
+ * beyond the end of the batch buffer, across the page boundary,
+ * and beyond the end of the GTT if we do not provide a guard.
+ */
+ ret = ggtt_init_hw(&dev_priv->ggtt);
+ if (ret)
+ return ret;
+
/*
* Initialise stolen early so that we may reserve preallocated
* objects for the BIOS to KMS transition.
@@ -3580,7 +3216,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
return 0;
out_gtt_cleanup:
- ggtt->vm.cleanup(&ggtt->vm);
+ dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm);
return ret;
}
@@ -3592,35 +3228,34 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
return 0;
}
-void i915_ggtt_enable_guc(struct drm_i915_private *i915)
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
- GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
+ GEM_BUG_ON(ggtt->invalidate != gen6_ggtt_invalidate);
- i915->ggtt.invalidate = guc_ggtt_invalidate;
+ ggtt->invalidate = guc_ggtt_invalidate;
- i915_ggtt_invalidate(i915);
+ ggtt->invalidate(ggtt);
}
-void i915_ggtt_disable_guc(struct drm_i915_private *i915)
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
/* XXX Temporary pardon for error unload */
- if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
+ if (ggtt->invalidate == gen6_ggtt_invalidate)
return;
/* We should only be called after i915_ggtt_enable_guc() */
- GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
+ GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
- i915->ggtt.invalidate = gen6_ggtt_invalidate;
+ ggtt->invalidate = gen6_ggtt_invalidate;
- i915_ggtt_invalidate(i915);
+ ggtt->invalidate(ggtt);
}
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
+static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma, *vn;
- i915_check_and_clear_faults(dev_priv);
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
mutex_lock(&ggtt->vm.mutex);
@@ -3654,17 +3289,17 @@ lock:
}
ggtt->vm.closed = false;
- i915_ggtt_invalidate(dev_priv);
+ ggtt->invalidate(ggtt);
mutex_unlock(&ggtt->vm.mutex);
+}
- if (INTEL_GEN(dev_priv) >= 8) {
- struct intel_ppat *ppat = &dev_priv->ppat;
+void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
+{
+ ggtt_restore_mappings(&i915->ggtt);
- bitmap_set(ppat->dirty, 0, ppat->max_entries);
- dev_priv->ppat.update_hw(dev_priv);
- return;
- }
+ if (INTEL_GEN(i915) >= 8)
+ setup_private_pat(i915);
}
static struct scatterlist *
@@ -3953,7 +3588,7 @@ int i915_gem_gtt_reserve(struct i915_address_space *vm,
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(range_overflows(offset, size, vm->total));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
+ GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
node->size = size;
@@ -4050,7 +3685,7 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
GEM_BUG_ON(start >= end);
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
+ GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
GEM_BUG_ON(drm_mm_node_allocated(node));
if (unlikely(range_overflows(start, size, end)))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 812717ccc69b..51274483502e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -35,15 +35,19 @@
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
+#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
+#include <linux/workqueue.h>
+
+#include <drm/drm_mm.h>
#include "gt/intel_reset.h"
#include "i915_gem_fence_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_selftest.h"
-#include "i915_timeline.h"
+#include "gt/intel_timeline.h"
#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
@@ -64,12 +68,10 @@
struct drm_i915_file_private;
struct drm_i915_gem_object;
struct i915_vma;
+struct intel_gt;
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
-typedef u64 gen8_pde_t;
-typedef u64 gen8_ppgtt_pdpe_t;
-typedef u64 gen8_ppgtt_pml4e_t;
#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
@@ -113,30 +115,18 @@ typedef u64 gen8_ppgtt_pml4e_t;
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
-/* GEN8 32b style address is defined as a 3 level page table:
+/*
+ * GEN8 32b style address is defined as a 3 level page table:
* 31:30 | 29:21 | 20:12 | 11:0
* PDPE | PDE | PTE | offset
* The difference as compared to normal x86 3 level page table is the PDPEs are
* programmed via register.
- */
-#define GEN8_3LVL_PDPES 4
-#define GEN8_PDE_SHIFT 21
-#define GEN8_PDE_MASK 0x1ff
-#define GEN8_PTE_SHIFT 12
-#define GEN8_PTE_MASK 0x1ff
-#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
-
-/* GEN8 48b style address is defined as a 4 level page table:
+ *
+ * GEN8 48b style address is defined as a 4 level page table:
* 47:39 | 38:30 | 29:21 | 20:12 | 11:0
* PML4E | PDPE | PDE | PTE | offset
*/
-#define GEN8_PML4ES_PER_PML4 512
-#define GEN8_PML4E_SHIFT 39
-#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
-#define GEN8_PDPE_SHIFT 30
-/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
- * tables */
-#define GEN8_PDPE_MASK 0x1ff
+#define GEN8_3LVL_PDPES 4
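
A standalone sketch of decoding the 32b-style layout documented above; the shifts and masks follow directly from the "31:30 | 29:21 | 20:12 | 11:0" bit positions (the driver itself derives these through its i915_pte_index()-style helpers rather than open-coding them):

	#include <stdint.h>

	static void decode_gen8_32b(uint32_t addr,
				    unsigned int *pdpe, unsigned int *pde,
				    unsigned int *pte, unsigned int *offset)
	{
		*offset = addr & 0xfff;			/* bits 11:0  */
		*pte    = (addr >> 12) & 0x1ff;		/* bits 20:12 */
		*pde    = (addr >> 21) & 0x1ff;		/* bits 29:21 */
		*pdpe   = (addr >> 30) & 0x3;		/* bits 31:30 */
	}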
#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE 0 /* WB LLC */
@@ -155,11 +145,6 @@ typedef u64 gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
-#define GEN8_PPAT_GET_CA(x) ((x) & 3)
-#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
-#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
-#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
-
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M BIT(7)
@@ -243,8 +228,10 @@ struct i915_page_dma {
};
};
-#define px_base(px) (&(px)->base)
-#define px_dma(px) (px_base(px)->daddr)
+struct i915_page_scratch {
+ struct i915_page_dma base;
+ u64 encode;
+};
struct i915_page_table {
struct i915_page_dma base;
@@ -252,12 +239,32 @@ struct i915_page_table {
};
struct i915_page_directory {
- struct i915_page_dma base;
- atomic_t used;
+ struct i915_page_table pt;
spinlock_t lock;
void *entry[512];
};
+#define __px_choose_expr(x, type, expr, other) \
+ __builtin_choose_expr( \
+ __builtin_types_compatible_p(typeof(x), type) || \
+ __builtin_types_compatible_p(typeof(x), const type), \
+ ({ type __x = (type)(x); expr; }), \
+ other)
+
+#define px_base(px) \
+ __px_choose_expr(px, struct i915_page_dma *, __x, \
+ __px_choose_expr(px, struct i915_page_scratch *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_table *, &__x->base, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt.base, \
+ (void)0))))
+#define px_dma(px) (px_base(px)->daddr)
+
+#define px_pt(px) \
+ __px_choose_expr(px, struct i915_page_table *, __x, \
+ __px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
+ (void)0))
+#define px_used(px) (&px_pt(px)->used)
+
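The new px_base()/px_pt() accessors dispatch on the argument's static type: __builtin_types_compatible_p() is a compile-time constant, so __builtin_choose_expr() selects the matching branch with no runtime check. A reduced sketch of the same pattern (GCC/clang builtins; the struct names are illustrative):

	struct base { unsigned long daddr; };
	struct outer { struct base base; };

	#define __choose(x, type, expr, other) \
		__builtin_choose_expr( \
			__builtin_types_compatible_p(typeof(x), type), \
			({ type __x = (type)(x); expr; }), \
			other)

	#define to_base(px) \
		__choose(px, struct base *, __x, \
		__choose(px, struct outer *, &__x->base, \
		(void)0))

	static unsigned long get_daddr(struct outer *o)
	{
		/* Expands to &o->base entirely at compile time. */
		return to_base(o)->daddr;
	}
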
struct i915_vma_ops {
/* Map an object into an address space with the given cache flags. */
int (*bind_vma)(struct i915_vma *vma,
@@ -280,8 +287,10 @@ struct pagestash {
struct i915_address_space {
struct kref ref;
+ struct rcu_work rcu;
struct drm_mm mm;
+ struct intel_gt *gt;
struct drm_i915_private *i915;
struct device *dma;
/* Every address space belongs to a struct file - except for the global
@@ -302,12 +311,9 @@ struct i915_address_space {
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1
- u64 scratch_pte;
- int scratch_order;
- struct i915_page_dma scratch_page;
- struct i915_page_table *scratch_pt;
- struct i915_page_directory *scratch_pd;
- struct i915_page_directory *scratch_pdp; /* GEN8+ & 48b PPGTT */
+ struct i915_page_scratch scratch[4];
+ unsigned int scratch_order;
+ unsigned int top;
/**
* List of vma currently bound.
@@ -386,7 +392,10 @@ struct i915_ggtt {
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
- void (*invalidate)(struct drm_i915_private *dev_priv);
+ void (*invalidate)(struct i915_ggtt *ggtt);
+
+ /** PPGTT used for aliasing the PPGTT with the GTT */
+ struct i915_ppgtt *alias;
bool do_idle_maps;
@@ -425,8 +434,6 @@ struct gen6_ppgtt {
unsigned int pin_count;
bool scan_for_unused_pt;
-
- struct gen6_ppgtt_cleanup_work *work;
};
#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)
@@ -506,15 +513,6 @@ static inline u32 gen6_pde_index(u32 addr)
return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
-static inline unsigned int
-i915_pdpes_per_pdp(const struct i915_address_space *vm)
-{
- if (i915_vm_is_4lvl(vm))
- return GEN8_PML4ES_PER_PML4;
-
- return GEN8_3LVL_PDPES;
-}
-
static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
const unsigned short n)
@@ -529,73 +527,12 @@ i915_pd_entry(const struct i915_page_directory * const pdp,
return pdp->entry[n];
}
-static inline struct i915_page_directory *
-i915_pdp_entry(const struct i915_page_directory * const pml4,
- const unsigned short n)
-{
- return pml4->entry[n];
-}
-
-/* Equivalent to the gen6 version. For each pde, iterates over every pde
- * from start until start + length. On gen8+ it simply iterates

- * over every page directory entry in a page directory.
- */
-#define gen8_for_each_pde(pt, pd, start, length, iter) \
- for (iter = gen8_pde_index(start); \
- length > 0 && iter < I915_PDES && \
- (pt = i915_pt_entry(pd, iter), true); \
- ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
- for (iter = gen8_pdpe_index(start); \
- length > 0 && iter < i915_pdpes_per_pdp(vm) && \
- (pd = i915_pd_entry(pdp, iter), true); \
- ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
- for (iter = gen8_pml4e_index(start); \
- length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
- (pdp = i915_pdp_entry(pml4, iter), true); \
- ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
- temp = min(temp - start, length); \
- start += temp, length -= temp; }), ++iter)
-
-static inline u32 gen8_pte_index(u64 address)
-{
- return i915_pte_index(address, GEN8_PDE_SHIFT);
-}
-
-static inline u32 gen8_pde_index(u64 address)
-{
- return i915_pde_index(address, GEN8_PDE_SHIFT);
-}
-
-static inline u32 gen8_pdpe_index(u64 address)
-{
- return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
-}
-
-static inline u32 gen8_pml4e_index(u64 address)
-{
- return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
-}
-
-static inline u64 gen8_pte_count(u64 address, u64 length)
-{
- return i915_pte_count(address, length, GEN8_PDE_SHIFT);
-}
-
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
- struct i915_page_directory *pd;
+ struct i915_page_dma *pt = ppgtt->pd->entry[n];
- pd = i915_pdp_entry(ppgtt->pd, n);
- return px_dma(pd);
+ return px_dma(pt ?: px_base(&ppgtt->vm.scratch[ppgtt->vm.top]));
}
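
The pt ?: px_base(...) expression above uses the GNU binary "?:" operator: the left operand is evaluated once and used if non-NULL, otherwise the top-level scratch page supplies the dma address. A stand-in sketch:

	struct page_dma { unsigned long long daddr; };

	static unsigned long long
	dma_or_scratch(struct page_dma *entry, struct page_dma *scratch)
	{
		/* GNU "a ?: b" yields a when non-NULL, without re-evaluating it. */
		return (entry ?: scratch)->daddr;
	}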
static inline struct i915_ggtt *
@@ -614,46 +551,15 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
return container_of(vm, struct i915_ppgtt, vm);
}
-#define INTEL_MAX_PPAT_ENTRIES 8
-#define INTEL_PPAT_PERFECT_MATCH (~0U)
-
-struct intel_ppat;
-
-struct intel_ppat_entry {
- struct intel_ppat *ppat;
- struct kref ref;
- u8 value;
-};
-
-struct intel_ppat {
- struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
- DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
- DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
- unsigned int max_entries;
- u8 clear_value;
- /*
- * Return a score to show how two PPAT values match,
- * a INTEL_PPAT_PERFECT_MATCH indicates a perfect match
- */
- unsigned int (*match)(u8 src, u8 dst);
- void (*update_hw)(struct drm_i915_private *i915);
-
- struct drm_i915_private *i915;
-};
-
-const struct intel_ppat_entry *
-intel_ppat_get(struct drm_i915_private *i915, u8 value);
-void intel_ppat_put(const struct intel_ppat_entry *entry);
-
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
-void i915_ggtt_enable_guc(struct drm_i915_private *i915);
-void i915_ggtt_disable_guc(struct drm_i915_private *i915);
-int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
-void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
+void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
+void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
+int i915_init_ggtt(struct drm_i915_private *dev_priv);
+void i915_ggtt_driver_release(struct drm_i915_private *dev_priv);
-int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
+int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
deleted file mode 100644
index 112cda8fa1a8..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _I915_GEM_RENDER_STATE_H_
-#define _I915_GEM_RENDER_STATE_H_
-
-struct i915_request;
-
-int i915_gem_render_state_emit(struct i915_request *rq);
-
-#endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index 04c1ce107fc0..2d199f411a4a 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -7,6 +7,8 @@
#ifndef _I915_GLOBALS_H_
#define _I915_GLOBALS_H_
+#include <linux/types.h>
+
typedef void (*i915_global_func_t)(void);
struct i915_global {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8bc76fcff70d..0c0f255000c2 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -29,8 +29,8 @@
#include <linux/ascii85.h>
#include <linux/nmi.h>
+#include <linux/pagevec.h>
#include <linux/scatterlist.h>
-#include <linux/stop_machine.h>
#include <linux/utsname.h>
#include <linux/zlib.h>
@@ -46,6 +46,9 @@
#include "i915_scatterlist.h"
#include "intel_csr.h"
+#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
+#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
+
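Both masks make capture-path allocations best-effort: __GFP_RETRY_MAYFAIL allows the allocation to fail rather than invoke the OOM killer, and __GFP_NOWARN suppresses the failure splat. A hypothetical caller simply degrades the capture on NULL:

	static void *capture_buf_alloc(size_t size)
	{
		/* May return NULL under pressure; the capture is then skipped. */
		return kmalloc(size, ALLOW_FAIL);
	}
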
static inline const struct intel_engine_cs *
engine_lookup(const struct drm_i915_private *i915, unsigned int id)
{
@@ -67,26 +70,6 @@ engine_name(const struct drm_i915_private *i915, unsigned int id)
return __engine_name(engine_lookup(i915, id));
}
-static const char *tiling_flag(int tiling)
-{
- switch (tiling) {
- default:
- case I915_TILING_NONE: return "";
- case I915_TILING_X: return " X";
- case I915_TILING_Y: return " Y";
- }
-}
-
-static const char *dirty_flag(int dirty)
-{
- return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
- return purgeable ? " purgeable" : "";
-}
-
static void __sg_set_buf(struct scatterlist *sg,
void *addr, unsigned int len, loff_t it)
{
@@ -114,7 +97,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
if (e->cur == e->end) {
struct scatterlist *sgl;
- sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+ sgl = (typeof(sgl))__get_free_page(ALLOW_FAIL);
if (!sgl) {
e->err = -ENOMEM;
return false;
@@ -134,7 +117,7 @@ static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
}
e->size = ALIGN(len + 1, SZ_64K);
- e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ e->buf = kmalloc(e->size, ALLOW_FAIL);
if (!e->buf) {
e->size = PAGE_ALIGN(len + 1);
e->buf = kmalloc(e->size, GFP_KERNEL);
@@ -211,47 +194,115 @@ i915_error_printer(struct drm_i915_error_state_buf *e)
return p;
}
+/* single-threaded page allocator with a reserved stash for emergencies */
+static void pool_fini(struct pagevec *pv)
+{
+ pagevec_release(pv);
+}
+
+static int pool_refill(struct pagevec *pv, gfp_t gfp)
+{
+ while (pagevec_space(pv)) {
+ struct page *p;
+
+ p = alloc_page(gfp);
+ if (!p)
+ return -ENOMEM;
+
+ pagevec_add(pv, p);
+ }
+
+ return 0;
+}
+
+static int pool_init(struct pagevec *pv, gfp_t gfp)
+{
+ int err;
+
+ pagevec_init(pv);
+
+ err = pool_refill(pv, gfp);
+ if (err)
+ pool_fini(pv);
+
+ return err;
+}
+
+static void *pool_alloc(struct pagevec *pv, gfp_t gfp)
+{
+ struct page *p;
+
+ p = alloc_page(gfp);
+ if (!p && pagevec_count(pv))
+ p = pv->pages[--pv->nr];
+
+ return p ? page_address(p) : NULL;
+}
+
+static void pool_free(struct pagevec *pv, void *addr)
+{
+ struct page *p = virt_to_page(addr);
+
+ if (pagevec_space(pv))
+ pagevec_add(pv, p);
+ else
+ __free_page(p);
+}
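
A hedged usage sketch for the pool helpers above (error handling elided; the GFP choices mirror ALLOW_FAIL and ATOMIC_MAYFAIL from this file): prime the stash while sleeping is still allowed, then draw from it where alloc_page() may fail:

	static int example_pool_user(struct pagevec *pv)
	{
		void *buf;

		if (pool_init(pv, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN))
			return -ENOMEM;		/* stash could not be primed */

		/* Under pressure: try a fresh page, else dip into the stash. */
		buf = pool_alloc(pv, GFP_ATOMIC | __GFP_NOWARN);
		if (buf)
			pool_free(pv, buf);	/* page goes back to the stash */

		pool_fini(pv);			/* release whatever remains */
		return 0;
	}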
+
#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
struct compress {
+ struct pagevec pool;
struct z_stream_s zstream;
void *tmp;
};
static bool compress_init(struct compress *c)
{
- struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));
+ struct z_stream_s *zstream = &c->zstream;
- zstream->workspace =
- kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
- GFP_ATOMIC | __GFP_NOWARN);
- if (!zstream->workspace)
+ if (pool_init(&c->pool, ALLOW_FAIL))
return false;
- if (zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) != Z_OK) {
- kfree(zstream->workspace);
+ zstream->workspace =
+ kmalloc(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
+ ALLOW_FAIL);
+ if (!zstream->workspace) {
+ pool_fini(&c->pool);
return false;
}
c->tmp = NULL;
if (i915_has_memcpy_from_wc())
- c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ c->tmp = pool_alloc(&c->pool, ALLOW_FAIL);
return true;
}
-static void *compress_next_page(struct drm_i915_error_object *dst)
+static bool compress_start(struct compress *c)
+{
+ struct z_stream_s *zstream = &c->zstream;
+ void *workspace = zstream->workspace;
+
+ memset(zstream, 0, sizeof(*zstream));
+ zstream->workspace = workspace;
+
+ return zlib_deflateInit(zstream, Z_DEFAULT_COMPRESSION) == Z_OK;
+}
+
+static void *compress_next_page(struct compress *c,
+ struct drm_i915_error_object *dst)
{
- unsigned long page;
+ void *page;
if (dst->page_count >= dst->num_pages)
return ERR_PTR(-ENOSPC);
- page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ page = pool_alloc(&c->pool, ALLOW_FAIL);
if (!page)
return ERR_PTR(-ENOMEM);
- return dst->pages[dst->page_count++] = (void *)page;
+ return dst->pages[dst->page_count++] = page;
}
static int compress_page(struct compress *c,
@@ -267,7 +318,7 @@ static int compress_page(struct compress *c,
do {
if (zstream->avail_out == 0) {
- zstream->next_out = compress_next_page(dst);
+ zstream->next_out = compress_next_page(c, dst);
if (IS_ERR(zstream->next_out))
return PTR_ERR(zstream->next_out);
@@ -276,8 +327,6 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
-
- touch_nmi_watchdog();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -295,7 +344,7 @@ static int compress_flush(struct compress *c,
do {
switch (zlib_deflate(zstream, Z_FINISH)) {
case Z_OK: /* more space requested */
- zstream->next_out = compress_next_page(dst);
+ zstream->next_out = compress_next_page(c, dst);
if (IS_ERR(zstream->next_out))
return PTR_ERR(zstream->next_out);
@@ -316,15 +365,17 @@ end:
return 0;
}
-static void compress_fini(struct compress *c,
- struct drm_i915_error_object *dst)
+static void compress_finish(struct compress *c)
{
- struct z_stream_s *zstream = &c->zstream;
+ zlib_deflateEnd(&c->zstream);
+}
- zlib_deflateEnd(zstream);
- kfree(zstream->workspace);
+static void compress_fini(struct compress *c)
+{
+ kfree(c->zstream.workspace);
if (c->tmp)
- free_page((unsigned long)c->tmp);
+ pool_free(&c->pool, c->tmp);
+ pool_fini(&c->pool);
}
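
compress_init() now allocates the page pool and the zlib workspace once, while compress_start()/compress_finish() bracket each object so the workspace is reused across streams. A sketch of the intended lifecycle, assuming a hypothetical caller:

	static void example_capture_run(struct compress *c, int nr_objects)
	{
		int i;

		if (!compress_init(c))		/* pool + workspace, once */
			return;

		for (i = 0; i < nr_objects; i++) {
			if (!compress_start(c))	/* fresh stream, same workspace */
				break;
			/* ... compress_page() / compress_flush() per object ... */
			compress_finish(c);	/* zlib_deflateEnd() */
		}

		compress_fini(c);		/* free workspace and pool */
	}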
static void err_compression_marker(struct drm_i915_error_state_buf *m)
@@ -335,10 +386,16 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
struct compress {
+ struct pagevec pool;
};
static bool compress_init(struct compress *c)
{
+ return pool_init(&c->pool, ALLOW_FAIL) == 0;
+}
+
+static bool compress_start(struct compress *c)
+{
return true;
}
@@ -346,14 +403,12 @@ static int compress_page(struct compress *c,
void *src,
struct drm_i915_error_object *dst)
{
- unsigned long page;
void *ptr;
- page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
- if (!page)
+ ptr = pool_alloc(&c->pool, ALLOW_FAIL);
+ if (!ptr)
return -ENOMEM;
- ptr = (void *)page;
if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
@@ -367,11 +422,15 @@ static int compress_flush(struct compress *c,
return 0;
}
-static void compress_fini(struct compress *c,
- struct drm_i915_error_object *dst)
+static void compress_finish(struct compress *c)
{
}
+static void compress_fini(struct compress *c)
+{
+ pool_fini(&c->pool);
+}
+
static void err_compression_marker(struct drm_i915_error_state_buf *m)
{
err_puts(m, "~");
@@ -379,36 +438,6 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#endif
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
- const char *name,
- struct drm_i915_error_buffer *err,
- int count)
-{
- err_printf(m, "%s [%d]:\n", name, count);
-
- while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x",
- upper_32_bits(err->gtt_offset),
- lower_32_bits(err->gtt_offset),
- err->size,
- err->read_domains,
- err->write_domain);
- err_puts(m, tiling_flag(err->tiling));
- err_puts(m, dirty_flag(err->dirty));
- err_puts(m, purgeable_flag(err->purgeable));
- err_puts(m, err->userptr ? " userptr" : "");
- err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
-
- if (err->name)
- err_printf(m, " (name: %d)", err->name);
- if (err->fence_reg != I915_FENCE_REG_NONE)
- err_printf(m, " (fence: %d)", err->fence_reg);
-
- err_puts(m, "\n");
- err++;
- }
-}
-
static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
@@ -620,7 +649,7 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
const struct i915_gpu_state *error =
container_of(error_uc, typeof(*error), uc);
- if (!error->device_info.has_guc)
+ if (!error->device_info.has_gt_uc)
return;
intel_uc_fw_dump(&error_uc->guc_fw, &p);
@@ -734,33 +763,6 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
error_print_engine(m, &error->engine[i], error->epoch);
}
- for (i = 0; i < ARRAY_SIZE(error->active_vm); i++) {
- char buf[128];
- int len, first = 1;
-
- if (!error->active_vm[i])
- break;
-
- len = scnprintf(buf, sizeof(buf), "Active (");
- for (j = 0; j < ARRAY_SIZE(error->engine); j++) {
- if (error->engine[j].vm != error->active_vm[i])
- continue;
-
- len += scnprintf(buf + len, sizeof(buf), "%s%s",
- first ? "" : ", ",
- m->i915->engine[j]->name);
- first = 0;
- }
- scnprintf(buf + len, sizeof(buf), ")");
- print_error_buffers(m, buf,
- error->active_bo[i],
- error->active_bo_count[i]);
- }
-
- print_error_buffers(m, "Pinned (global)",
- error->pinned_bo,
- error->pinned_bo_count);
-
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
const struct drm_i915_error_engine *ee = &error->engine[i];
@@ -974,10 +976,6 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(ee->requests);
}
- for (i = 0; i < ARRAY_SIZE(error->active_bo); i++)
- kfree(error->active_bo[i]);
- kfree(error->pinned_bo);
-
kfree(error->overlay);
kfree(error->display);
@@ -990,108 +988,63 @@ void __i915_gpu_state_free(struct kref *error_ref)
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *i915,
- struct i915_vma *vma)
+ struct i915_vma *vma,
+ struct compress *compress)
{
struct i915_ggtt *ggtt = &i915->ggtt;
const u64 slot = ggtt->error_capture.start;
struct drm_i915_error_object *dst;
- struct compress compress;
unsigned long num_pages;
struct sgt_iter iter;
dma_addr_t dma;
int ret;
+ might_sleep();
+
if (!vma || !vma->pages)
return NULL;
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worst-case zlib growth */
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
- GFP_ATOMIC | __GFP_NOWARN);
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL);
if (!dst)
return NULL;
+ if (!compress_start(compress)) {
+ kfree(dst);
+ return NULL;
+ }
+
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
dst->num_pages = num_pages;
dst->page_count = 0;
dst->unused = 0;
- if (!compress_init(&compress)) {
- kfree(dst);
- return NULL;
- }
-
ret = -EINVAL;
for_each_sgt_dma(dma, iter, vma->pages) {
void __iomem *s;
ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
- s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
- ret = compress_page(&compress, (void __force *)s, dst);
- io_mapping_unmap_atomic(s);
+ s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE);
+ ret = compress_page(compress, (void __force *)s, dst);
+ io_mapping_unmap(s);
if (ret)
break;
}
- if (ret || compress_flush(&compress, dst)) {
+ if (ret || compress_flush(compress, dst)) {
while (dst->page_count--)
- free_page((unsigned long)dst->pages[dst->page_count]);
+ pool_free(&compress->pool, dst->pages[dst->page_count]);
kfree(dst);
dst = NULL;
}
+ compress_finish(compress);
- compress_fini(&compress, dst);
return dst;
}
-static void capture_bo(struct drm_i915_error_buffer *err,
- struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj = vma->obj;
-
- err->size = obj->base.size;
- err->name = obj->base.name;
-
- err->gtt_offset = vma->node.start;
- err->read_domains = obj->read_domains;
- err->write_domain = obj->write_domain;
- err->fence_reg = vma->fence ? vma->fence->id : -1;
- err->tiling = i915_gem_object_get_tiling(obj);
- err->dirty = obj->mm.dirty;
- err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
- err->userptr = obj->userptr.mm != NULL;
- err->cache_level = obj->cache_level;
-}
-
-static u32 capture_error_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head,
- unsigned int flags)
-#define ACTIVE_ONLY BIT(0)
-#define PINNED_ONLY BIT(1)
-{
- struct i915_vma *vma;
- int i = 0;
-
- list_for_each_entry(vma, head, vm_link) {
- if (!vma->obj)
- continue;
-
- if (flags & ACTIVE_ONLY && !i915_vma_is_active(vma))
- continue;
-
- if (flags & PINNED_ONLY && !i915_vma_is_pinned(vma))
- continue;
-
- capture_bo(err++, vma);
- if (++i == count)
- break;
- }
-
- return i;
-}
-
/*
* Generate a semi-unique error code. The code is not meant to have meaning. The
* code's only purpose is to try to prevent false duplicated bug reports by
@@ -1249,10 +1202,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
}
}
-static void record_request(struct i915_request *request,
+static void record_request(const struct i915_request *request,
struct drm_i915_error_request *erq)
{
- struct i915_gem_context *ctx = request->gem_context;
+ const struct i915_gem_context *ctx = request->gem_context;
erq->flags = request->fence.flags;
erq->context = request->fence.context;
@@ -1282,7 +1235,7 @@ static void engine_record_requests(struct intel_engine_cs *engine,
if (!count)
return;
- ee->requests = kcalloc(count, sizeof(*ee->requests), GFP_ATOMIC);
+ ee->requests = kcalloc(count, sizeof(*ee->requests), ATOMIC_MAYFAIL);
if (!ee->requests)
return;
@@ -1316,20 +1269,15 @@ static void engine_record_requests(struct intel_engine_cs *engine,
ee->num_requests = count;
}
-static void error_record_engine_execlists(struct intel_engine_cs *engine,
+static void error_record_engine_execlists(const struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
const struct intel_engine_execlists * const execlists = &engine->execlists;
- unsigned int n;
-
- for (n = 0; n < execlists_num_ports(execlists); n++) {
- struct i915_request *rq = port_request(&execlists->port[n]);
+ struct i915_request * const *port = execlists->active;
+ unsigned int n = 0;
- if (!rq)
- break;
-
- record_request(rq, &ee->execlist[n]);
- }
+ while (*port)
+ record_request(*port++, &ee->execlist[n++]);
ee->num_ports = n;
}
@@ -1355,8 +1303,42 @@ static void record_context(struct drm_i915_error_context *e,
e->active = atomic_read(&ctx->active_count);
}
-static void request_record_user_bo(struct i915_request *request,
- struct drm_i915_error_engine *ee)
+struct capture_vma {
+ struct capture_vma *next;
+ void **slot;
+};
+
+static struct capture_vma *
+capture_vma(struct capture_vma *next,
+ struct i915_vma *vma,
+ struct drm_i915_error_object **out)
+{
+ struct capture_vma *c;
+
+ *out = NULL;
+ if (!vma)
+ return next;
+
+ c = kmalloc(sizeof(*c), ATOMIC_MAYFAIL);
+ if (!c)
+ return next;
+
+ if (!i915_active_trygrab(&vma->active)) {
+ kfree(c);
+ return next;
+ }
+
+ c->slot = (void **)out;
+ *c->slot = i915_vma_get(vma);
+
+ c->next = next;
+ return c;
+}
+
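+
capture_vma() only pins the vma and records which error-object slot it should eventually fill; the copy itself happens after the engine lock is dropped, when gem_record_rings() drains the list below. A minimal standalone sketch of that deferred-fill pattern, with illustrative names:

	#include <linux/slab.h>

	struct deferred {
		struct deferred *next;
		void **slot;		/* where the result will be stored */
	};

	static struct deferred *defer(struct deferred *head, void **out, void *ref)
	{
		struct deferred *d;

		*out = NULL;
		if (!ref)
			return head;

		d = kmalloc(sizeof(*d), GFP_ATOMIC | __GFP_NOWARN);
		if (!d)
			return head;

		d->slot = out;
		*d->slot = ref;		/* hold the reference until drained */
		d->next = head;
		return d;
	}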
+static struct capture_vma *
+request_record_user_bo(struct i915_request *request,
+ struct drm_i915_error_engine *ee,
+ struct capture_vma *capture)
{
struct i915_capture_list *c;
struct drm_i915_error_object **bo;
@@ -1366,33 +1348,34 @@ static void request_record_user_bo(struct i915_request *request,
for (c = request->capture_list; c; c = c->next)
max++;
if (!max)
- return;
+ return capture;
- bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+ bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
if (!bo) {
/* If we can't capture everything, try to capture something. */
max = min_t(long, max, PAGE_SIZE / sizeof(*bo));
- bo = kmalloc_array(max, sizeof(*bo), GFP_ATOMIC);
+ bo = kmalloc_array(max, sizeof(*bo), ATOMIC_MAYFAIL);
}
if (!bo)
- return;
+ return capture;
count = 0;
for (c = request->capture_list; c; c = c->next) {
- bo[count] = i915_error_object_create(request->i915, c->vma);
- if (!bo[count])
- break;
+ capture = capture_vma(capture, c->vma, &bo[count]);
if (++count == max)
break;
}
ee->user_bo = bo;
ee->user_bo_count = count;
+
+ return capture;
}
static struct drm_i915_error_object *
capture_object(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ struct compress *compress)
{
if (obj && i915_gem_object_has_pages(obj)) {
struct i915_vma fake = {
@@ -1402,21 +1385,22 @@ capture_object(struct drm_i915_private *dev_priv,
.obj = obj,
};
- return i915_error_object_create(dev_priv, &fake);
+ return i915_error_object_create(dev_priv, &fake, compress);
} else {
return NULL;
}
}
-static void gem_record_rings(struct i915_gpu_state *error)
+static void
+gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
{
struct drm_i915_private *i915 = error->i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
int i;
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = i915->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
+ struct capture_vma *capture = NULL;
struct i915_request *request;
unsigned long flags;
@@ -1427,6 +1411,9 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->engine_id = i;
+ /* Refill our page pool before entering atomic section */
+ pool_refill(&compress->pool, ALLOW_FAIL);
+
error_record_engine_registers(error, engine, ee);
error_record_engine_execlists(engine, ee);
@@ -1436,26 +1423,31 @@ static void gem_record_rings(struct i915_gpu_state *error)
struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring = request->ring;
- ee->vm = ctx->vm ?: &ggtt->vm;
-
record_context(&ee->context, ctx);
- /* We need to copy these to an anonymous buffer
+ /*
+ * We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
* by userspace.
*/
- ee->batchbuffer =
- i915_error_object_create(i915, request->batch);
+ capture = capture_vma(capture,
+ request->batch,
+ &ee->batchbuffer);
if (HAS_BROKEN_CS_TLB(i915))
- ee->wa_batchbuffer =
- i915_error_object_create(i915,
- i915->gt.scratch);
- request_record_user_bo(request, ee);
+ capture = capture_vma(capture,
+ engine->gt->scratch,
+ &ee->wa_batchbuffer);
- ee->ctx =
- i915_error_object_create(i915,
- request->hw_context->state);
+ capture = request_record_user_bo(request, ee, capture);
+
+ capture = capture_vma(capture,
+ request->hw_context->state,
+ &ee->ctx);
+
+ capture = capture_vma(capture,
+ ring->vma,
+ &ee->ringbuffer);
error->simulated |=
i915_gem_context_no_error_capture(ctx);
@@ -1466,116 +1458,63 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
- ee->ringbuffer =
- i915_error_object_create(i915, ring->vma);
engine_record_requests(engine, request, ee);
}
spin_unlock_irqrestore(&engine->active.lock, flags);
- ee->hws_page =
- i915_error_object_create(i915,
- engine->status_page.vma);
-
- ee->wa_ctx = i915_error_object_create(i915, engine->wa_ctx.vma);
+ while (capture) {
+ struct capture_vma *this = capture;
+ struct i915_vma *vma = *this->slot;
- ee->default_state = capture_object(i915, engine->default_state);
- }
-}
+ *this->slot =
+ i915_error_object_create(i915, vma, compress);
-static void gem_capture_vm(struct i915_gpu_state *error,
- struct i915_address_space *vm,
- int idx)
-{
- struct drm_i915_error_buffer *active_bo;
- struct i915_vma *vma;
- int count;
+ i915_active_ungrab(&vma->active);
+ i915_vma_put(vma);
- count = 0;
- list_for_each_entry(vma, &vm->bound_list, vm_link)
- if (i915_vma_is_active(vma))
- count++;
-
- active_bo = NULL;
- if (count)
- active_bo = kcalloc(count, sizeof(*active_bo), GFP_ATOMIC);
- if (active_bo)
- count = capture_error_bo(active_bo,
- count, &vm->bound_list,
- ACTIVE_ONLY);
- else
- count = 0;
-
- error->active_vm[idx] = vm;
- error->active_bo[idx] = active_bo;
- error->active_bo_count[idx] = count;
-}
-
-static void capture_active_buffers(struct i915_gpu_state *error)
-{
- int cnt = 0, i, j;
-
- BUILD_BUG_ON(ARRAY_SIZE(error->engine) > ARRAY_SIZE(error->active_bo));
- BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_vm));
- BUILD_BUG_ON(ARRAY_SIZE(error->active_bo) != ARRAY_SIZE(error->active_bo_count));
+ capture = this->next;
+ kfree(this);
+ }
- /* Scan each engine looking for unique active contexts/vm */
- for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- struct drm_i915_error_engine *ee = &error->engine[i];
- bool found;
+ ee->hws_page =
+ i915_error_object_create(i915,
+ engine->status_page.vma,
+ compress);
- if (!ee->vm)
- continue;
+ ee->wa_ctx =
+ i915_error_object_create(i915,
+ engine->wa_ctx.vma,
+ compress);
- found = false;
- for (j = 0; j < i && !found; j++)
- found = error->engine[j].vm == ee->vm;
- if (!found)
- gem_capture_vm(error, ee->vm, cnt++);
+ ee->default_state =
+ capture_object(i915, engine->default_state, compress);
}
}
-static void capture_pinned_buffers(struct i915_gpu_state *error)
-{
- struct i915_address_space *vm = &error->i915->ggtt.vm;
- struct drm_i915_error_buffer *bo;
- struct i915_vma *vma;
- int count;
-
- count = 0;
- list_for_each_entry(vma, &vm->bound_list, vm_link)
- count++;
-
- bo = NULL;
- if (count)
- bo = kcalloc(count, sizeof(*bo), GFP_ATOMIC);
- if (!bo)
- return;
-
- error->pinned_bo_count =
- capture_error_bo(bo, count, &vm->bound_list, PINNED_ONLY);
- error->pinned_bo = bo;
-}
-
-static void capture_uc_state(struct i915_gpu_state *error)
+static void
+capture_uc_state(struct i915_gpu_state *error, struct compress *compress)
{
struct drm_i915_private *i915 = error->i915;
struct i915_error_uc *error_uc = &error->uc;
+ struct intel_uc *uc = &i915->gt.uc;
/* Capturing uC state won't be useful if there is no GuC */
- if (!error->device_info.has_guc)
+ if (!error->device_info.has_gt_uc)
return;
- error_uc->guc_fw = i915->guc.fw;
- error_uc->huc_fw = i915->huc.fw;
+ error_uc->guc_fw = uc->guc.fw;
+ error_uc->huc_fw = uc->huc.fw;
/* Non-default firmware paths will be specified by the modparam.
* As modparams are generally accessible from userspace, make
* explicit copies of the firmware paths.
*/
- error_uc->guc_fw.path = kstrdup(i915->guc.fw.path, GFP_ATOMIC);
- error_uc->huc_fw.path = kstrdup(i915->huc.fw.path, GFP_ATOMIC);
- error_uc->guc_log = i915_error_object_create(i915, i915->guc.log.vma);
+ error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL);
+ error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL);
+ error_uc->guc_log = i915_error_object_create(i915,
+ uc->guc.log.vma,
+ compress);
}
/* Capture all registers which don't fit into another category. */
@@ -1759,56 +1698,53 @@ static void capture_finish(struct i915_gpu_state *error)
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
}
-static int capture(void *data)
-{
- struct i915_gpu_state *error = data;
-
- error->time = ktime_get_real();
- error->boottime = ktime_get_boottime();
- error->uptime = ktime_sub(ktime_get(),
- error->i915->gt.last_init_time);
- error->capture = jiffies;
-
- capture_params(error);
- capture_gen_state(error);
- capture_uc_state(error);
- capture_reg_state(error);
- gem_record_fences(error);
- gem_record_rings(error);
- capture_active_buffers(error);
- capture_pinned_buffers(error);
-
- error->overlay = intel_overlay_capture_error_state(error->i915);
- error->display = intel_display_capture_error_state(error->i915);
-
- error->epoch = capture_find_epoch(error);
-
- capture_finish(error);
- return 0;
-}
-
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
struct i915_gpu_state *
i915_capture_gpu_state(struct drm_i915_private *i915)
{
struct i915_gpu_state *error;
+ struct compress compress;
/* Check if GPU capture has been disabled */
error = READ_ONCE(i915->gpu_error.first_error);
if (IS_ERR(error))
return error;
- error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ error = kzalloc(sizeof(*error), ALLOW_FAIL);
if (!error) {
i915_disable_error_state(i915, -ENOMEM);
return ERR_PTR(-ENOMEM);
}
+ if (!compress_init(&compress)) {
+ kfree(error);
+ i915_disable_error_state(i915, -ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
+
kref_init(&error->ref);
error->i915 = i915;
- stop_machine(capture, error, NULL);
+ error->time = ktime_get_real();
+ error->boottime = ktime_get_boottime();
+ error->uptime = ktime_sub(ktime_get(), i915->gt.last_init_time);
+ error->capture = jiffies;
+
+ capture_params(error);
+ capture_gen_state(error);
+ capture_uc_state(error, &compress);
+ capture_reg_state(error);
+ gem_record_fences(error);
+ gem_record_rings(error, &compress);
+
+ error->overlay = intel_overlay_capture_error_state(i915);
+ error->display = intel_display_capture_error_state(i915);
+
+ error->epoch = capture_find_epoch(error);
+
+ capture_finish(error);
+ compress_fini(&compress);
return error;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 2ecd0c6a1c94..a24c35107d16 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -7,6 +7,7 @@
#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_
+#include <linux/atomic.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/sched.h>
@@ -14,9 +15,9 @@
#include <drm/drm_mm.h>
#include "gt/intel_engine.h"
+#include "gt/uc/intel_uc_fw.h"
#include "intel_device_info.h"
-#include "intel_uc_fw.h"
#include "i915_gem.h"
#include "i915_gem_gtt.h"
@@ -84,7 +85,6 @@ struct i915_gpu_state {
/* Software tracked state */
bool idle;
unsigned long hangcheck_timestamp;
- struct i915_address_space *vm;
int num_requests;
u32 reset_count;
@@ -160,32 +160,10 @@ struct i915_gpu_state {
} vm_info;
} engine[I915_NUM_ENGINES];
- struct drm_i915_error_buffer {
- u32 size;
- u32 name;
- u64 gtt_offset;
- u32 read_domains;
- u32 write_domain;
- s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
- u32 tiling:2;
- u32 dirty:1;
- u32 purgeable:1;
- u32 userptr:1;
- u32 cache_level:3;
- } *active_bo[I915_NUM_ENGINES], *pinned_bo;
- u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
- struct i915_address_space *active_vm[I915_NUM_ENGINES];
-
struct scatterlist *sgl, *fit;
};
struct i915_gpu_error {
- /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-
- struct delayed_work hangcheck_work;
-
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
@@ -193,52 +171,11 @@ struct i915_gpu_error {
atomic_t pending_fb_pin;
- /**
- * flags: Control various stages of the GPU reset
- *
- * #I915_RESET_BACKOFF - When we start a global reset, we need to
- * serialise with any other users attempting to do the same, and
- * any global resources that may be clobber by the reset (such as
- * FENCE registers).
- *
- * #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
- * acquire the struct_mutex to reset an engine, we need an explicit
- * flag to prevent two concurrent reset attempts in the same engine.
- * As the number of engines continues to grow, allocate the flags from
- * the most significant bits.
- *
- * #I915_WEDGED - If reset fails and we can no longer use the GPU,
- * we set the #I915_WEDGED bit. Prior to command submission, e.g.
- * i915_request_alloc(), this bit is checked and the sequence
- * aborted (with -EIO reported to userspace) if set.
- */
- unsigned long flags;
-#define I915_RESET_BACKOFF 0
-#define I915_RESET_MODESET 1
-#define I915_RESET_ENGINE 2
-#define I915_WEDGED (BITS_PER_LONG - 1)
-
/** Number of times the device has been reset (global) */
- u32 reset_count;
+ atomic_t reset_count;
/** Number of times an engine has been reset */
- u32 reset_engine_count[I915_NUM_ENGINES];
-
- struct mutex wedge_mutex; /* serialises wedging/unwedging */
-
- /**
- * Waitqueue to signal when a hang is detected. Used for waiters
- * to release the struct_mutex for the reset to procede.
- */
- wait_queue_head_t wait_queue;
-
- /**
- * Waitqueue to signal when the reset has completed. Used by clients
- * that wait for dev_priv->mm.wedged to settle.
- */
- wait_queue_head_t reset_queue;
-
- struct srcu_struct reset_backoff_srcu;
+ atomic_t reset_engine_count[I915_NUM_ENGINES];
};
struct drm_i915_error_state_buf {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b2e27b5b0df9..a17d4fd17962 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -42,6 +42,8 @@
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"
+#include "gt/intel_gt.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_trace.h"
@@ -264,7 +266,7 @@ static void gen2_irq_init(struct intel_uncore *uncore,
gen2_irq_init((uncore), imr_val, ier_val)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
+static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir);
/* For display hotplug interrupt */
static inline void
@@ -305,17 +307,17 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
}
static u32
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
+gen11_gt_engine_identity(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit);
-static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+static bool gen11_reset_one_iir(struct intel_gt *gt,
const unsigned int bank,
const unsigned int bit)
{
- void __iomem * const regs = i915->uncore.regs;
+ void __iomem * const regs = gt->uncore->regs;
u32 dw;
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&gt->i915->irq_lock);
dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
if (dw & BIT(bit)) {
@@ -323,7 +325,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
* According to the BSpec, DW_IIR bits cannot be cleared without
* first servicing the Selector & Shared IIR registers.
*/
- gen11_gt_engine_identity(i915, bank, bit);
+ gen11_gt_engine_identity(gt, bank, bit);
/*
* We locked GT INT DW by reading it. If we want to (try
@@ -409,50 +411,54 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-static void write_pm_imr(struct drm_i915_private *dev_priv)
+static void write_pm_imr(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_imr;
i915_reg_t reg;
- u32 mask = dev_priv->pm_imr;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 11) {
reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
/* pm is in upper half */
mask = mask << 16;
- } else if (INTEL_GEN(dev_priv) >= 8) {
+ } else if (INTEL_GEN(i915) >= 8) {
reg = GEN8_GT_IMR(2);
} else {
reg = GEN6_PMIMR;
}
- I915_WRITE(reg, mask);
- POSTING_READ(reg);
+ intel_uncore_write(uncore, reg, mask);
+ intel_uncore_posting_read(uncore, reg);
}
-static void write_pm_ier(struct drm_i915_private *dev_priv)
+static void write_pm_ier(struct intel_gt *gt)
{
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ u32 mask = gt->pm_ier;
i915_reg_t reg;
- u32 mask = dev_priv->pm_ier;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 11) {
reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
/* pm is in upper half */
mask = mask << 16;
- } else if (INTEL_GEN(dev_priv) >= 8) {
+ } else if (INTEL_GEN(i915) >= 8) {
reg = GEN8_GT_IER(2);
} else {
reg = GEN6_PMIER;
}
- I915_WRITE(reg, mask);
+ intel_uncore_write(uncore, reg, mask);
}
/**
* snb_update_pm_irq - update GEN6_PMIMR
- * @dev_priv: driver private
+ * @gt: gt for the interrupts
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+static void snb_update_pm_irq(struct intel_gt *gt,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
@@ -460,37 +466,37 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&gt->i915->irq_lock);
- new_val = dev_priv->pm_imr;
+ new_val = gt->pm_imr;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->pm_imr) {
- dev_priv->pm_imr = new_val;
- write_pm_imr(dev_priv);
+ if (new_val != gt->pm_imr) {
+ gt->pm_imr = new_val;
+ write_pm_imr(gt);
}
}
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
+void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask)
{
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (WARN_ON(!intel_irqs_enabled(gt->i915)))
return;
- snb_update_pm_irq(dev_priv, mask, mask);
+ snb_update_pm_irq(gt, mask, mask);
}
-static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
+static void __gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
{
- snb_update_pm_irq(dev_priv, mask, 0);
+ snb_update_pm_irq(gt, mask, 0);
}
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
+void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask)
{
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (WARN_ON(!intel_irqs_enabled(gt->i915)))
return;
- __gen6_mask_pm_irq(dev_priv, mask);
+ __gen6_mask_pm_irq(gt, mask);
}
static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
@@ -504,23 +510,23 @@ static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
POSTING_READ(reg);
}
-static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
+static void gen6_enable_pm_irq(struct intel_gt *gt, u32 enable_mask)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&gt->i915->irq_lock);
- dev_priv->pm_ier |= enable_mask;
- write_pm_ier(dev_priv);
- gen6_unmask_pm_irq(dev_priv, enable_mask);
+ gt->pm_ier |= enable_mask;
+ write_pm_ier(gt);
+ gen6_unmask_pm_irq(gt, enable_mask);
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
-static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
+static void gen6_disable_pm_irq(struct intel_gt *gt, u32 disable_mask)
{
- lockdep_assert_held(&dev_priv->irq_lock);
+ lockdep_assert_held(&gt->i915->irq_lock);
- dev_priv->pm_ier &= ~disable_mask;
- __gen6_mask_pm_irq(dev_priv, disable_mask);
- write_pm_ier(dev_priv);
+ gt->pm_ier &= ~disable_mask;
+ __gen6_mask_pm_irq(gt, disable_mask);
+ write_pm_ier(gt);
/* a barrier is missing here, but we don't really need one */
}
@@ -528,7 +534,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
spin_lock_irq(&dev_priv->irq_lock);
- while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
+ while (gen11_reset_one_iir(&dev_priv->gt, 0, GEN11_GTPM))
;
dev_priv->gt_pm.rps.pm_iir = 0;
@@ -546,6 +552,7 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
+ struct intel_gt *gt = &dev_priv->gt;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
if (READ_ONCE(rps->interrupts_enabled))
@@ -555,12 +562,12 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
WARN_ON_ONCE(rps->pm_iir);
if (INTEL_GEN(dev_priv) >= 11)
- WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
+ WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
else
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
rps->interrupts_enabled = true;
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_enable_pm_irq(gt, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -577,10 +584,10 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
- gen6_disable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+ gen6_disable_pm_irq(&dev_priv->gt, GEN6_PM_RPS_EVENTS);
spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
/* Now that we will not be generating any more work, flush any
* outstanding tasks. As we are called on the RPS idle path,
@@ -594,78 +601,94 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
gen6_reset_rps_interrupts(dev_priv);
}
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
- spin_lock_irq(&dev_priv->irq_lock);
- gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
- spin_unlock_irq(&dev_priv->irq_lock);
+ assert_rpm_wakelock_held(&i915->runtime_pm);
+
+ spin_lock_irq(&i915->irq_lock);
+ gen6_reset_pm_iir(i915, gt->pm_guc_events);
+ spin_unlock_irq(&i915->irq_lock);
}
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
- spin_lock_irq(&dev_priv->irq_lock);
- if (!dev_priv->guc.interrupts.enabled) {
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
- dev_priv->pm_guc_events);
- dev_priv->guc.interrupts.enabled = true;
- gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+ assert_rpm_wakelock_held(&i915->runtime_pm);
+
+ spin_lock_irq(&i915->irq_lock);
+ if (!guc->interrupts.enabled) {
+ WARN_ON_ONCE(intel_uncore_read(gt->uncore, gen6_pm_iir(i915)) &
+ gt->pm_guc_events);
+ guc->interrupts.enabled = true;
+ gen6_enable_pm_irq(gt, gt->pm_guc_events);
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&i915->irq_lock);
}
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->guc.interrupts.enabled = false;
+ assert_rpm_wakelock_held(&i915->runtime_pm);
- gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);
+ spin_lock_irq(&i915->irq_lock);
+ guc->interrupts.enabled = false;
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
+ gen6_disable_pm_irq(gt, gt->pm_guc_events);
- gen9_reset_guc_interrupts(dev_priv);
+ spin_unlock_irq(&i915->irq_lock);
+ intel_synchronize_irq(i915);
+
+ gen9_reset_guc_interrupts(guc);
}
-void gen11_reset_guc_interrupts(struct drm_i915_private *i915)
+void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
+
spin_lock_irq(&i915->irq_lock);
- gen11_reset_one_iir(i915, 0, GEN11_GUC);
+ gen11_reset_one_iir(gt, 0, GEN11_GUC);
spin_unlock_irq(&i915->irq_lock);
}
-void gen11_enable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
- spin_lock_irq(&dev_priv->irq_lock);
- if (!dev_priv->guc.interrupts.enabled) {
- u32 events = REG_FIELD_PREP(ENGINE1_MASK,
- GEN11_GUC_INTR_GUC2HOST);
-
- WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GUC));
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, events);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~events);
- dev_priv->guc.interrupts.enabled = true;
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ spin_lock_irq(&gt->i915->irq_lock);
+ if (!guc->interrupts.enabled) {
+ u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
+
+ WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC));
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
+ guc->interrupts.enabled = true;
}
- spin_unlock_irq(&dev_priv->irq_lock);
+ spin_unlock_irq(&gt->i915->irq_lock);
}
-void gen11_disable_guc_interrupts(struct drm_i915_private *dev_priv)
+void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
- spin_lock_irq(&dev_priv->irq_lock);
- dev_priv->guc.interrupts.enabled = false;
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct drm_i915_private *i915 = gt->i915;
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
+ spin_lock_irq(&i915->irq_lock);
+ guc->interrupts.enabled = false;
- spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+ intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
- gen11_reset_guc_interrupts(dev_priv);
+ spin_unlock_irq(&i915->irq_lock);
+ intel_synchronize_irq(i915);
+
+ gen11_reset_guc_interrupts(guc);
}
/**
@@ -924,11 +947,12 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
const struct drm_display_mode *mode = &vblank->hwmode;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
i915_reg_t high_frame, low_frame;
u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
unsigned long irqflags;
@@ -989,9 +1013,10 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
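
Only the tail of i915_get_vblank_counter() is visible in this hunk, so a brief note on the return expression:

	/*
	 * high1/high2 are two samples of the frame-count high register
	 * taken around the low read; the loop above this hunk retries
	 * until they match, so the low half cannot have wrapped between
	 * the reads.  "+ (pixel >= vbl_start)" then bumps the count once
	 * the pixel counter has crossed into vblank, so the reported
	 * counter increments at vblank start rather than mid-frame.
	 */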
-static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}
@@ -1107,10 +1132,10 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
@@ -1424,7 +1449,7 @@ out:
/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
spin_lock_irq(&dev_priv->irq_lock);
if (rps->interrupts_enabled)
- gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ gen6_unmask_pm_irq(&dev_priv->gt, dev_priv->pm_rps_events);
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1637,7 +1662,7 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gen6_rps_irq_handler(i915, gt_iir[2]);
- gen9_guc_irq_handler(i915, gt_iir[2]);
+ guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16);
}
}
@@ -1891,8 +1916,9 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
-static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
+static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
{
+ struct drm_i915_private *i915 = gt->i915;
struct intel_rps *rps = &i915->gt_pm.rps;
const u32 events = i915->pm_rps_events & pm_iir;
@@ -1901,7 +1927,7 @@ static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
if (unlikely(!events))
return;
- gen6_mask_pm_irq(i915, events);
+ gen6_mask_pm_irq(gt, events);
if (!rps->interrupts_enabled)
return;
@@ -1916,7 +1942,8 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (pm_iir & dev_priv->pm_rps_events) {
spin_lock(&dev_priv->irq_lock);
- gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+ gen6_mask_pm_irq(&dev_priv->gt,
+ pm_iir & dev_priv->pm_rps_events);
if (rps->interrupts_enabled) {
rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
schedule_work(&rps->work);
@@ -1934,16 +1961,10 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
-static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
+static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
- if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT)
- intel_guc_to_host_event_handler(&dev_priv->guc);
-}
-
-static void gen11_guc_irq_handler(struct drm_i915_private *i915, u16 iir)
-{
- if (iir & GEN11_GUC_INTR_GUC2HOST)
- intel_guc_to_host_event_handler(&i915->guc);
+ if (iir & GUC_INTR_GUC2HOST)
+ intel_guc_to_host_event_handler(guc);
}
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
@@ -2185,8 +2206,7 @@ static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -2271,8 +2291,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -2691,8 +2710,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
*/
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
@@ -2826,6 +2844,14 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
return mask;
}
+static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 9)
+ return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+ else
+ return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2938,12 +2964,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (iir & GEN8_PIPE_FIFO_UNDERRUN)
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
- fault_errors = iir;
- if (INTEL_GEN(dev_priv) >= 9)
- fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
- else
- fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
-
+ fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
if (fault_errors)
DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
pipe_name(pipe),
@@ -3002,7 +3023,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
- struct drm_i915_private *dev_priv = to_i915(arg);
+ struct drm_i915_private *dev_priv = arg;
void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
u32 gt_iir[4];
@@ -3034,14 +3055,14 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
static u32
-gen11_gt_engine_identity(struct drm_i915_private * const i915,
+gen11_gt_engine_identity(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit)
{
- void __iomem * const regs = i915->uncore.regs;
+ void __iomem * const regs = gt->uncore->regs;
u32 timeout_ts;
u32 ident;
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&gt->i915->irq_lock);
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
@@ -3068,27 +3089,27 @@ gen11_gt_engine_identity(struct drm_i915_private * const i915,
}
static void
-gen11_other_irq_handler(struct drm_i915_private * const i915,
- const u8 instance, const u16 iir)
+gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
+ const u16 iir)
{
if (instance == OTHER_GUC_INSTANCE)
- return gen11_guc_irq_handler(i915, iir);
+ return guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
- return gen11_rps_irq_handler(i915, iir);
+ return gen11_rps_irq_handler(gt, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
}
static void
-gen11_engine_irq_handler(struct drm_i915_private * const i915,
- const u8 class, const u8 instance, const u16 iir)
+gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
+ const u8 instance, const u16 iir)
{
struct intel_engine_cs *engine;
if (instance <= MAX_ENGINE_INSTANCE)
- engine = i915->engine_class[class][instance];
+ engine = gt->i915->engine_class[class][instance];
else
engine = NULL;
@@ -3100,8 +3121,7 @@ gen11_engine_irq_handler(struct drm_i915_private * const i915,
}
static void
-gen11_gt_identity_handler(struct drm_i915_private * const i915,
- const u32 identity)
+gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
@@ -3111,31 +3131,30 @@ gen11_gt_identity_handler(struct drm_i915_private * const i915,
return;
if (class <= COPY_ENGINE_CLASS)
- return gen11_engine_irq_handler(i915, class, instance, intr);
+ return gen11_engine_irq_handler(gt, class, instance, intr);
if (class == OTHER_CLASS)
- return gen11_other_irq_handler(i915, instance, intr);
+ return gen11_other_irq_handler(gt, instance, intr);
WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
class, instance, intr);
}
static void
-gen11_gt_bank_handler(struct drm_i915_private * const i915,
- const unsigned int bank)
+gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
- void __iomem * const regs = i915->uncore.regs;
+ void __iomem * const regs = gt->uncore->regs;
unsigned long intr_dw;
unsigned int bit;
- lockdep_assert_held(&i915->irq_lock);
+ lockdep_assert_held(&gt->i915->irq_lock);
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
for_each_set_bit(bit, &intr_dw, 32) {
- const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
+ const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
- gen11_gt_identity_handler(i915, ident);
+ gen11_gt_identity_handler(gt, ident);
}
/* Clear must come after the shared interrupt has been serviced for the engine */
@@ -3143,25 +3162,25 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915,
}
static void
-gen11_gt_irq_handler(struct drm_i915_private * const i915,
- const u32 master_ctl)
+gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
+ struct drm_i915_private *i915 = gt->i915;
unsigned int bank;
spin_lock(&i915->irq_lock);
for (bank = 0; bank < 2; bank++) {
if (master_ctl & GEN11_GT_DW_IRQ(bank))
- gen11_gt_bank_handler(i915, bank);
+ gen11_gt_bank_handler(gt, bank);
}
spin_unlock(&i915->irq_lock);
}
static u32
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
+gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
- void __iomem * const regs = dev_priv->uncore.regs;
+ void __iomem * const regs = gt->uncore->regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3175,10 +3194,10 @@ gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
}
static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
+gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
{
if (iir & GEN11_GU_MISC_GSE)
- intel_opregion_asle_intr(dev_priv);
+ intel_opregion_asle_intr(gt->i915);
}
static inline u32 gen11_master_intr_disable(void __iomem * const regs)
@@ -3201,8 +3220,9 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
- struct drm_i915_private * const i915 = to_i915(arg);
+ struct drm_i915_private * const i915 = arg;
void __iomem * const regs = i915->uncore.regs;
+ struct intel_gt *gt = &i915->gt;
u32 master_ctl;
u32 gu_misc_iir;
@@ -3216,7 +3236,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
}
/* Find, clear, then process each source of interrupt. */
- gen11_gt_irq_handler(i915, master_ctl);
+ gen11_gt_irq_handler(gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & GEN11_DISPLAY_IRQ) {
@@ -3231,11 +3251,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
- gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
+ gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
gen11_master_intr_enable(regs);
- gen11_gu_misc_irq_handler(i915, gu_misc_iir);
+ gen11_gu_misc_irq_handler(gt, gu_misc_iir);
return IRQ_HANDLED;
}
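
A note on ordering in the handler above:

	/*
	 * Ordering note (editorial): sources are acked while the master
	 * interrupt is held off, so nothing is lost in the latch window,
	 * but non-critical processing (here the GU_MISC/ASLE work) is
	 * deferred until after the top level has been re-armed to keep
	 * the masked window short.
	 */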
@@ -3243,9 +3263,10 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i8xx_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3255,19 +3276,20 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i945gm_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
if (dev_priv->i945gm_vblank.enabled++ == 0)
schedule_work(&dev_priv->i945gm_vblank.work);
- return i8xx_enable_vblank(dev, pipe);
+ return i8xx_enable_vblank(crtc);
}
-static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int i965_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3278,9 +3300,10 @@ static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int ilk_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -3293,14 +3316,15 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
* PSR is active as no frames are generated.
*/
if (HAS_PSR(dev_priv))
- drm_vblank_restore(dev, pipe);
+ drm_crtc_vblank_restore(crtc);
return 0;
}
-static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int bdw_enable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3311,7 +3335,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
* PSR is active as no frames are generated, so check only for PSR.
*/
if (HAS_PSR(dev_priv))
- drm_vblank_restore(dev, pipe);
+ drm_crtc_vblank_restore(crtc);
return 0;
}
@@ -3319,9 +3343,10 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i8xx_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3329,19 +3354,20 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i945gm_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- i8xx_disable_vblank(dev, pipe);
+ i8xx_disable_vblank(crtc);
if (--dev_priv->i945gm_vblank.enabled == 0)
schedule_work(&dev_priv->i945gm_vblank.work);
}
-static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void i965_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3350,9 +3376,10 @@ static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void ilk_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
@@ -3362,9 +3389,10 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void bdw_disable_vblank(struct drm_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3447,10 +3475,8 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
*
* This function needs to be called before interrupts are enabled.
*/
-static void ibx_irq_pre_postinstall(struct drm_device *dev)
+static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_NOP(dev_priv))
return;
@@ -3473,12 +3499,12 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
struct intel_uncore *uncore = &dev_priv->uncore;
if (IS_CHERRYVIEW(dev_priv))
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
- I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
- I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
+ intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
@@ -3519,18 +3545,17 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
/* drm_dma.h hooks
*/
-static void ironlake_irq_reset(struct drm_device *dev)
+static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
GEN3_IRQ_RESET(uncore, DE);
if (IS_GEN(dev_priv, 7))
- I915_WRITE(GEN7_ERR_INT, 0xffffffff);
+ intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
if (IS_HASWELL(dev_priv)) {
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
}
gen5_gt_irq_reset(dev_priv);
@@ -3538,10 +3563,8 @@ static void ironlake_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev_priv);
}
-static void valleyview_irq_reset(struct drm_device *dev)
+static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
@@ -3563,9 +3586,8 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
-static void gen8_irq_reset(struct drm_device *dev)
+static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
@@ -3573,8 +3595,8 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
@@ -3589,39 +3611,40 @@ static void gen8_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev_priv);
}
-static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
+static void gen11_gt_irq_reset(struct intel_gt *gt)
{
+ struct intel_uncore *uncore = gt->uncore;
+
/* Disable RCS, BCS, VCS and VECS class engines. */
- I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0);
- I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
- I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0);
- I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0);
- I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0);
- I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0);
- I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
-static void gen11_irq_reset(struct drm_device *dev)
+static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
gen11_master_intr_disable(dev_priv->uncore.regs);
- gen11_gt_irq_reset(dev_priv);
+ gen11_gt_irq_reset(&dev_priv->gt);
- I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+ intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
- I915_WRITE(EDP_PSR_IMR, 0xffffffff);
- I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
+ intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
@@ -3680,12 +3703,11 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
/* make sure we're done processing display irqs */
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
}
-static void cherryview_irq_reset(struct drm_device *dev)
+static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
I915_WRITE(GEN8_MASTER_IRQ, 0);
@@ -3950,9 +3972,8 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
}
-static void ibx_irq_postinstall(struct drm_device *dev)
+static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask;
if (HAS_PCH_NOP(dev_priv))
@@ -3975,9 +3996,8 @@ static void ibx_irq_postinstall(struct drm_device *dev)
spt_hpd_detection_setup(dev_priv);
}
-static void gen5_gt_irq_postinstall(struct drm_device *dev)
+static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 pm_irqs, gt_irqs;
@@ -4006,17 +4026,16 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
*/
if (HAS_ENGINE(dev_priv, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
- dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
+ dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
- dev_priv->pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
+ dev_priv->gt.pm_imr = 0xffffffff;
+ GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
}
}
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
@@ -4043,16 +4062,16 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask = ~display_mask;
- ibx_irq_pre_postinstall(dev);
+ ibx_irq_pre_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
display_mask | extra_mask);
- gen5_gt_irq_postinstall(dev);
+ gen5_gt_irq_postinstall(dev_priv);
ilk_hpd_detection_setup(dev_priv);
- ibx_irq_postinstall(dev);
+ ibx_irq_postinstall(dev_priv);
if (IS_IRONLAKE_M(dev_priv)) {
/* Enable PCU event interrupts
@@ -4064,8 +4083,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
spin_unlock_irq(&dev_priv->irq_lock);
}
-
- return 0;
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
@@ -4097,11 +4114,9 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
}
-static int valleyview_irq_postinstall(struct drm_device *dev)
+static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- gen5_gt_irq_postinstall(dev);
+ gen5_gt_irq_postinstall(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4110,13 +4125,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
POSTING_READ(VLV_MASTER_IER);
-
- return 0;
}
-static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_gt *gt = &i915->gt;
+ struct intel_uncore *uncore = gt->uncore;
/* These are interrupts we'll toggle with the ring mask register */
u32 gt_interrupts[] = {
@@ -4136,15 +4150,15 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
};
- dev_priv->pm_ier = 0x0;
- dev_priv->pm_imr = ~dev_priv->pm_ier;
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled. Same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -4218,58 +4232,56 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
}
}
-static int gen8_irq_postinstall(struct drm_device *dev)
+static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_pre_postinstall(dev);
+ ibx_irq_pre_postinstall(dev_priv);
gen8_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
if (HAS_PCH_SPLIT(dev_priv))
- ibx_irq_postinstall(dev);
+ ibx_irq_postinstall(dev_priv);
gen8_master_intr_enable(dev_priv->uncore.regs);
-
- return 0;
}
-static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv)
+static void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+ struct intel_uncore *uncore = gt->uncore;
+ const u32 dmask = irqs << 16 | irqs;
+ const u32 smask = irqs << 16;
BUILD_BUG_ON(irqs & 0xffff0000);
/* Enable RCS, BCS, VCS and VECS class interrupts. */
- I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs);
- I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs);
+ intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
+ intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
- I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16));
- I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16));
- I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16));
- I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16));
- I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16));
+ intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
+ intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
+ intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
+ intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled.
*/
- dev_priv->pm_ier = 0x0;
- dev_priv->pm_imr = ~dev_priv->pm_ier;
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
+ gt->pm_ier = 0x0;
+ gt->pm_imr = ~gt->pm_ier;
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
/* Same thing for GuC interrupts */
- I915_WRITE(GEN11_GUC_SG_INTR_ENABLE, 0);
- I915_WRITE(GEN11_GUC_SG_INTR_MASK, ~0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
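
The dmask/smask pair introduced above encodes the gen11 mask-register layout; spelled out (inferred from the BUILD_BUG_ON and the register names, so treat it as a reading aid):

	/*
	 * Each GEN11_*_INTR_MASK register carries two engine instances,
	 * one per 16-bit half.  dmask = irqs << 16 | irqs unmasks both
	 * halves (e.g. VCS0+VCS1), while smask = irqs << 16 touches only
	 * the populated half on registers whose other half is reserved
	 * (RCS0_RSVD, BCS_RSVD).
	 */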
-static void icp_irq_postinstall(struct drm_device *dev)
+static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
u32 mask = SDE_GMBUS_ICP;
WARN_ON(I915_READ(SDEIER) != 0);
@@ -4282,32 +4294,27 @@ static void icp_irq_postinstall(struct drm_device *dev)
icp_hpd_detection_setup(dev_priv);
}
-static int gen11_irq_postinstall(struct drm_device *dev)
+static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- icp_irq_postinstall(dev);
+ icp_irq_postinstall(dev_priv);
- gen11_gt_irq_postinstall(dev_priv);
+ gen11_gt_irq_postinstall(&dev_priv->gt);
gen8_de_irq_postinstall(dev_priv);
GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(dev_priv->uncore.regs);
+ gen11_master_intr_enable(uncore->regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
-
- return 0;
}
-static int cherryview_irq_postinstall(struct drm_device *dev)
+static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
gen8_gt_irq_postinstall(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
@@ -4317,13 +4324,10 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
-
- return 0;
}
-static void i8xx_irq_reset(struct drm_device *dev)
+static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_pipestat_irq_reset(dev_priv);
@@ -4331,9 +4335,8 @@ static void i8xx_irq_reset(struct drm_device *dev)
GEN2_IRQ_RESET(uncore);
}
-static int i8xx_irq_postinstall(struct drm_device *dev)
+static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
@@ -4362,8 +4365,6 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
spin_unlock_irq(&dev_priv->irq_lock);
-
- return 0;
}
static void i8xx_error_irq_ack(struct drm_i915_private *i915,
@@ -4444,8 +4445,7 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -4488,9 +4488,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
return ret;
}
-static void i915_irq_reset(struct drm_device *dev)
+static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
if (I915_HAS_HOTPLUG(dev_priv)) {
@@ -4503,9 +4502,8 @@ static void i915_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(uncore, GEN2_);
}
-static int i915_irq_postinstall(struct drm_device *dev)
+static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
@@ -4543,14 +4541,11 @@ static int i915_irq_postinstall(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
i915_enable_asle_pipestat(dev_priv);
-
- return 0;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -4601,9 +4596,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
return ret;
}
-static void i965_irq_reset(struct drm_device *dev)
+static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4614,9 +4608,8 @@ static void i965_irq_reset(struct drm_device *dev)
GEN3_IRQ_RESET(uncore, GEN2_);
}
-static int i965_irq_postinstall(struct drm_device *dev)
+static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
u32 error_mask;
@@ -4666,8 +4659,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
spin_unlock_irq(&dev_priv->irq_lock);
i915_enable_asle_pipestat(dev_priv);
-
- return 0;
}
static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
@@ -4697,8 +4688,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
- struct drm_device *dev = arg;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = arg;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@@ -4775,8 +4765,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
for (i = 0; i < MAX_L3_SLICES; ++i)
dev_priv->l3_parity.remap_info[i] = NULL;
- if (HAS_GUC_SCHED(dev_priv) && INTEL_GEN(dev_priv) < 11)
- dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
+ /* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
+ if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
+ dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
@@ -4805,11 +4796,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 8)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- dev->driver->get_vblank_counter = g4x_get_vblank_counter;
- else if (INTEL_GEN(dev_priv) >= 3)
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
-
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
@@ -4831,86 +4817,18 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
- dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
- dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
-
- if (IS_CHERRYVIEW(dev_priv)) {
- dev->driver->irq_handler = cherryview_irq_handler;
- dev->driver->irq_preinstall = cherryview_irq_reset;
- dev->driver->irq_postinstall = cherryview_irq_postinstall;
- dev->driver->irq_uninstall = cherryview_irq_reset;
- dev->driver->enable_vblank = i965_enable_vblank;
- dev->driver->disable_vblank = i965_disable_vblank;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- dev->driver->irq_handler = valleyview_irq_handler;
- dev->driver->irq_preinstall = valleyview_irq_reset;
- dev->driver->irq_postinstall = valleyview_irq_postinstall;
- dev->driver->irq_uninstall = valleyview_irq_reset;
- dev->driver->enable_vblank = i965_enable_vblank;
- dev->driver->disable_vblank = i965_disable_vblank;
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (INTEL_GEN(dev_priv) >= 11) {
- dev->driver->irq_handler = gen11_irq_handler;
- dev->driver->irq_preinstall = gen11_irq_reset;
- dev->driver->irq_postinstall = gen11_irq_postinstall;
- dev->driver->irq_uninstall = gen11_irq_reset;
- dev->driver->enable_vblank = gen8_enable_vblank;
- dev->driver->disable_vblank = gen8_disable_vblank;
- dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
- } else if (INTEL_GEN(dev_priv) >= 8) {
- dev->driver->irq_handler = gen8_irq_handler;
- dev->driver->irq_preinstall = gen8_irq_reset;
- dev->driver->irq_postinstall = gen8_irq_postinstall;
- dev->driver->irq_uninstall = gen8_irq_reset;
- dev->driver->enable_vblank = gen8_enable_vblank;
- dev->driver->disable_vblank = gen8_disable_vblank;
- if (IS_GEN9_LP(dev_priv))
+ if (HAS_GMCH(dev_priv)) {
+ if (I915_HAS_HOTPLUG(dev_priv))
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
+ else if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- dev->driver->irq_handler = ironlake_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_reset;
- dev->driver->irq_postinstall = ironlake_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_reset;
- dev->driver->enable_vblank = ironlake_enable_vblank;
- dev->driver->disable_vblank = ironlake_disable_vblank;
- dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
- } else {
- if (IS_GEN(dev_priv, 2)) {
- dev->driver->irq_preinstall = i8xx_irq_reset;
- dev->driver->irq_postinstall = i8xx_irq_postinstall;
- dev->driver->irq_handler = i8xx_irq_handler;
- dev->driver->irq_uninstall = i8xx_irq_reset;
- dev->driver->enable_vblank = i8xx_enable_vblank;
- dev->driver->disable_vblank = i8xx_disable_vblank;
- } else if (IS_I945GM(dev_priv)) {
- dev->driver->irq_preinstall = i915_irq_reset;
- dev->driver->irq_postinstall = i915_irq_postinstall;
- dev->driver->irq_uninstall = i915_irq_reset;
- dev->driver->irq_handler = i915_irq_handler;
- dev->driver->enable_vblank = i945gm_enable_vblank;
- dev->driver->disable_vblank = i945gm_disable_vblank;
- } else if (IS_GEN(dev_priv, 3)) {
- dev->driver->irq_preinstall = i915_irq_reset;
- dev->driver->irq_postinstall = i915_irq_postinstall;
- dev->driver->irq_uninstall = i915_irq_reset;
- dev->driver->irq_handler = i915_irq_handler;
- dev->driver->enable_vblank = i8xx_enable_vblank;
- dev->driver->disable_vblank = i8xx_disable_vblank;
- } else {
- dev->driver->irq_preinstall = i965_irq_reset;
- dev->driver->irq_postinstall = i965_irq_postinstall;
- dev->driver->irq_uninstall = i965_irq_reset;
- dev->driver->irq_handler = i965_irq_handler;
- dev->driver->enable_vblank = i965_enable_vblank;
- dev->driver->disable_vblank = i965_disable_vblank;
- }
- if (I915_HAS_HOTPLUG(dev_priv))
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
}
}
@@ -4931,6 +4849,75 @@ void intel_irq_fini(struct drm_i915_private *i915)
kfree(i915->l3_parity.remap_info[i]);
}
+static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
+{
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ return cherryview_irq_handler;
+ else if (IS_VALLEYVIEW(dev_priv))
+ return valleyview_irq_handler;
+ else if (IS_GEN(dev_priv, 4))
+ return i965_irq_handler;
+ else if (IS_GEN(dev_priv, 3))
+ return i915_irq_handler;
+ else
+ return i8xx_irq_handler;
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ return gen11_irq_handler;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ return gen8_irq_handler;
+ else
+ return ironlake_irq_handler;
+ }
+}
+
+static void intel_irq_reset(struct drm_i915_private *dev_priv)
+{
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_reset(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_reset(dev_priv);
+ else if (IS_GEN(dev_priv, 4))
+ i965_irq_reset(dev_priv);
+ else if (IS_GEN(dev_priv, 3))
+ i915_irq_reset(dev_priv);
+ else
+ i8xx_irq_reset(dev_priv);
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ gen11_irq_reset(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_irq_reset(dev_priv);
+ else
+ ironlake_irq_reset(dev_priv);
+ }
+}
+
+static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_irq_postinstall(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv))
+ valleyview_irq_postinstall(dev_priv);
+ else if (IS_GEN(dev_priv, 4))
+ i965_irq_postinstall(dev_priv);
+ else if (IS_GEN(dev_priv, 3))
+ i915_irq_postinstall(dev_priv);
+ else
+ i8xx_irq_postinstall(dev_priv);
+ } else {
+ if (INTEL_GEN(dev_priv) >= 11)
+ gen11_irq_postinstall(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 8)
+ gen8_irq_postinstall(dev_priv);
+ else
+ ironlake_irq_postinstall(dev_priv);
+ }
+}
+
/**
* intel_irq_install - enables the hardware interrupt
* @dev_priv: i915 device instance
@@ -4944,6 +4931,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
*/
int intel_irq_install(struct drm_i915_private *dev_priv)
{
+ int irq = dev_priv->drm.pdev->irq;
+ int ret;
+
/*
* We enable some interrupt sources in our postinstall hooks, so mark
* interrupts as enabled _before_ actually enabling them to avoid
@@ -4951,7 +4941,20 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
dev_priv->runtime_pm.irqs_enabled = true;
- return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
+ dev_priv->drm.irq_enabled = true;
+
+ intel_irq_reset(dev_priv);
+
+ ret = request_irq(irq, intel_irq_handler(dev_priv),
+ IRQF_SHARED, DRIVER_NAME, dev_priv);
+ if (ret < 0) {
+ dev_priv->drm.irq_enabled = false;
+ return ret;
+ }
+
+ intel_irq_postinstall(dev_priv);
+
+ return ret;
}
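
A note on the ordering inside intel_irq_install() above:

	/*
	 * Ordering note (editorial): intel_irq_reset() runs before
	 * request_irq() because the line is registered IRQF_SHARED --
	 * the handler can be entered for another device's interrupt the
	 * moment it is installed, and must find this hardware quiesced.
	 * Our own sources are only switched on by intel_irq_postinstall()
	 * once the handler is in place.
	 */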
/**
@@ -4963,7 +4966,23 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
*/
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
- drm_irq_uninstall(&dev_priv->drm);
+ int irq = dev_priv->drm.pdev->irq;
+
+ /*
+ * FIXME we can get called twice during driver load
+ * error handling due to intel_modeset_cleanup()
+ * calling us out of sequence. Would be nice if
+ * it didn't do that...
+ */
+ if (!dev_priv->drm.irq_enabled)
+ return;
+
+ dev_priv->drm.irq_enabled = false;
+
+ intel_irq_reset(dev_priv);
+
+ free_irq(irq, dev_priv);
+
intel_hpd_cancel_work(dev_priv);
dev_priv->runtime_pm.irqs_enabled = false;
}
@@ -4977,9 +4996,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
*/
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
- dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
+ intel_irq_reset(dev_priv);
dev_priv->runtime_pm.irqs_enabled = false;
- synchronize_irq(dev_priv->drm.irq);
+ intel_synchronize_irq(dev_priv);
}
/**
@@ -4992,6 +5011,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
dev_priv->runtime_pm.irqs_enabled = true;
- dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
- dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
+ intel_irq_reset(dev_priv);
+ intel_irq_postinstall(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index cb25dd213308..8918809cd805 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -12,9 +12,10 @@
struct drm_i915_private;
struct intel_crtc;
+struct intel_guc;
-extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_irq_fini(struct drm_i915_private *dev_priv);
+void intel_irq_init(struct drm_i915_private *dev_priv);
+void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
@@ -77,8 +78,8 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
-void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen6_mask_pm_irq(struct intel_gt *gt, u32 mask);
+void gen6_unmask_pm_irq(struct intel_gt *gt, u32 mask);
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -102,16 +103,40 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
return dev_priv->runtime_pm.irqs_enabled;
}
+static inline void intel_synchronize_irq(struct drm_i915_private *i915)
+{
+ synchronize_irq(i915->drm.pdev->irq);
+}
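
A hedged usage sketch for the new helper, mirroring the disable paths in i915_irq.c above (the helper names are from this patch; the combination is illustrative, not proposed driver code):

	static inline void example_quiesce_pm_irq(struct intel_gt *gt, u32 mask)
	{
		/* Mask the source under the lock... */
		spin_lock_irq(&gt->i915->irq_lock);
		gen6_mask_pm_irq(gt, mask);
		spin_unlock_irq(&gt->i915->irq_lock);

		/* ...then wait out any handler instance already running. */
		intel_synchronize_irq(gt->i915);
	}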
+
int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
-void gen11_reset_guc_interrupts(struct drm_i915_private *i915);
-void gen11_enable_guc_interrupts(struct drm_i915_private *i915);
-void gen11_disable_guc_interrupts(struct drm_i915_private *i915);
+void gen9_reset_guc_interrupts(struct intel_guc *guc);
+void gen9_enable_guc_interrupts(struct intel_guc *guc);
+void gen9_disable_guc_interrupts(struct intel_guc *guc);
+void gen11_reset_guc_interrupts(struct intel_guc *guc);
+void gen11_enable_guc_interrupts(struct intel_guc *guc);
+void gen11_disable_guc_interrupts(struct intel_guc *guc);
+
+bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
+u32 i915_get_vblank_counter(struct drm_crtc *crtc);
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
+
+int i8xx_enable_vblank(struct drm_crtc *crtc);
+int i945gm_enable_vblank(struct drm_crtc *crtc);
+int i965_enable_vblank(struct drm_crtc *crtc);
+int ilk_enable_vblank(struct drm_crtc *crtc);
+int bdw_enable_vblank(struct drm_crtc *crtc);
+void i8xx_disable_vblank(struct drm_crtc *crtc);
+void i945gm_disable_vblank(struct drm_crtc *crtc);
+void i965_disable_vblank(struct drm_crtc *crtc);
+void ilk_disable_vblank(struct drm_crtc *crtc);
+void bdw_disable_vblank(struct drm_crtc *crtc);
#endif /* __I915_IRQ_H__ */
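
With the drm_driver-level vblank hooks deleted from intel_irq_init(), the per-crtc functions exported above are presumably wired into struct drm_crtc_funcs elsewhere in the driver; the instance below is an assumption for illustration, though the field names are real drm_crtc_funcs members:

	static const struct drm_crtc_funcs example_bdw_crtc_funcs = {
		/* ...other drm_crtc_funcs members elided... */
		.get_vblank_counter = g4x_get_vblank_counter,
		.enable_vblank = bdw_enable_vblank,
		.disable_vblank = bdw_disable_vblank,
	};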
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 5b07766a1c26..296452f9efe4 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -169,8 +169,9 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
-i915_param_named(enable_dpcd_backlight, bool, 0600,
- "Enable support for DPCD backlight control (default:false)");
+i915_param_named(enable_dpcd_backlight, int, 0600,
+ "Enable support for DPCD backlight control"
+ "(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index a4770ce46bd2..d29ade3b7de6 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -64,6 +64,7 @@ struct drm_printer;
param(int, reset, 2) \
param(unsigned int, inject_load_failure, 0) \
param(int, fastboot, -1) \
+ param(int, enable_dpcd_backlight, 0) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
/* leave bools at the end to not create holes */ \
param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
@@ -76,7 +77,6 @@ struct drm_printer;
param(bool, verbose_state_checks, true) \
param(bool, nuclear_pageflip, false) \
param(bool, enable_dp_mst, true) \
- param(bool, enable_dpcd_backlight, false) \
param(bool, enable_gvt, false)
#define MEMBER(T, member, ...) T member;
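
For context, the param() table above is an X-macro list: each row expands with whichever helper macro is currently bound, so moving enable_dpcd_backlight out of the trailing bool group changes the generated struct field. A sketch of the expansion (the I915_PARAMS_FOR_EACH name is an assumption, since the expansion site is outside this hunk):

	struct i915_params_example {
		I915_PARAMS_FOR_EACH(MEMBER)
		/* param(int, enable_dpcd_backlight, 0)
		 *   -> int enable_dpcd_backlight;
		 * hence "leave bools at the end to not create holes". */
	};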
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6c9f46fc3e12..bd9211b3d76e 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -595,7 +595,7 @@ static const struct intel_device_info intel_cherryview_info = {
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
.display.has_csr = 1, \
- .has_guc = 1, \
+ .has_gt_uc = 1, \
.display.has_ipc = 1, \
.ddb_size = 896
@@ -647,7 +647,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
- .has_guc = 1, \
+ .has_gt_uc = 1, \
.ppgtt_type = INTEL_PPGTT_FULL, \
.ppgtt_size = 48, \
.has_reset_engine = 1, \
@@ -761,10 +761,40 @@ static const struct intel_device_info intel_elkhartlake_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ELKHARTLAKE),
.require_force_probe = 1,
- .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
+ .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
.ppgtt_size = 36,
};
+#define GEN12_FEATURES \
+ GEN11_FEATURES, \
+ GEN(12), \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_D] = PIPE_D_OFFSET, \
+ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_D] = TRANSCODER_D_OFFSET, \
+ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+ }
+
+static const struct intel_device_info intel_tigerlake_12_info = {
+ GEN12_FEATURES,
+ PLATFORM(INTEL_TIGERLAKE),
+ .num_pipes = 4,
+ .require_force_probe = 1,
+ .display.has_modular_fia = 1,
+ .engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+};
+
#undef GEN
#undef PLATFORM
@@ -836,6 +866,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
INTEL_EHL_IDS(&intel_elkhartlake_info),
+ INTEL_TGL_12_IDS(&intel_tigerlake_12_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -848,7 +879,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
if (!dev) /* driver load aborted, nothing to cleanup */
return;
- i915_driver_unload(dev);
+ i915_driver_remove(dev);
drm_dev_put(dev);
pci_set_drvdata(pdev, NULL);
@@ -923,11 +954,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
- err = i915_driver_load(pdev, ent);
+ err = i915_driver_probe(pdev, ent);
if (err)
return err;
- if (i915_inject_load_failure()) {
+ if (i915_inject_probe_failure()) {
i915_pci_remove(pdev);
return -ENODEV;
}
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 5140017f9a39..988a4092164e 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -200,20 +200,20 @@
#include "gt/intel_lrc_reg.h"
#include "i915_drv.h"
-#include "i915_oa_hsw.h"
-#include "i915_oa_bdw.h"
-#include "i915_oa_chv.h"
-#include "i915_oa_sklgt2.h"
-#include "i915_oa_sklgt3.h"
-#include "i915_oa_sklgt4.h"
-#include "i915_oa_bxt.h"
-#include "i915_oa_kblgt2.h"
-#include "i915_oa_kblgt3.h"
-#include "i915_oa_glk.h"
-#include "i915_oa_cflgt2.h"
-#include "i915_oa_cflgt3.h"
-#include "i915_oa_cnl.h"
-#include "i915_oa_icl.h"
+#include "oa/i915_oa_hsw.h"
+#include "oa/i915_oa_bdw.h"
+#include "oa/i915_oa_chv.h"
+#include "oa/i915_oa_sklgt2.h"
+#include "oa/i915_oa_sklgt3.h"
+#include "oa/i915_oa_sklgt4.h"
+#include "oa/i915_oa_bxt.h"
+#include "oa/i915_oa_kblgt2.h"
+#include "oa/i915_oa_kblgt3.h"
+#include "oa/i915_oa_glk.h"
+#include "oa/i915_oa_cflgt2.h"
+#include "oa/i915_oa_cflgt3.h"
+#include "oa/i915_oa_cnl.h"
+#include "oa/i915_oa_icl.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
 * is currently generally designed assuming the largest 16M size is used such
 * that the overflow cases are unlikely in normal operation.
 */
@@ -1634,6 +1634,27 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
~GT_NOA_ENABLE));
}
+static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config,
+ i915_reg_t reg)
+{
+ u32 mmio = i915_mmio_reg_offset(reg);
+ int i;
+
+ /*
+ * This arbitrary default will select the 'EU FPU0 Pipeline
+ * Active' event. In the future it's anticipated that there
+ * will be an explicit 'No Event' we can select, but not yet...
+ */
+ if (!oa_config)
+ return 0;
+
+ for (i = 0; i < oa_config->flex_regs_len; i++) {
+ if (i915_mmio_reg_offset(oa_config->flex_regs[i].addr) == mmio)
+ return oa_config->flex_regs[i].value;
+ }
+
+ return 0;
+}
/*
* NB: It must always remain pointer safe to run this even if the OA unit
* has been disabled.
@@ -1667,33 +1688,138 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
GEN8_OA_COUNTER_RESUME);
for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
- u32 state_offset = ctx_flexeu0 + i * 2;
- u32 mmio = i915_mmio_reg_offset(flex_regs[i]);
+ CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i],
+ oa_config_flex_reg(oa_config, flex_regs[i]));
+ }
- /*
- * This arbitrary default will select the 'EU FPU0 Pipeline
- * Active' event. In the future it's anticipated that there
- * will be an explicit 'No Event' we can select, but not yet...
- */
- u32 value = 0;
+ CTX_REG(reg_state,
+ CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+ intel_sseu_make_rpcs(i915, &ce->sseu));
+}
- if (oa_config) {
- u32 j;
+struct flex {
+ i915_reg_t reg;
+ u32 offset;
+ u32 value;
+};
- for (j = 0; j < oa_config->flex_regs_len; j++) {
- if (i915_mmio_reg_offset(oa_config->flex_regs[j].addr) == mmio) {
- value = oa_config->flex_regs[j].value;
- break;
- }
- }
- }
+static int
+gen8_store_flex(struct i915_request *rq,
+ struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ u32 offset;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4 * count);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
+ do {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset + (flex->offset + 1) * sizeof(u32);
+ *cs++ = 0;
+ *cs++ = flex->value;
+ } while (flex++, --count);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int
+gen8_load_flex(struct i915_request *rq,
+ struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ u32 *cs;
+
+ GEM_BUG_ON(!count || count > 63);
+
+ cs = intel_ring_begin(rq, 2 * count + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(count);
+ do {
+ *cs++ = i915_mmio_reg_offset(flex->reg);
+ *cs++ = flex->value;
+ } while (flex++, --count);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gen8_modify_context(struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ struct i915_request *rq;
+ int err;
+
+ lockdep_assert_held(&ce->pin_mutex);
+
+ rq = i915_request_create(ce->engine->kernel_context);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ /* Serialise with the remote context */
+ err = intel_context_prepare_remote_request(ce, rq);
+ if (err == 0)
+ err = gen8_store_flex(rq, ce, flex, count);
+
+ i915_request_add(rq);
+ return err;
+}
+
+static int gen8_modify_self(struct intel_context *ce,
+ const struct flex *flex, unsigned int count)
+{
+ struct i915_request *rq;
+ int err;
- CTX_REG(reg_state, state_offset, flex_regs[i], value);
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = gen8_load_flex(rq, ce, flex, count);
+
+ i915_request_add(rq);
+ return err;
+}
+
+static int gen8_configure_context(struct i915_gem_context *ctx,
+ struct flex *flex, unsigned int count)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+ int err = 0;
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ GEM_BUG_ON(ce == ce->engine->kernel_context);
+
+ if (ce->engine->class != RENDER_CLASS)
+ continue;
+
+ err = intel_context_lock_pinned(ce);
+ if (err)
+ break;
+
+ flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+
+ /* Otherwise OA settings will be set upon first use */
+ if (intel_context_is_pinned(ce))
+ err = gen8_modify_context(ce, flex, count);
+
+ intel_context_unlock_pinned(ce);
+ if (err)
+ break;
}
+ i915_gem_context_unlock_engines(ctx);
- CTX_REG(reg_state,
- CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- intel_sseu_make_rpcs(i915, &ce->sseu));
+ return err;
}
/*
@@ -1720,15 +1846,42 @@ gen8_update_reg_state_unlocked(struct intel_context *ce,
*
* Note: it's only the RCS/Render context that has any OA state.
*/
-static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
+static int gen8_configure_all_contexts(struct drm_i915_private *i915,
const struct i915_oa_config *oa_config)
{
- unsigned int map_type = i915_coherent_map_type(dev_priv);
+ /* The MMIO offsets for Flex EU registers aren't contiguous */
+ const u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
+#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N))
+ struct flex regs[] = {
+ {
+ GEN8_R_PWR_CLK_STATE,
+ CTX_R_PWR_CLK_STATE,
+ },
+ {
+ GEN8_OACTXCONTROL,
+ i915->perf.oa.ctx_oactxctrl_offset,
+ ((i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ GEN8_OA_COUNTER_RESUME)
+ },
+ { EU_PERF_CNTL0, ctx_flexeuN(0) },
+ { EU_PERF_CNTL1, ctx_flexeuN(1) },
+ { EU_PERF_CNTL2, ctx_flexeuN(2) },
+ { EU_PERF_CNTL3, ctx_flexeuN(3) },
+ { EU_PERF_CNTL4, ctx_flexeuN(4) },
+ { EU_PERF_CNTL5, ctx_flexeuN(5) },
+ { EU_PERF_CNTL6, ctx_flexeuN(6) },
+ };
+#undef ctx_flexeuN
+ struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
- struct i915_request *rq;
- int ret;
+ enum intel_engine_id id;
+ int i;
+
+ for (i = 2; i < ARRAY_SIZE(regs); i++)
+ regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&i915->drm.struct_mutex);
/*
* The OA register config is setup through the context image. This image
@@ -1740,58 +1893,41 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
* this might leave small interval of time where the OA unit is
* configured at an invalid sampling period.
*
- * So far the best way to work around this issue seems to be draining
- * the GPU from any submitted work.
+ * Note that since we emit all requests from a single ring, there
+ * is still an implicit global barrier here that may cause a high
+ * priority context to wait for an otherwise independent low priority
+ * context. Contexts idle at the time of reconfiguration are not
+ * trapped behind the barrier.
*/
- ret = i915_gem_wait_for_idle(dev_priv,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- /* Update all contexts now that we've stalled the submission. */
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct i915_gem_engines_iter it;
- struct intel_context *ce;
-
- for_each_gem_engine(ce,
- i915_gem_context_lock_engines(ctx),
- it) {
- u32 *regs;
+ list_for_each_entry(ctx, &i915->contexts.list, link) {
+ int err;
- if (ce->engine->class != RENDER_CLASS)
- continue;
-
- /* OA settings will be set upon first use */
- if (!ce->state)
- continue;
-
- regs = i915_gem_object_pin_map(ce->state->obj,
- map_type);
- if (IS_ERR(regs)) {
- i915_gem_context_unlock_engines(ctx);
- return PTR_ERR(regs);
- }
-
- ce->state->obj->mm.dirty = true;
- regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
-
- gen8_update_reg_state_unlocked(ce, regs, oa_config);
+ if (ctx == i915->kernel_context)
+ continue;
- i915_gem_object_unpin_map(ce->state->obj);
- }
- i915_gem_context_unlock_engines(ctx);
+ err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
+ if (err)
+ return err;
}
/*
- * Apply the configuration by doing one context restore of the edited
- * context image.
+ * After updating all other contexts, we need to modify ourselves.
+ * If we don't modify the kernel_context, we do not get events while
+ * idle.
*/
- rq = i915_request_create(dev_priv->engine[RCS0]->kernel_context);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ for_each_engine(engine, i915, id) {
+ struct intel_context *ce = engine->kernel_context;
+ int err;
- i915_request_add(rq);
+ if (engine->class != RENDER_CLASS)
+ continue;
+
+ regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu);
+
+ err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs));
+ if (err)
+ return err;
+ }
return 0;
}
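The new scheme above replaces draining the GPU with per-context updates driven by a table of {register, context offset, value} entries whose values are filled from the OA config, defaulting to zero. A rough standalone sketch of that fill step, with made-up register numbers rather than the driver's:

#include <stdio.h>

struct reg_val { unsigned int addr; unsigned int value; };
struct flex { unsigned int reg; unsigned int offset; unsigned int value; };

/* Return the configured value for @reg, or 0 ('no event') if absent. */
static unsigned int config_lookup(const struct reg_val *cfg, unsigned int len,
				  unsigned int reg)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		if (cfg[i].addr == reg)
			return cfg[i].value;
	return 0;
}

int main(void)
{
	const struct reg_val cfg[] = { { 0xe458, 0x1234 } };
	struct flex regs[] = {
		{ 0xe458, 0x2d0, 0 },
		{ 0xe558, 0x2d2, 0 },
	};
	unsigned int i;

	/* Fill the table once; it can then be applied to every context. */
	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		regs[i].value = config_lookup(cfg, 1, regs[i].reg);
		printf("reg %#x @ %#x -> %#x\n",
		       regs[i].reg, regs[i].offset, regs[i].value);
	}
	return 0;
}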
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 8fe46ee920a0..eff86483bec0 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -102,10 +102,8 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
/*
* Also, when software busyness tracking is available we do not
* need the timer for the I915_SAMPLE_BUSY counter.
- *
- * Use RCS as proxy for all engines.
*/
- else if (intel_engine_supports_stats(i915->engine[RCS0]))
+ else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
enable &= ~BIT(I915_SAMPLE_BUSY);
/*
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 49709de69875..b02dea17dcab 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -17,6 +17,16 @@ enum {
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+ /*
+ * Requests containing performance queries must not be preempted by
+ * another context. They get scheduled with their default priority and
+ * once they reach the execlist ports, we ensure that they stick on the
+ * HW until finished by pretending that they have maximum priority,
+ * i.e. nothing can have higher priority and force us to usurp the
+ * active request.
+ */
+ I915_PRIORITY_UNPREEMPTABLE = INT_MAX,
+
I915_PRIORITY_INVALID = INT_MIN
};
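A short standalone sketch of how such an 'unpreemptable' priority can be applied: the request queues at its normal priority, but while it is running the scheduler treats it as INT_MAX so no newcomer can force preemption. The helper name here is illustrative, not the driver's:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper: effective priority as seen by the preemption check. */
static int effective_prio(int prio, bool running, bool nopreempt)
{
	if (running && nopreempt)
		return INT_MAX; /* like I915_PRIORITY_UNPREEMPTABLE */
	return prio;
}

int main(void)
{
	printf("%d\n", effective_prio(0, true, true));  /* 2147483647 */
	printf("%d\n", effective_prio(0, false, true)); /* 0: queued normally */
	return 0;
}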
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 969e514916ab..683e97ac2430 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -24,6 +24,8 @@
#ifndef _I915_PVINFO_H_
#define _I915_PVINFO_H_
+#include <linux/types.h>
+
/* The MMIO offset of the shared info between guest and host emulator */
#define VGT_PVINFO_PAGE 0x78000
#define VGT_PVINFO_SIZE 0x1000
@@ -110,8 +112,9 @@ struct vgt_if {
u32 rsv7[0x200 - 24]; /* pad to one page */
} __packed;
-#define vgtif_reg(x) \
- _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
+#define vgtif_offset(x) (offsetof(struct vgt_if, x))
+
+#define vgtif_reg(x) _MMIO(VGT_PVINFO_PAGE + vgtif_offset(x))
/* vGPU display status to be used by the host side */
#define VGT_DRV_DISPLAY_NOT_READY 0
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d6483b5dc8e5..d2b76121d863 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -242,6 +242,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define _MMIO_PLL3(pll, a, b, c) _MMIO(_PICK(pll, a, b, c))
/*
* Device info offset array based helpers for groups of registers with unevenly
@@ -1793,19 +1794,21 @@ enum i915_power_well_id {
*/
#define _ICL_COMBOPHY_A 0x162000
#define _ICL_COMBOPHY_B 0x6C000
-#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \
- _ICL_COMBOPHY_B)
+#define _EHL_COMBOPHY_C 0x160000
+#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \
+ _ICL_COMBOPHY_B, \
+ _EHL_COMBOPHY_C)
/* CNL/ICL Port CL_DW registers */
-#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
4 * (dw))
#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
-#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port))
+#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy))
#define CL_POWER_DOWN_ENABLE (1 << 4)
#define SUS_CLOCK_CONFIG (3 << 0)
-#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port))
+#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy))
#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
@@ -1820,23 +1823,23 @@ enum i915_power_well_id {
#define PWR_DOWN_LN_MASK (0xf << 4)
#define PWR_DOWN_LN_SHIFT 4
-#define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port))
+#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy))
#define ICL_LANE_ENABLE_AUX (1 << 0)
/* CNL/ICL Port COMP_DW registers */
#define _ICL_PORT_COMP 0x100
-#define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_COMP + 4 * (dw))
#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
-#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port))
+#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy))
#define COMP_INIT (1 << 31)
#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
-#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port))
+#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy))
#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
-#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port))
+#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy))
#define PROCESS_INFO_DOT_0 (0 << 26)
#define PROCESS_INFO_DOT_1 (1 << 26)
#define PROCESS_INFO_DOT_4 (2 << 26)
@@ -1848,14 +1851,14 @@ enum i915_power_well_id {
#define VOLTAGE_INFO_MASK (3 << 24)
#define VOLTAGE_INFO_SHIFT 24
-#define ICL_PORT_COMP_DW8(port) _MMIO(_ICL_PORT_COMP_DW(8, port))
+#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy))
#define IREFGEN (1 << 24)
#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
-#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port))
+#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy))
#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
-#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port))
+#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy))
/* CNL/ICL Port PCS registers */
#define _CNL_PORT_PCS_DW1_GRP_AE 0x162304
@@ -1868,14 +1871,14 @@ enum i915_power_well_id {
#define _CNL_PORT_PCS_DW1_LN0_C 0x162C04
#define _CNL_PORT_PCS_DW1_LN0_D 0x162E04
#define _CNL_PORT_PCS_DW1_LN0_F 0x162804
-#define CNL_PORT_PCS_DW1_GRP(port) _MMIO(_PICK(port, \
+#define CNL_PORT_PCS_DW1_GRP(phy) _MMIO(_PICK(phy, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_B, \
_CNL_PORT_PCS_DW1_GRP_C, \
_CNL_PORT_PCS_DW1_GRP_D, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_F))
-#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \
+#define CNL_PORT_PCS_DW1_LN0(phy) _MMIO(_PICK(phy, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_B, \
_CNL_PORT_PCS_DW1_LN0_C, \
@@ -1886,16 +1889,18 @@ enum i915_power_well_id {
#define _ICL_PORT_PCS_AUX 0x300
#define _ICL_PORT_PCS_GRP 0x600
#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100)
-#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_PCS_AUX + 4 * (dw))
-#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_PCS_GRP + 4 * (dw))
-#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_PCS_LN(ln) + 4 * (dw))
-#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
-#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
-#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
+#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
+#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
+#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy))
#define COMMON_KEEPER_EN (1 << 26)
+#define LATENCY_OPTIM_MASK (0x3 << 2)
+#define LATENCY_OPTIM_VAL(x) ((x) << 2)
/* CNL/ICL Port TX registers */
#define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340
@@ -1929,18 +1934,18 @@ enum i915_power_well_id {
#define _ICL_PORT_TX_GRP 0x680
#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100)
-#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_TX_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_TX_AUX + 4 * (dw))
-#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_TX_GRP + 4 * (dw))
-#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
_ICL_PORT_TX_LN(ln) + 4 * (dw))
#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
-#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port))
-#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port))
-#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
+#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
+#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
+#define ICL_PORT_TX_DW2_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy))
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
@@ -1957,10 +1962,10 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
-#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
-#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
-#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
-#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
+#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
+#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
+#define ICL_PORT_TX_DW4_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy))
+#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1971,9 +1976,9 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
-#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port))
-#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port))
-#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
+#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
+#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
+#define ICL_PORT_TX_DW5_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy))
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -1984,13 +1989,17 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
-#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
-#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
-#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
-#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
+#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
+#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
+#define ICL_PORT_TX_DW7_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy))
+#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
+#define _ICL_DPHY_CHKN_REG 0x194
+#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG)
+#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7)
+
#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
@@ -2195,9 +2204,13 @@ enum i915_power_well_id {
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
#define FIA1_BASE 0x163000
+#define FIA2_BASE 0x16E000
+#define FIA3_BASE 0x16F000
+#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE)
+#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off))
/* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0)
+#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0)
#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
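The _MMIO_FIA() helpers above simply add a per-instance base to a fixed register offset. A tiny standalone sketch of the addressing, using the bases defined in this hunk:

#include <stdio.h>

#define FIA1_BASE 0x163000u
#define FIA2_BASE 0x16E000u
#define FIA3_BASE 0x16F000u

/* Pick the MMIO base for a given Flexible IO Adapter instance. */
static unsigned int fia_base(int fia)
{
	const unsigned int base[] = { FIA1_BASE, FIA2_BASE, FIA3_BASE };
	return base[fia];
}

int main(void)
{
	/* PORT_TX_DFLEXDPMLE1 lives at offset 0x8C0 within each FIA */
	printf("%#x\n", fia_base(0) + 0x8C0); /* 0x1638c0 */
	printf("%#x\n", fia_base(2) + 0x8C0); /* 0x16f8c0 */
	return 0;
}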
@@ -2513,13 +2526,19 @@ enum i915_power_well_id {
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4)
-#define RING_FORCE_TO_NONPRIV_RW (0 << 28) /* CFL+ & Gen11+ */
-#define RING_FORCE_TO_NONPRIV_RD (1 << 28)
-#define RING_FORCE_TO_NONPRIV_WR (2 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */
+#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28)
+#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28)
#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */
#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0)
#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0)
#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0)
+#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0)
+#define RING_FORCE_TO_NONPRIV_MASK_VALID \
+ (RING_FORCE_TO_NONPRIV_RANGE_MASK \
+ | RING_FORCE_TO_NONPRIV_ACCESS_MASK)
#define RING_MAX_NONPRIV_SLOTS 12
#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
@@ -3246,8 +3265,10 @@ enum i915_power_well_id {
#define GMBUS_PIN_10_TC2_ICP 10
#define GMBUS_PIN_11_TC3_ICP 11
#define GMBUS_PIN_12_TC4_ICP 12
+#define GMBUS_PIN_13_TC5_TGP 13
+#define GMBUS_PIN_14_TC6_TGP 14
-#define GMBUS_NUM_PINS 13 /* including 0 */
+#define GMBUS_NUM_PINS 15 /* including 0 */
#define GMBUS1 _MMIO(dev_priv->gpio_mmio_base + 0x5104) /* command/status */
#define GMBUS_SW_CLR_INT (1 << 31)
#define GMBUS_SW_RDY (1 << 30)
@@ -4209,6 +4230,7 @@ enum {
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
#define CHV_TRANSCODER_C_OFFSET 0x63000
+#define TRANSCODER_D_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
#define TRANSCODER_DSI0_OFFSET 0x6b000
#define TRANSCODER_DSI1_OFFSET 0x6b800
@@ -5755,6 +5777,7 @@ enum {
#define PIPE_A_OFFSET 0x70000
#define PIPE_B_OFFSET 0x71000
#define PIPE_C_OFFSET 0x72000
+#define PIPE_D_OFFSET 0x73000
#define CHV_PIPE_C_OFFSET 0x74000
/*
* There's actually no pipe EDP. Some pipe registers have
@@ -6284,6 +6307,7 @@ enum {
#define _DSPATILEOFF 0x701A4 /* 965+ only */
#define _DSPAOFFSET 0x701A4 /* HSW */
#define _DSPASURFLIVE 0x701AC
+#define _DSPAGAMC 0x701E0
#define DSPCNTR(plane) _MMIO_PIPE2(plane, _DSPACNTR)
#define DSPADDR(plane) _MMIO_PIPE2(plane, _DSPAADDR)
@@ -6295,6 +6319,7 @@ enum {
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _MMIO_PIPE2(plane, _DSPAOFFSET)
#define DSPSURFLIVE(plane) _MMIO_PIPE2(plane, _DSPASURFLIVE)
+#define DSPGAMC(plane, i) _MMIO(_PIPE2(plane, _DSPAGAMC) + (5 - (i)) * 4) /* plane C only, 6 x u0.8 */
/* CHV pipe B blender and primary plane */
#define _CHV_BLEND_A 0x60a00
@@ -6397,6 +6422,7 @@ enum {
#define _DVSAKEYMAXVAL 0x721a0
#define _DVSATILEOFF 0x721a4
#define _DVSASURFLIVE 0x721ac
+#define _DVSAGAMC_G4X 0x721e0 /* g4x */
#define _DVSASCALE 0x72204
#define DVS_SCALE_ENABLE (1 << 31)
#define DVS_FILTER_MASK (3 << 29)
@@ -6405,7 +6431,8 @@ enum {
#define DVS_FILTER_SOFTENING (2 << 29)
#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27)
-#define _DVSAGAMC 0x72300
+#define _DVSAGAMC_ILK 0x72300 /* ilk/snb */
+#define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */
#define _DVSBCNTR 0x73180
#define _DVSBLINOFF 0x73184
@@ -6418,8 +6445,10 @@ enum {
#define _DVSBKEYMAXVAL 0x731a0
#define _DVSBTILEOFF 0x731a4
#define _DVSBSURFLIVE 0x731ac
+#define _DVSBGAMC_G4X 0x731e0 /* g4x */
#define _DVSBSCALE 0x73204
-#define _DVSBGAMC 0x73300
+#define _DVSBGAMC_ILK 0x73300 /* ilk/snb */
+#define _DVSBGAMCMAX_ILK 0x73340 /* ilk/snb */
#define DVSCNTR(pipe) _MMIO_PIPE(pipe, _DVSACNTR, _DVSBCNTR)
#define DVSLINOFF(pipe) _MMIO_PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
@@ -6433,6 +6462,9 @@ enum {
#define DVSKEYVAL(pipe) _MMIO_PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _MMIO_PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
#define DVSSURFLIVE(pipe) _MMIO_PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
+#define DVSGAMC_G4X(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_G4X, _DVSBGAMC_G4X) + (5 - (i)) * 4) /* 6 x u0.8 */
+#define DVSGAMC_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMC_ILK, _DVSBGAMC_ILK) + (i) * 4) /* 16 x u0.10 */
+#define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1 << 31)
@@ -6457,7 +6489,7 @@ enum {
#define SPRITE_YUV_ORDER_VYUY (3 << 16)
#define SPRITE_ROTATE_180 (1 << 15)
#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14)
-#define SPRITE_INT_GAMMA_ENABLE (1 << 13)
+#define SPRITE_INT_GAMMA_DISABLE (1 << 13)
#define SPRITE_TILED (1 << 10)
#define SPRITE_DEST_KEY (1 << 2)
#define _SPRA_LINOFF 0x70284
@@ -6480,6 +6512,8 @@ enum {
#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */
#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27)
#define _SPRA_GAMC 0x70400
+#define _SPRA_GAMC16 0x70440
+#define _SPRA_GAMC17 0x7044c
#define _SPRB_CTL 0x71280
#define _SPRB_LINOFF 0x71284
@@ -6495,6 +6529,8 @@ enum {
#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
+#define _SPRB_GAMC16 0x71440
+#define _SPRB_GAMC17 0x7144c
#define SPRCTL(pipe) _MMIO_PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
#define SPRLINOFF(pipe) _MMIO_PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
@@ -6508,7 +6544,9 @@ enum {
#define SPRTILEOFF(pipe) _MMIO_PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
#define SPROFFSET(pipe) _MMIO_PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _MMIO_PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
-#define SPRGAMC(pipe) _MMIO_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+#define SPRGAMC(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) + (i) * 4) /* 16 x u0.10 */
+#define SPRGAMC16(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC16, _SPRB_GAMC16) + (i) * 4) /* 3 x u1.10 */
+#define SPRGAMC17(pipe, i) _MMIO(_PIPE(pipe, _SPRA_GAMC17, _SPRB_GAMC17) + (i) * 4) /* 3 x u2.10 */
#define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
#define _SPACNTR (VLV_DISPLAY_BASE + 0x72180)
@@ -6551,7 +6589,7 @@ enum {
#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
#define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */
#define SP_SH_COS(x) (x) /* u3.7 */
-#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
+#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0)
#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
#define _SPBLINOFF (VLV_DISPLAY_BASE + 0x72284)
@@ -6566,10 +6604,12 @@ enum {
#define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
-#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
+#define _SPBGAMC (VLV_DISPLAY_BASE + 0x722e0)
+#define _VLV_SPR(pipe, plane_id, reg_a, reg_b) \
+ _PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
#define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
- _MMIO_PIPE((pipe) * 2 + (plane_id) - PLANE_SPRITE0, (reg_a), (reg_b))
+ _MMIO(_VLV_SPR((pipe), (plane_id), (reg_a), (reg_b)))
#define SPCNTR(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACNTR, _SPBCNTR)
#define SPLINOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPALINOFF, _SPBLINOFF)
@@ -6584,7 +6624,7 @@ enum {
#define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
-#define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
+#define SPGAMC(pipe, plane_id, i) _MMIO(_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC) + (5 - (i)) * 4) /* 6 x u0.10 */
/*
* CHV pipe B sprite CSC
@@ -7317,16 +7357,6 @@ enum {
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
#define GEN8_GT_IER(which) _MMIO(0x4430c + (0x10 * (which)))
-#define GEN9_GUC_TO_HOST_INT_EVENT (1 << 31)
-#define GEN9_GUC_EXEC_ERROR_EVENT (1 << 30)
-#define GEN9_GUC_DISPLAY_EVENT (1 << 29)
-#define GEN9_GUC_SEMA_SIGNAL_EVENT (1 << 28)
-#define GEN9_GUC_IOMMU_MSG_EVENT (1 << 27)
-#define GEN9_GUC_DB_RING_EVENT (1 << 26)
-#define GEN9_GUC_DMA_DONE_EVENT (1 << 25)
-#define GEN9_GUC_FATAL_ERROR_EVENT (1 << 24)
-#define GEN9_GUC_NOTIFICATION_EVENT (1 << 23)
-
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */
@@ -7606,6 +7636,7 @@ enum {
#define SKL_DFSM_PIPE_A_DISABLE (1 << 30)
#define SKL_DFSM_PIPE_B_DISABLE (1 << 21)
#define SKL_DFSM_PIPE_C_DISABLE (1 << 28)
+#define TGL_DFSM_PIPE_D_DISABLE (1 << 22)
#define SKL_DSSM _MMIO(0x51004)
#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31)
@@ -7690,6 +7721,9 @@ enum {
#define GEN7_L3SQCREG4 _MMIO(0xb034)
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1 << 27)
+#define GEN11_SCRATCH2 _MMIO(0xb140)
+#define GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE (1 << 19)
+
#define GEN8_L3SQCREG4 _MMIO(0xb118)
#define GEN11_LQSC_CLEAN_EVICT_DISABLE (1 << 6)
#define GEN8_LQSC_RO_PERF_DIS (1 << 27)
@@ -9119,7 +9153,8 @@ enum {
#define GLK_PW_CTL_IDX_DDI_A 1
#define SKL_PW_CTL_IDX_MISC_IO 0
-/* ICL - power wells */
+/* ICL/TGL - power wells */
+#define TGL_PW_CTL_IDX_PW_5 4
#define ICL_PW_CTL_IDX_PW_4 3
#define ICL_PW_CTL_IDX_PW_3 2
#define ICL_PW_CTL_IDX_PW_2 1
@@ -9128,13 +9163,25 @@ enum {
#define ICL_PWR_WELL_CTL_AUX1 _MMIO(0x45440)
#define ICL_PWR_WELL_CTL_AUX2 _MMIO(0x45444)
#define ICL_PWR_WELL_CTL_AUX4 _MMIO(0x4544C)
+#define TGL_PW_CTL_IDX_AUX_TBT6 14
+#define TGL_PW_CTL_IDX_AUX_TBT5 13
+#define TGL_PW_CTL_IDX_AUX_TBT4 12
#define ICL_PW_CTL_IDX_AUX_TBT4 11
+#define TGL_PW_CTL_IDX_AUX_TBT3 11
#define ICL_PW_CTL_IDX_AUX_TBT3 10
+#define TGL_PW_CTL_IDX_AUX_TBT2 10
#define ICL_PW_CTL_IDX_AUX_TBT2 9
+#define TGL_PW_CTL_IDX_AUX_TBT1 9
#define ICL_PW_CTL_IDX_AUX_TBT1 8
+#define TGL_PW_CTL_IDX_AUX_TC6 8
+#define TGL_PW_CTL_IDX_AUX_TC5 7
+#define TGL_PW_CTL_IDX_AUX_TC4 6
#define ICL_PW_CTL_IDX_AUX_F 5
+#define TGL_PW_CTL_IDX_AUX_TC3 5
#define ICL_PW_CTL_IDX_AUX_E 4
+#define TGL_PW_CTL_IDX_AUX_TC2 4
#define ICL_PW_CTL_IDX_AUX_D 3
+#define TGL_PW_CTL_IDX_AUX_TC1 3
#define ICL_PW_CTL_IDX_AUX_C 2
#define ICL_PW_CTL_IDX_AUX_B 1
#define ICL_PW_CTL_IDX_AUX_A 0
@@ -9142,9 +9189,15 @@ enum {
#define ICL_PWR_WELL_CTL_DDI1 _MMIO(0x45450)
#define ICL_PWR_WELL_CTL_DDI2 _MMIO(0x45454)
#define ICL_PWR_WELL_CTL_DDI4 _MMIO(0x4545C)
+#define TGL_PW_CTL_IDX_DDI_TC6 8
+#define TGL_PW_CTL_IDX_DDI_TC5 7
+#define TGL_PW_CTL_IDX_DDI_TC4 6
#define ICL_PW_CTL_IDX_DDI_F 5
+#define TGL_PW_CTL_IDX_DDI_TC3 5
#define ICL_PW_CTL_IDX_DDI_E 4
+#define TGL_PW_CTL_IDX_DDI_TC2 4
#define ICL_PW_CTL_IDX_DDI_D 3
+#define TGL_PW_CTL_IDX_DDI_TC1 3
#define ICL_PW_CTL_IDX_DDI_C 2
#define ICL_PW_CTL_IDX_DDI_B 1
#define ICL_PW_CTL_IDX_DDI_A 0
@@ -9197,9 +9250,11 @@ enum skl_power_gate {
#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
#define _ICL_AUX_ANAOVRD1_A 0x162398
#define _ICL_AUX_ANAOVRD1_B 0x6C398
+#define _TGL_AUX_ANAOVRD1_C 0x160398
#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
_ICL_AUX_ANAOVRD1_A, \
- _ICL_AUX_ANAOVRD1_B))
+ _ICL_AUX_ANAOVRD1_B, \
+ _TGL_AUX_ANAOVRD1_C))
#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
@@ -9321,6 +9376,7 @@ enum skl_power_gate {
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
#define _TRANS_DDI_FUNC_CTL_C 0x62400
+#define _TRANS_DDI_FUNC_CTL_D 0x63400
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
@@ -9328,10 +9384,12 @@ enum skl_power_gate {
#define TRANS_DDI_FUNC_ENABLE (1 << 31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
-#define TRANS_DDI_PORT_MASK (7 << 28)
#define TRANS_DDI_PORT_SHIFT 28
-#define TRANS_DDI_SELECT_PORT(x) ((x) << 28)
-#define TRANS_DDI_PORT_NONE (0 << 28)
+#define TGL_TRANS_DDI_PORT_SHIFT 27
+#define TRANS_DDI_PORT_MASK (7 << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
+#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
+#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
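The Tiger Lake variant above moves the DDI port field down to bit 27, widens it to four bits, and biases the value by one so that an all-zero field still reads as 'no port'. A standalone sketch of the encode/decode:

#include <stdio.h>

#define TGL_SHIFT 27
#define TGL_MASK  (0xfu << TGL_SHIFT)

static unsigned int tgl_select_port(unsigned int port)
{
	return (port + 1) << TGL_SHIFT; /* biased: field 0 means 'none' */
}

static int tgl_decode_port(unsigned int ctl)
{
	unsigned int field = (ctl & TGL_MASK) >> TGL_SHIFT;
	return field ? (int)field - 1 : -1; /* -1: no port attached */
}

int main(void)
{
	unsigned int ctl = tgl_select_port(2); /* e.g. PORT_C as index 2 */
	printf("port=%d\n", tgl_decode_port(ctl)); /* 2 */
	printf("port=%d\n", tgl_decode_port(0));   /* -1 */
	return 0;
}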
@@ -9541,6 +9599,9 @@ enum skl_power_gate {
/* For each transcoder, we need to select the corresponding port clock */
#define TRANS_CLK_SEL_DISABLED (0x0 << 29)
#define TRANS_CLK_SEL_PORT(x) (((x) + 1) << 29)
+#define TGL_TRANS_CLK_SEL_DISABLED (0x0 << 28)
+#define TGL_TRANS_CLK_SEL_PORT(x) (((x) + 1) << 28)
+
#define CDCLK_FREQ _MMIO(0x46200)
@@ -9672,17 +9733,22 @@ enum skl_power_gate {
* CNL Clocks
*/
#define DPCLKA_CFGCR0 _MMIO(0x6C200)
-#define DPCLKA_CFGCR0_ICL _MMIO(0x164280)
#define DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) == PORT_F ? 23 : \
(port) + 10))
-#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port) (1 << ((port) + 10))
-#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) == PORT_TC4 ? \
- 21 : (tc_port) + 12))
#define DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
(port) * 2)
#define DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
#define DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
+#define ICL_DPCLKA_CFGCR0 _MMIO(0x164280)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy) (1 << _PICK(phy, 10, 11, 24))
+#define ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port) (1 << ((tc_port) < PORT_TC4 ? \
+ (tc_port) + 12 : \
+ (tc_port) - PORT_TC4 + 21))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy) ((phy) * 2)
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy) (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+#define ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy) ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
+
/* CNL PLL */
#define DPLL0_ENABLE 0x46010
#define DPLL1_ENABLE 0x46014
@@ -9887,6 +9953,7 @@ enum skl_power_gate {
#define DPLL_CFGCR1_PDIV_7 (8 << 2)
#define DPLL_CFGCR1_CENTRAL_FREQ (3 << 0)
#define DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
+#define TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
#define CNL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1)
#define _ICL_DPLL0_CFGCR0 0x164000
@@ -9899,6 +9966,22 @@ enum skl_power_gate {
#define ICL_DPLL_CFGCR1(pll) _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
_ICL_DPLL1_CFGCR1)
+#define _TGL_DPLL0_CFGCR0 0x164284
+#define _TGL_DPLL1_CFGCR0 0x16428C
+/* TODO: add DPLL4 */
+#define _TGL_TBTPLL_CFGCR0 0x16429C
+#define TGL_DPLL_CFGCR0(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
+ _TGL_DPLL1_CFGCR0, \
+ _TGL_TBTPLL_CFGCR0)
+
+#define _TGL_DPLL0_CFGCR1 0x164288
+#define _TGL_DPLL1_CFGCR1 0x164290
+/* TODO: add DPLL4 */
+#define _TGL_TBTPLL_CFGCR1 0x1642A0
+#define TGL_DPLL_CFGCR1(pll) _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
+ _TGL_DPLL1_CFGCR1, \
+ _TGL_TBTPLL_CFGCR1)
+
/* BXT display engine PLL */
#define BXT_DE_PLL_CTL _MMIO(0x6d000)
#define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */
@@ -11145,6 +11228,7 @@ enum skl_power_gate {
#define _ICL_PHY_MISC_B 0x64C04
#define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \
_ICL_PHY_MISC_B)
+#define ICL_PHY_MISC_MUX_DDID (1 << 28)
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
/* Icelake Display Stream Compression Registers */
@@ -11454,17 +11538,18 @@ enum skl_power_gate {
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
-#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0)
+#define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0)
+#define MODULAR_FIA_MASK (1 << 4)
#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
-#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890)
+#define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890)
#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
-#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894)
+#define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894)
#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a195a92d0105..8ac7d14ec8c9 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -119,6 +119,50 @@ const struct dma_fence_ops i915_fence_ops = {
.release = i915_fence_release,
};
+static void irq_execute_cb(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+}
+
+static void irq_execute_cb_hook(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ cb->hook(container_of(cb->fence, struct i915_request, submit),
+ &cb->signal->fence);
+ i915_request_put(cb->signal);
+
+ irq_execute_cb(wrk);
+}
+
+static void __notify_execute_cb(struct i915_request *rq)
+{
+ struct execute_cb *cb;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (list_empty(&rq->execute_cb))
+ return;
+
+ list_for_each_entry(cb, &rq->execute_cb, link)
+ irq_work_queue(&cb->work);
+
+ /*
+ * XXX Rollback on __i915_request_unsubmit()
+ *
+ * In the future, perhaps when we have an active time-slicing scheduler,
+ * it will be interesting to unsubmit parallel execution and remove
+ * busywaits from the GPU until their master is restarted. This is
+ * quite hairy, we have to carefully rollback the fence and do a
+ * preempt-to-idle cycle on the target engine, all the while the
+ * master execute_cb may refire.
+ */
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
+
static inline void
i915_request_remove_from_client(struct i915_request *request)
{
@@ -232,6 +276,12 @@ static bool i915_request_retire(struct i915_request *rq)
local_irq_disable();
+ /*
+ * We only loosely track inflight requests across preemption,
+ * and so we may find ourselves attempting to retire a _completed_
+ * request that we have removed from the HW and put back on a run
+ * queue.
+ */
spin_lock(&rq->engine->active.lock);
list_del(&rq->sched.link);
spin_unlock(&rq->engine->active.lock);
@@ -242,10 +292,15 @@ static bool i915_request_retire(struct i915_request *rq)
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
i915_request_cancel_breadcrumb(rq);
- if (rq->waitboost) {
+ if (i915_request_has_waitboost(rq)) {
GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
}
+ if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
+ set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+ __notify_execute_cb(rq);
+ }
+ GEM_BUG_ON(!list_empty(&rq->execute_cb));
spin_unlock(&rq->lock);
local_irq_enable();
@@ -285,50 +340,6 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (i915_request_retire(tmp) && tmp != rq);
}
-static void irq_execute_cb(struct irq_work *wrk)
-{
- struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
- i915_sw_fence_complete(cb->fence);
- kmem_cache_free(global.slab_execute_cbs, cb);
-}
-
-static void irq_execute_cb_hook(struct irq_work *wrk)
-{
- struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
- cb->hook(container_of(cb->fence, struct i915_request, submit),
- &cb->signal->fence);
- i915_request_put(cb->signal);
-
- irq_execute_cb(wrk);
-}
-
-static void __notify_execute_cb(struct i915_request *rq)
-{
- struct execute_cb *cb;
-
- lockdep_assert_held(&rq->lock);
-
- if (list_empty(&rq->execute_cb))
- return;
-
- list_for_each_entry(cb, &rq->execute_cb, link)
- irq_work_queue(&cb->work);
-
- /*
- * XXX Rollback on __i915_request_unsubmit()
- *
- * In the future, perhaps when we have an active time-slicing scheduler,
- * it will be interesting to unsubmit parallel execution and remove
- * busywaits from the GPU until their master is restarted. This is
- * quite hairy, we have to carefully rollback the fence and do a
- * preempt-to-idle cycle on the target engine, all the while the
- * master execute_cb may refire.
- */
- INIT_LIST_HEAD(&rq->execute_cb);
-}
-
static int
__i915_request_await_execution(struct i915_request *rq,
struct i915_request *signal,
@@ -596,7 +607,7 @@ out:
struct i915_request *
__i915_request_create(struct intel_context *ce, gfp_t gfp)
{
- struct i915_timeline *tl = ce->ring->timeline;
+ struct intel_timeline *tl = ce->ring->timeline;
struct i915_request *rq;
u32 seqno;
int ret;
@@ -645,7 +656,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
}
}
- ret = i915_timeline_get_seqno(tl, rq, &seqno);
+ ret = intel_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
@@ -673,7 +684,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
- rq->waitboost = false;
+ rq->flags = 0;
rq->execution_mask = ALL_ENGINES;
INIT_LIST_HEAD(&rq->active_list);
@@ -764,7 +775,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
return 0;
signal = list_prev_entry(signal, ring_link);
- if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
+ if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
return 0;
return i915_sw_fence_await_dma_fence(&rq->submit,
@@ -818,7 +829,7 @@ emit_semaphore_wait(struct i915_request *to,
return err;
/* We need to pin the signaler's HWSP until we are finished reading. */
- err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
if (err)
return err;
@@ -929,7 +940,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
/* Squash repeated waits to the same timelines */
if (fence->context != rq->i915->mm.unordered_timeline &&
- i915_timeline_sync_is_later(rq->timeline, fence))
+ intel_timeline_sync_is_later(rq->timeline, fence))
continue;
if (dma_fence_is_i915(fence))
@@ -943,7 +954,7 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
/* Record the latest fence used against each timeline */
if (fence->context != rq->i915->mm.unordered_timeline)
- i915_timeline_sync_set(rq->timeline, fence);
+ intel_timeline_sync_set(rq->timeline, fence);
} while (--nchild);
return 0;
@@ -1081,7 +1092,7 @@ void i915_request_skip(struct i915_request *rq, int error)
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
- struct i915_timeline *timeline = rq->timeline;
+ struct intel_timeline *timeline = rq->timeline;
struct i915_request *prev;
/*
@@ -1390,8 +1401,7 @@ long i915_request_wait(struct i915_request *rq,
* serialise wait/reset with an explicit lock, we do want
* lockdep to detect potential dependency cycles.
*/
- mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map,
- 0, 0, _THIS_IP_);
+ mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
/*
* Optimistic spin before touching IRQs.
@@ -1447,8 +1457,10 @@ long i915_request_wait(struct i915_request *rq,
for (;;) {
set_current_state(state);
- if (i915_request_completed(rq))
+ if (i915_request_completed(rq)) {
+ dma_fence_signal(&rq->fence);
break;
+ }
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
@@ -1467,7 +1479,7 @@ long i915_request_wait(struct i915_request *rq,
dma_fence_remove_callback(&rq->fence, &wait.cb);
out:
- mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);
+ mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
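irq_execute_cb() above relies on the usual embedded-work pattern: the irq_work is a member of the execute_cb, and container_of() recovers the enclosing struct in the callback. A standalone sketch of the pattern, with a plain function pointer standing in for irq_work_queue():

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*fn)(struct work *); };

struct execute_cb {
	int fence_id;
	struct work work; /* embedded, like cb->work above */
};

static void cb_fn(struct work *wrk)
{
	struct execute_cb *cb = container_of(wrk, struct execute_cb, work);
	printf("completing fence %d\n", cb->fence_id);
}

int main(void)
{
	struct execute_cb cb = { .fence_id = 42, .work = { .fn = cb_fn } };
	cb.work.fn(&cb.work); /* stand-in for queueing and running the work */
	return 0;
}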
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index edbbdfec24ab..313df3c37158 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -28,6 +28,7 @@
#include <linux/dma-fence.h>
#include <linux/lockdep.h>
+#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "i915_gem.h"
@@ -40,8 +41,8 @@
struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
-struct i915_timeline;
-struct i915_timeline_cacheline;
+struct intel_timeline;
+struct intel_timeline_cacheline;
struct i915_capture_list {
struct i915_capture_list *next;
@@ -112,7 +113,7 @@ struct i915_request {
struct intel_engine_cs *engine;
struct intel_context *hw_context;
struct intel_ring *ring;
- struct i915_timeline *timeline;
+ struct intel_timeline *timeline;
struct list_head signal_link;
/*
@@ -175,7 +176,7 @@ struct i915_request {
* inside the timeline's HWSP vma, but it is only valid while this
* request has not completed and guarded by the timeline mutex.
*/
- struct i915_timeline_cacheline *hwsp_cacheline;
+ struct intel_timeline_cacheline *hwsp_cacheline;
/** Position in the ring of the start of the request */
u32 head;
@@ -215,7 +216,9 @@ struct i915_request {
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
- bool waitboost;
+ unsigned long flags;
+#define I915_REQUEST_WAITBOOST BIT(0)
+#define I915_REQUEST_NOPREEMPT BIT(1)
/** timeline->request entry for this request */
struct list_head link;
@@ -429,6 +432,17 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}
+static inline bool i915_request_has_waitboost(const struct i915_request *rq)
+{
+ return rq->flags & I915_REQUEST_WAITBOOST;
+}
+
+static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
+{
+ /* Preemption should only be disabled very rarely */
+ return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
+}
+
bool i915_retire_requests(struct drm_i915_private *i915);
#endif /* I915_REQUEST_H */
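Replacing the lone waitboost bool with a flags word, as above, makes further per-request attributes such as NOPREEMPT a one-line addition. A minimal standalone sketch of the accessor style:

#include <stdbool.h>
#include <stdio.h>

#define REQ_WAITBOOST (1ul << 0)
#define REQ_NOPREEMPT (1ul << 1)

struct request { unsigned long flags; };

static bool has_waitboost(const struct request *rq)
{
	return rq->flags & REQ_WAITBOOST;
}

int main(void)
{
	struct request rq = { .flags = REQ_WAITBOOST | REQ_NOPREEMPT };
	printf("waitboost=%d\n", has_waitboost(&rq)); /* 1 */
	return 0;
}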
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 2e9b38bdc33c..0bd452e851d8 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -179,8 +179,7 @@ static inline int rq_prio(const struct i915_request *rq)
static void kick_submission(struct intel_engine_cs *engine, int prio)
{
- const struct i915_request *inflight =
- port_request(engine->execlists.port);
+ const struct i915_request *inflight = *engine->execlists.active;
/*
* If we are already the currently executing context, don't
@@ -395,6 +394,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
list_add(&dep->wait_link, &signal->waiters_list);
list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
+ dep->waiter = node;
dep->flags = flags;
/* Keep track of whether anyone on this chain has a semaphore */
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 3e309631bd0b..aad81acba9dc 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -62,6 +62,7 @@ struct i915_sched_node {
struct i915_dependency {
struct i915_sched_node *signaler;
+ struct i915_sched_node *waiter;
struct list_head signal_link;
struct list_head wait_link;
struct list_head dfs_link;
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
index 207e21b478f2..acdf6eb9e262 100644
--- a/drivers/gpu/drm/i915/i915_selftest.h
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -66,12 +66,37 @@ struct i915_subtest {
const char *name;
};
+int __i915_nop_setup(void *data);
+int __i915_nop_teardown(int err, void *data);
+
+int __i915_live_setup(void *data);
+int __i915_live_teardown(int err, void *data);
+
+int __intel_gt_live_setup(void *data);
+int __intel_gt_live_teardown(int err, void *data);
+
int __i915_subtests(const char *caller,
+ int (*setup)(void *data),
+ int (*teardown)(int err, void *data),
const struct i915_subtest *st,
unsigned int count,
void *data);
#define i915_subtests(T, data) \
- __i915_subtests(__func__, T, ARRAY_SIZE(T), data)
+ __i915_subtests(__func__, \
+ __i915_nop_setup, __i915_nop_teardown, \
+ T, ARRAY_SIZE(T), data)
+#define i915_live_subtests(T, data) ({ \
+ typecheck(struct drm_i915_private *, data); \
+ __i915_subtests(__func__, \
+ __i915_live_setup, __i915_live_teardown, \
+ T, ARRAY_SIZE(T), data); \
+})
+#define intel_gt_live_subtests(T, data) ({ \
+ typecheck(struct intel_gt *, data); \
+ __i915_subtests(__func__, \
+ __intel_gt_live_setup, __intel_gt_live_teardown, \
+ T, ARRAY_SIZE(T), data); \
+})
#define SUBTEST(x) { x, #x }
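The typecheck() guard in i915_live_subtests() costs nothing at run time; it compares the addresses of two dummies of the expected and actual types, which the compiler flags when the types differ. A sketch of roughly how it works, using GNU C statement expressions and typeof as the kernel's own definition does:

#include <stdio.h>

#define typecheck(type, x) \
	({ type __dummy; typeof(x) __dummy2; (void)(&__dummy == &__dummy2); 1; })

int main(void)
{
	int i = 0;

	printf("%d\n", typecheck(int, i)); /* compiles quietly, prints 1 */
	/* typecheck(long, i) would warn: comparison of distinct pointer types */
	return 0;
}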
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
deleted file mode 100644
index 36e5e5a65155..000000000000
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef I915_TIMELINE_H
-#define I915_TIMELINE_H
-
-#include <linux/lockdep.h>
-
-#include "i915_active.h"
-#include "i915_syncmap.h"
-#include "i915_timeline_types.h"
-
-int i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *tl,
- struct i915_vma *hwsp);
-void i915_timeline_fini(struct i915_timeline *tl);
-
-struct i915_timeline *
-i915_timeline_create(struct drm_i915_private *i915,
- struct i915_vma *global_hwsp);
-
-static inline struct i915_timeline *
-i915_timeline_get(struct i915_timeline *timeline)
-{
- kref_get(&timeline->kref);
- return timeline;
-}
-
-void __i915_timeline_free(struct kref *kref);
-static inline void i915_timeline_put(struct i915_timeline *timeline)
-{
- kref_put(&timeline->kref, __i915_timeline_free);
-}
-
-static inline int __i915_timeline_sync_set(struct i915_timeline *tl,
- u64 context, u32 seqno)
-{
- return i915_syncmap_set(&tl->sync, context, seqno);
-}
-
-static inline int i915_timeline_sync_set(struct i915_timeline *tl,
- const struct dma_fence *fence)
-{
- return __i915_timeline_sync_set(tl, fence->context, fence->seqno);
-}
-
-static inline bool __i915_timeline_sync_is_later(struct i915_timeline *tl,
- u64 context, u32 seqno)
-{
- return i915_syncmap_is_later(&tl->sync, context, seqno);
-}
-
-static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
- const struct dma_fence *fence)
-{
- return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
-}
-
-int i915_timeline_pin(struct i915_timeline *tl);
-int i915_timeline_get_seqno(struct i915_timeline *tl,
- struct i915_request *rq,
- u32 *seqno);
-void i915_timeline_unpin(struct i915_timeline *tl);
-
-int i915_timeline_read_hwsp(struct i915_request *from,
- struct i915_request *until,
- u32 *hwsp_offset);
-
-void i915_timelines_init(struct drm_i915_private *i915);
-void i915_timelines_park(struct drm_i915_private *i915);
-void i915_timelines_fini(struct drm_i915_private *i915);
-
-#endif
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index cce426b23a24..da18b8d6b80c 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -293,16 +293,16 @@ TRACE_EVENT(intel_update_plane,
TP_STRUCT__entry(
__field(enum pipe, pipe)
- __field(const char *, name)
__field(u32, frame)
__field(u32, scanline)
__array(int, src, 4)
__array(int, dst, 4)
+ __string(name, plane->name)
),
TP_fast_assign(
+ __assign_str(name, plane->name);
__entry->pipe = crtc->pipe;
- __entry->name = plane->name;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
@@ -310,7 +310,7 @@ TRACE_EVENT(intel_update_plane,
),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
- pipe_name(__entry->pipe), __entry->name,
+ pipe_name(__entry->pipe), __get_str(name),
__entry->frame, __entry->scanline,
DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
@@ -322,20 +322,20 @@ TRACE_EVENT(intel_disable_plane,
TP_STRUCT__entry(
__field(enum pipe, pipe)
- __field(const char *, name)
__field(u32, frame)
__field(u32, scanline)
+ __string(name, plane->name)
),
TP_fast_assign(
+ __assign_str(name, plane->name);
__entry->pipe = crtc->pipe;
- __entry->name = plane->name;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
- pipe_name(__entry->pipe), __entry->name,
+ pipe_name(__entry->pipe), __get_str(name),
__entry->frame, __entry->scanline)
);
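The switch from __field(const char *, name) to __string(name, plane->name) above copies the plane name into the trace ring buffer instead of recording a pointer that may outlive the plane. A standalone sketch of the hazard being avoided:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry_ptr  { const char *name; };  /* old: pointer only */
struct entry_copy { char name[16]; };     /* new: inline copy   */

int main(void)
{
	char *plane_name = strdup("primary A");
	struct entry_ptr  ep = { .name = plane_name };
	struct entry_copy ec;

	snprintf(ec.name, sizeof(ec.name), "%s", plane_name);
	free(plane_name);        /* the source string goes away... */
	printf("%s\n", ec.name); /* ...but the copy is still valid */
	(void)ep;                /* ep.name now dangles; unsafe to read */
	return 0;
}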
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 2987219a6300..4920ff9aba62 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -131,6 +131,18 @@ __check_struct_size(size_t base, size_t arr, size_t count, size_t *size)
((typeof(ptr))((unsigned long)(ptr) | __bits)); \
})
+#define ptr_count_dec(p_ptr) do { \
+ typeof(p_ptr) __p = (p_ptr); \
+ unsigned long __v = (unsigned long)(*__p); \
+ *__p = (typeof(*p_ptr))(--__v); \
+} while (0)
+
+#define ptr_count_inc(p_ptr) do { \
+ typeof(p_ptr) __p = (p_ptr); \
+ unsigned long __v = (unsigned long)(*__p); \
+ *__p = (typeof(*p_ptr))(++__v); \
+} while (0)
+
#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
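ptr_count_dec()/ptr_count_inc() treat a pointer-typed variable as a small integer counter by round-tripping through unsigned long; that is only meaningful while the variable holds a count rather than an address. A standalone sketch using the same macro shape:

#include <stdio.h>

#define ptr_count_inc(p_ptr) do { \
	typeof(p_ptr) __p = (p_ptr); \
	unsigned long __v = (unsigned long)(*__p); \
	*__p = (typeof(*p_ptr))(++__v); \
} while (0)

int main(void)
{
	void *token = NULL; /* used as a counter, starts at 0 */

	ptr_count_inc(&token);
	ptr_count_inc(&token);
	printf("%lu\n", (unsigned long)token); /* 2 */
	return 0;
}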
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 94d3992b599d..dbd1fa3c7d90 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -52,34 +52,53 @@
*/
/**
- * i915_check_vgpu - detect virtual GPU
+ * i915_detect_vgpu - detect virtual GPU
* @dev_priv: i915 device private
*
* This function is called at the initialization stage to detect whether
* we are running on a vGPU.
*/
-void i915_check_vgpu(struct drm_i915_private *dev_priv)
+void i915_detect_vgpu(struct drm_i915_private *dev_priv)
{
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct pci_dev *pdev = dev_priv->drm.pdev;
u64 magic;
u16 version_major;
+ void __iomem *shared_area;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
- if (magic != VGT_MAGIC)
+ /*
+ * This is called before we set up the main MMIO BAR mappings used via
+ * the uncore structure, so we need to access the BAR directly. Since
+ * we do not support VGT on older gens, return early so we don't have
+ * to consider differently numbered or sized MMIO bars.
+ */
+ if (INTEL_GEN(dev_priv) < 6)
+ return;
+
+ shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
+ if (!shared_area) {
+ DRM_ERROR("failed to map MMIO bar to check for VGT\n");
return;
+ }
+
+ magic = readq(shared_area + vgtif_offset(magic));
+ if (magic != VGT_MAGIC)
+ goto out;
- version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
+ version_major = readw(shared_area + vgtif_offset(version_major));
if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
- return;
+ goto out;
}
- dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
+ dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps));
dev_priv->vgpu.active = true;
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+
+out:
+ pci_iounmap(pdev, shared_area);
}
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
@@ -112,22 +131,22 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
/**
* intel_vgt_deballoon - deballoon reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @ggtt: the global GGTT from which we reserved earlier
*
* This function is called to deallocate the ballooned-out graphic memory, when
* the driver is unloaded or when ballooning fails.
*/
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
+void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
int i;
- if (!intel_vgpu_active(dev_priv))
+ if (!intel_vgpu_active(ggtt->vm.i915))
return;
DRM_DEBUG("VGT deballoon.\n");
for (i = 0; i < 4; i++)
- vgt_deballoon_space(&dev_priv->ggtt, &bl_info.space[i]);
+ vgt_deballoon_space(ggtt, &bl_info.space[i]);
}
static int vgt_balloon_space(struct i915_ggtt *ggtt,
@@ -153,7 +172,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
/**
* intel_vgt_balloon - balloon out reserved graphics address trunks
- * @dev_priv: i915 device private data
+ * @ggtt: the global GGTT from which to reserve
*
* This function is called at the initialization stage, to balloon out the
* graphic address space allocated to other vGPUs, by marking these spaces as
@@ -195,22 +214,26 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
* Returns:
* zero on success, non-zero if configuration invalid or ballooning failed
*/
-int intel_vgt_balloon(struct drm_i915_private *dev_priv)
+int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
int ret;
- if (!intel_vgpu_active(dev_priv))
+ if (!intel_vgpu_active(ggtt->vm.i915))
return 0;
- mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
- mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
- unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
- unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
+ mappable_base =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.base));
+ mappable_size =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.mappable_gmadr.size));
+ unmappable_base =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.base));
+ unmappable_size =
+ intel_uncore_read(uncore, vgtif_reg(avail_rs.nonmappable_gmadr.size));
mappable_end = mappable_base + mappable_size;
unmappable_end = unmappable_base + unmappable_size;
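[Reviewer note] Because detection now runs before the driver's uncore MMIO mapping exists, the PVINFO page is mapped on the spot with pci_iomap_range(), read through the plain readq()/readw()/readl() accessors, and unmapped again. A stripped-down sketch of that probe shape (BAR number, offsets, and the expected cookie are placeholders; readq() assumes a 64-bit build or an io-64-nonatomic-*.h include):

#include <linux/io.h>
#include <linux/pci.h>

/* Probe a magic cookie in BAR 0 before the full MMIO setup is in place. */
static bool probe_magic(struct pci_dev *pdev, unsigned long page_off,
			unsigned long len, u64 expected)
{
	void __iomem *win;
	bool ok;

	win = pci_iomap_range(pdev, 0, page_off, len);
	if (!win)
		return false;

	ok = readq(win) == expected;	/* cookie assumed at offset 0 */

	pci_iounmap(pdev, win);
	return ok;
}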
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index ebe1b7bced98..8b3663dad193 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,9 +24,10 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
+#include "i915_drv.h"
#include "i915_pvinfo.h"
-void i915_check_vgpu(struct drm_i915_private *dev_priv);
+void i915_detect_vgpu(struct drm_i915_private *dev_priv);
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
@@ -42,7 +43,7 @@ intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
}
-int intel_vgt_balloon(struct drm_i915_private *dev_priv);
-void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
+int intel_vgt_balloon(struct i915_ggtt *ggtt);
+void intel_vgt_deballoon(struct i915_ggtt *ggtt);
#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a57729be8312..eb16a1a93bbc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -22,11 +22,13 @@
*
*/
+#include <linux/sched/mm.h>
#include <drm/drm_gem.h>
#include "display/intel_frontbuffer.h"
#include "gt/intel_engine.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_globals.h"
@@ -77,43 +79,20 @@ static void vma_print_allocator(struct i915_vma *vma, const char *reason)
#endif
-static void obj_bump_mru(struct drm_i915_gem_object *obj)
+static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
+ return container_of(ref, typeof(struct i915_vma), active);
+}
- obj->mm.dirty = true; /* be paranoid */
+static int __i915_vma_active(struct i915_active *ref)
+{
+ i915_vma_get(active_to_vma(ref));
+ return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
- struct i915_vma *vma = container_of(ref, typeof(*vma), active);
- struct drm_i915_gem_object *obj = vma->obj;
-
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- if (--obj->active_count)
- return;
-
- /* Prune the shared fence arrays iff completely idle (inc. external) */
- if (reservation_object_trylock(obj->base.resv)) {
- if (reservation_object_test_signaled_rcu(obj->base.resv, true))
- reservation_object_add_excl_fence(obj->base.resv, NULL);
- reservation_object_unlock(obj->base.resv);
- }
-
- /*
- * Bump our place on the bound list to keep it roughly in LRU order
- * so that we don't steal from recently used but inactive objects
- * (unless we are forced to ofc!)
- */
- if (i915_gem_object_is_shrinkable(obj))
- obj_bump_mru(obj);
-
- i915_gem_object_put(obj); /* and drop the active reference */
+ i915_vma_put(active_to_vma(ref));
}
static struct i915_vma *
@@ -125,7 +104,7 @@ vma_create(struct drm_i915_gem_object *obj,
struct rb_node *rb, **p;
/* The aliasing_ppgtt should never be used directly! */
- GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
+ GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
vma = i915_vma_alloc();
if (vma == NULL)
@@ -138,9 +117,17 @@ vma_create(struct drm_i915_gem_object *obj,
vma->size = obj->base.size;
vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
- i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
+ i915_active_init(vm->i915, &vma->active,
+ __i915_vma_active, __i915_vma_retire);
INIT_ACTIVE_REQUEST(&vma->last_fence);
+ /* Declare ourselves safe for use inside shrinkers */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&vma->active.mutex);
+ fs_reclaim_release(GFP_KERNEL);
+ }
+
INIT_LIST_HEAD(&vma->closed_link);
if (view && view->type != I915_GGTT_VIEW_NORMAL) {
@@ -408,7 +395,7 @@ void i915_vma_flush_writes(struct i915_vma *vma)
if (!i915_vma_has_ggtt_write(vma))
return;
- i915_gem_flush_ggtt_writes(vma->vm->i915);
+ intel_gt_flush_ggtt_writes(vma->vm->gt);
i915_vma_unset_ggtt_write(vma);
}
@@ -921,6 +908,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
+ int err;
assert_vma_held(vma);
assert_object_held(obj);
@@ -934,17 +922,9 @@ int i915_vma_move_to_active(struct i915_vma *vma,
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!vma->active.count && !obj->active_count++)
- i915_gem_object_get(obj); /* once more for the active ref */
-
- if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
- if (!vma->active.count && !--obj->active_count)
- i915_gem_object_put(obj);
- return -ENOMEM;
- }
-
- GEM_BUG_ON(!i915_vma_is_active(vma));
- GEM_BUG_ON(!obj->active_count);
+ err = i915_active_ref(&vma->active, rq->fence.context, rq);
+ if (unlikely(err))
+ return err;
obj->write_domain = 0;
if (flags & EXEC_OBJECT_WRITE) {
@@ -956,11 +936,14 @@ int i915_vma_move_to_active(struct i915_vma *vma,
obj->read_domains = 0;
}
obj->read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->mm.dirty = true;
if (flags & EXEC_OBJECT_NEEDS_FENCE)
__i915_active_request_set(&vma->last_fence, rq);
export_fence(vma, rq, flags);
+
+ GEM_BUG_ON(!i915_vma_is_active(vma));
return 0;
}
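[Reviewer note] Two things happen in the i915_vma.c rewrite above: the active tracker now pins the vma itself (__i915_vma_active/__i915_vma_retire) instead of juggling an object-wide active_count, and the CONFIG_LOCKDEP block in vma_create() primes lockdep so that allocating under vma->active.mutex is flagged immediately rather than only during a rare real reclaim. The priming pattern, as a generic hedged sketch (struct and function names are made up):

#include <linux/gfp.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>

struct widget {
	struct mutex lock;	/* taken from shrinker/reclaim paths */
};

static void widget_init(struct widget *w)
{
	mutex_init(&w->lock);

	/*
	 * Tell lockdep that w->lock nests inside fs_reclaim: any later
	 * GFP_KERNEL allocation made while holding w->lock becomes an
	 * instant splat instead of a hard-to-reproduce deadlock.
	 */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&w->lock);
		fs_reclaim_release(GFP_KERNEL);
	}
}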
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 7135d8dc32a7..f99c9fd497b2 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -58,6 +58,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(CANNONLAKE),
PLATFORM_NAME(ICELAKE),
PLATFORM_NAME(ELKHARTLAKE),
+ PLATFORM_NAME(TIGERLAKE),
};
#undef PLATFORM_NAME
@@ -929,35 +930,28 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
- u8 disabled_mask = 0;
- bool invalid;
- int num_bits;
+ u8 enabled_mask = BIT(info->num_pipes) - 1;
if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
- disabled_mask |= BIT(PIPE_A);
+ enabled_mask &= ~BIT(PIPE_A);
if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
- disabled_mask |= BIT(PIPE_B);
+ enabled_mask &= ~BIT(PIPE_B);
if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
- disabled_mask |= BIT(PIPE_C);
-
- num_bits = hweight8(disabled_mask);
-
- switch (disabled_mask) {
- case BIT(PIPE_A):
- case BIT(PIPE_B):
- case BIT(PIPE_A) | BIT(PIPE_B):
- case BIT(PIPE_A) | BIT(PIPE_C):
- invalid = true;
- break;
- default:
- invalid = false;
- }
+ enabled_mask &= ~BIT(PIPE_C);
+ if (INTEL_GEN(dev_priv) >= 12 &&
+ (dfsm & TGL_DFSM_PIPE_D_DISABLE))
+ enabled_mask &= ~BIT(PIPE_D);
- if (num_bits > info->num_pipes || invalid)
- DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
- disabled_mask);
+ /*
+ * At least one pipe should be enabled and if there are
+ * disabled pipes, they should be the last ones, with no holes
+ * in the mask.
+ */
+ if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
+ DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
+ enabled_mask);
else
- info->num_pipes -= num_bits;
+ info->num_pipes = hweight8(enabled_mask);
}
/* Initialize slice/subslice/EU info */
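[Reviewer note] The rewritten fuse check relies on a small bit trick: a mask whose set bits are exactly the lowest N bits equals 2^N - 1, so adding one must give a power of two; any hole in the mask breaks that. A tiny hedged self-check of the property:

#include <linux/log2.h>
#include <linux/types.h>

/*
 * 0x7 + 1 == 0x8 is a power of two: three contiguous pipes, valid.
 * 0x5 + 1 == 0x6 is not: pipe B fused off below pipe C, invalid.
 */
static bool pipe_mask_valid(u8 enabled_mask)
{
	return enabled_mask != 0 && is_power_of_2(enabled_mask + 1);
}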
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index ddafc819bf30..4f58e8d71b67 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -78,6 +78,8 @@ enum intel_platform {
/* gen11 */
INTEL_ICELAKE,
INTEL_ELKHARTLAKE,
+ /* gen12 */
+ INTEL_TIGERLAKE,
INTEL_MAX_PLATFORMS
};
@@ -110,7 +112,7 @@ enum intel_ppgtt_type {
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
- func(has_guc); \
+ func(has_gt_uc); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
@@ -136,6 +138,7 @@ enum intel_ppgtt_type {
func(has_gmch); \
func(has_hotplug); \
func(has_ipc); \
+ func(has_modular_fia); \
func(has_overlay); \
func(has_psr); \
func(overlay_needs_physical); \
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1d58f7ec5d84..c4016164c34e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -101,20 +101,30 @@ struct intel_fbdev {
struct mutex hpd_lock;
};
+enum intel_hotplug_state {
+ INTEL_HOTPLUG_UNCHANGED,
+ INTEL_HOTPLUG_CHANGED,
+ INTEL_HOTPLUG_RETRY,
+};
+
struct intel_encoder {
struct drm_encoder base;
enum intel_output_type type;
enum port port;
unsigned int cloneable;
- bool (*hotplug)(struct intel_encoder *encoder,
- struct intel_connector *connector);
+ enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder,
+ struct intel_connector *connector,
+ bool irq_received);
enum intel_output_type (*compute_output_type)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
int (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
+ void (*update_prepare)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ struct intel_crtc *);
void (*pre_pll_enable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
@@ -124,6 +134,9 @@ struct intel_encoder {
void (*enable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
+ void (*update_complete)(struct intel_atomic_state *,
+ struct intel_encoder *,
+ struct intel_crtc *);
void (*disable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
@@ -812,6 +825,15 @@ struct intel_crtc_state {
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
+ /*
+ * ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
+ * setting shared_dpll and dpll_hw_state to one of these reserved ones.
+ */
+ struct icl_port_dpll {
+ struct intel_shared_dpll *pll;
+ struct intel_dpll_hw_state hw_state;
+ } icl_port_dplls[ICL_PORT_DPLL_COUNT];
+
/* DSI PLL registers */
struct {
u32 ctrl, div;
@@ -1224,8 +1246,13 @@ struct intel_digital_port {
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
+ struct mutex tc_lock; /* protects the TypeC port mode */
+ intel_wakeref_t tc_lock_wakeref;
+ int tc_link_refcount;
bool tc_legacy_port:1;
- enum tc_port_type tc_type;
+ char tc_port_name[8];
+ enum tc_port_mode tc_mode;
+ enum phy_fia tc_phy_fia;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1473,8 +1500,8 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
-bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
-bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
+bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
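[Reviewer note] The hotplug hook now returns a tri-state instead of a bool, so an encoder can ask the core to re-run detection later, e.g. when a long HPD pulse arrives while an MST dock is still bringing its link up. A hedged sketch of a handler honouring the new contract (demo_probe_link() is a made-up placeholder for device-specific link checking):

static bool demo_probe_link(struct intel_connector *connector);

static enum intel_hotplug_state
demo_encoder_hotplug(struct intel_encoder *encoder,
		     struct intel_connector *connector,
		     bool irq_received)
{
	enum drm_connector_status old_status = connector->base.status;

	/* link not readable yet: ask for another pass, but only if an IRQ fired */
	if (!demo_probe_link(connector) && irq_received)
		return INTEL_HOTPLUG_RETRY;

	connector->base.status =
		connector->base.funcs->detect(&connector->base, false);

	return old_status == connector->base.status ?
		INTEL_HOTPLUG_UNCHANGED : INTEL_HOTPLUG_CHANGED;
}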
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
deleted file mode 100644
index 72cdafd9636a..000000000000
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Copyright © 2014 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Vinit Azad <vinit.azad@intel.com>
- * Ben Widawsky <ben@bwidawsk.net>
- * Dave Gordon <david.s.gordon@intel.com>
- * Alex Dai <yu.dai@intel.com>
- */
-
-#include "intel_guc_fw.h"
-#include "i915_drv.h"
-
-#define __MAKE_GUC_FW_PATH(KEY) \
- "i915/" \
- __stringify(KEY##_GUC_FW_PREFIX) "_guc_" \
- __stringify(KEY##_GUC_FW_MAJOR) "." \
- __stringify(KEY##_GUC_FW_MINOR) "." \
- __stringify(KEY##_GUC_FW_PATCH) ".bin"
-
-#define SKL_GUC_FW_PREFIX skl
-#define SKL_GUC_FW_MAJOR 32
-#define SKL_GUC_FW_MINOR 0
-#define SKL_GUC_FW_PATCH 3
-#define SKL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(SKL)
-MODULE_FIRMWARE(SKL_GUC_FIRMWARE_PATH);
-
-#define BXT_GUC_FW_PREFIX bxt
-#define BXT_GUC_FW_MAJOR 32
-#define BXT_GUC_FW_MINOR 0
-#define BXT_GUC_FW_PATCH 3
-#define BXT_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(BXT)
-MODULE_FIRMWARE(BXT_GUC_FIRMWARE_PATH);
-
-#define KBL_GUC_FW_PREFIX kbl
-#define KBL_GUC_FW_MAJOR 32
-#define KBL_GUC_FW_MINOR 0
-#define KBL_GUC_FW_PATCH 3
-#define KBL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(KBL)
-MODULE_FIRMWARE(KBL_GUC_FIRMWARE_PATH);
-
-#define GLK_GUC_FW_PREFIX glk
-#define GLK_GUC_FW_MAJOR 32
-#define GLK_GUC_FW_MINOR 0
-#define GLK_GUC_FW_PATCH 3
-#define GLK_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(GLK)
-MODULE_FIRMWARE(GLK_GUC_FIRMWARE_PATH);
-
-#define ICL_GUC_FW_PREFIX icl
-#define ICL_GUC_FW_MAJOR 32
-#define ICL_GUC_FW_MINOR 0
-#define ICL_GUC_FW_PATCH 3
-#define ICL_GUC_FIRMWARE_PATH __MAKE_GUC_FW_PATH(ICL)
-MODULE_FIRMWARE(ICL_GUC_FIRMWARE_PATH);
-
-static void guc_fw_select(struct intel_uc_fw *guc_fw)
-{
- struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
- struct drm_i915_private *i915 = guc_to_i915(guc);
-
- GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
-
- if (!HAS_GUC(i915))
- return;
-
- if (i915_modparams.guc_firmware_path) {
- guc_fw->path = i915_modparams.guc_firmware_path;
- guc_fw->major_ver_wanted = 0;
- guc_fw->minor_ver_wanted = 0;
- } else if (IS_ICELAKE(i915)) {
- guc_fw->path = ICL_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = ICL_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = ICL_GUC_FW_MINOR;
- } else if (IS_GEMINILAKE(i915)) {
- guc_fw->path = GLK_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = GLK_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = GLK_GUC_FW_MINOR;
- } else if (IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
- guc_fw->path = KBL_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = KBL_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = KBL_GUC_FW_MINOR;
- } else if (IS_BROXTON(i915)) {
- guc_fw->path = BXT_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = BXT_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = BXT_GUC_FW_MINOR;
- } else if (IS_SKYLAKE(i915)) {
- guc_fw->path = SKL_GUC_FIRMWARE_PATH;
- guc_fw->major_ver_wanted = SKL_GUC_FW_MAJOR;
- guc_fw->minor_ver_wanted = SKL_GUC_FW_MINOR;
- }
-}
-
-/**
- * intel_guc_fw_init_early() - initializes GuC firmware struct
- * @guc: intel_guc struct
- *
- * On platforms with GuC, selects the firmware for uploading
- */
-void intel_guc_fw_init_early(struct intel_guc *guc)
-{
- struct intel_uc_fw *guc_fw = &guc->fw;
-
- intel_uc_fw_init_early(guc_fw, INTEL_UC_FW_TYPE_GUC);
- guc_fw_select(guc_fw);
-}
-
-static void guc_prepare_xfer(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- /* Must program this register before loading the ucode with DMA */
- I915_WRITE(GUC_SHIM_CONTROL, GUC_DISABLE_SRAM_INIT_TO_ZEROES |
- GUC_ENABLE_READ_CACHE_LOGIC |
- GUC_ENABLE_MIA_CACHING |
- GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
- GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
- GUC_ENABLE_MIA_CLOCK_GATING);
-
- if (IS_GEN9_LP(dev_priv))
- I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- else
- I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
-
- if (IS_GEN(dev_priv, 9)) {
- /* DOP Clock Gating Enable for GuC clocks */
- I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
- I915_READ(GEN7_MISCCPCTL)));
-
- /* allows for 5us (in 10ns units) before GT can go to RC6 */
- I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
- }
-}
-
-/* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *fw = &guc->fw;
- struct sg_table *pages = fw->obj->mm.pages;
- u32 rsa[UOS_RSA_SCRATCH_COUNT];
- int i;
-
- sg_pcopy_to_buffer(pages->sgl, pages->nents,
- rsa, sizeof(rsa), fw->rsa_offset);
-
- for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
- I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-}
-
-static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- /* Did we complete the xfer? */
- *status = I915_READ(DMA_CTRL);
- return !(*status & START_DMA);
-}
-
-/*
- * Read the GuC status register (GUC_STATUS) and store it in the
- * specified location; then return a boolean indicating whether
- * the value matches either of two values representing completion
- * of the GuC boot process.
- *
- * This is used for polling the GuC status in a wait_for()
- * loop below.
- */
-static inline bool guc_ready(struct intel_guc *guc, u32 *status)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 val = I915_READ(GUC_STATUS);
- u32 uk_val = val & GS_UKERNEL_MASK;
-
- *status = val;
- return (uk_val == GS_UKERNEL_READY) ||
- ((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
-}
-
-static int guc_wait_ucode(struct intel_guc *guc)
-{
- u32 status;
- int ret;
-
- /*
- * Wait for the GuC to start up.
- * NB: Docs recommend not using the interrupt for completion.
- * Measurements indicate this should take no more than 20ms, so a
- * timeout here indicates that the GuC has failed and is unusable.
- * (Higher levels of the driver may decide to reset the GuC and
- * attempt the ucode load again if this happens.)
- */
- ret = wait_for(guc_ready(guc, &status), 100);
- DRM_DEBUG_DRIVER("GuC status %#x\n", status);
-
- if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- DRM_ERROR("GuC firmware signature verification failed\n");
- ret = -ENOEXEC;
- }
-
- if (ret == 0 && !guc_xfer_completed(guc, &status)) {
- DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
- status);
- ret = -ENXIO;
- }
-
- return ret;
-}
-
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *guc_fw = &guc->fw;
- unsigned long offset;
-
- /*
- * The header plus uCode will be copied to WOPCM via DMA, excluding any
- * other components
- */
- I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
-
- /* Set the source address for the new blob */
- offset = intel_uc_fw_ggtt_offset(guc_fw) + guc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
-
- /*
- * Set the DMA destination. Current uCode expects the code to be
- * loaded at 8k; locations below this are used for the stack.
- */
- I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- /* Finally start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
- return guc_wait_ucode(guc);
-}
-/*
- * Load the GuC firmware blob into the MinuteIA.
- */
-static int guc_fw_xfer(struct intel_uc_fw *guc_fw)
-{
- struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
-
- GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
-
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- guc_prepare_xfer(guc);
-
- /*
- * Note that GuC needs the CSS header plus uKernel code to be copied
- * by the DMA engine in one operation, whereas the RSA signature is
- * loaded via MMIO.
- */
- guc_xfer_rsa(guc);
-
- ret = guc_xfer_ucode(guc);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- return ret;
-}
-
-/**
- * intel_guc_fw_upload() - load GuC uCode to device
- * @guc: intel_guc structure
- *
- * Called from intel_uc_init_hw() during driver load, resume from sleep and
- * after a GPU reset.
- *
- * The firmware image should have already been fetched into memory, so only
- * check that fetch succeeded, and then transfer the image to the h/w.
- *
- * Return: non-zero code on error
- */
-int intel_guc_fw_upload(struct intel_guc *guc)
-{
- return intel_uc_fw_upload(&guc->fw, guc_fw_xfer);
-}
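[Reviewer note] The removed loader polls GUC_STATUS via the driver's wait_for(COND, timeout_ms) helper rather than an interrupt, since the docs advise against using the interrupt for completion. An open-coded, hedged equivalent of that sleep-and-poll loop, for illustration only:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Sleep-and-poll until @ready reports success or @timeout_ms expires. */
static int poll_until_ready(bool (*ready)(void *ctx, u32 *status),
			    void *ctx, u32 *status, unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	for (;;) {
		if (ready(ctx, status))
			return 0;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		usleep_range(1000, 2000);	/* ~1ms poll period */
	}
}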
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 1d7d26e4cf14..c66b2d8a6219 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -95,7 +95,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
{
int ret;
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure())
return -ENODEV;
if (!i915_modparams.enable_gvt) {
@@ -122,13 +122,14 @@ bail:
}
/**
- * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading
+ * intel_gvt_driver_remove - clean up GVT components when the i915 driver is
+ * unbinding
* @dev_priv: drm i915 private *
*
* This function is called at the i915 driver unloading stage, to shutdown
* GVT components and release the related resources.
*/
-void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
if (!intel_gvt_active(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 61b246470282..502fad8a8652 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -24,11 +24,11 @@
#ifndef _INTEL_GVT_H_
#define _INTEL_GVT_H_
-struct intel_gvt;
+struct drm_i915_private;
#ifdef CONFIG_DRM_I915_GVT
int intel_gvt_init(struct drm_i915_private *dev_priv);
-void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
+void intel_gvt_driver_remove(struct drm_i915_private *dev_priv);
int intel_gvt_init_device(struct drm_i915_private *dev_priv);
void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
@@ -38,7 +38,8 @@ static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
return 0;
}
-static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
+
+static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
{
}
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
deleted file mode 100644
index 05cbf8338f53..000000000000
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "intel_huc_fw.h"
-#include "i915_drv.h"
-
-/**
- * DOC: HuC Firmware
- *
- * Motivation:
- * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
- * Efficiency Video Coding) operations. Userspace can use the firmware
- * capabilities by adding HuC specific commands to batch buffers.
- *
- * Implementation:
- * The same firmware loader is used as the GuC. However, the actual
- * loading to HW is deferred until GEM initialization is done.
- *
- * Note that HuC firmware loading must be done before GuC loading.
- */
-
-#define BXT_HUC_FW_MAJOR 01
-#define BXT_HUC_FW_MINOR 8
-#define BXT_BLD_NUM 2893
-
-#define SKL_HUC_FW_MAJOR 01
-#define SKL_HUC_FW_MINOR 07
-#define SKL_BLD_NUM 1398
-
-#define KBL_HUC_FW_MAJOR 02
-#define KBL_HUC_FW_MINOR 00
-#define KBL_BLD_NUM 1810
-
-#define GLK_HUC_FW_MAJOR 03
-#define GLK_HUC_FW_MINOR 01
-#define GLK_BLD_NUM 2893
-
-#define ICL_HUC_FW_MAJOR 8
-#define ICL_HUC_FW_MINOR 4
-#define ICL_BLD_NUM 3238
-
-#define HUC_FW_PATH(platform, major, minor, bld_num) \
- "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
- __stringify(minor) "_" __stringify(bld_num) ".bin"
-
-#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
- SKL_HUC_FW_MINOR, SKL_BLD_NUM)
-MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
-
-#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
- BXT_HUC_FW_MINOR, BXT_BLD_NUM)
-MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
-
-#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
- KBL_HUC_FW_MINOR, KBL_BLD_NUM)
-MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
-
-#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
- GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-MODULE_FIRMWARE(I915_GLK_HUC_UCODE);
-
-#define I915_ICL_HUC_UCODE HUC_FW_PATH(icl, ICL_HUC_FW_MAJOR, \
- ICL_HUC_FW_MINOR, ICL_BLD_NUM)
-MODULE_FIRMWARE(I915_ICL_HUC_UCODE);
-
-static void huc_fw_select(struct intel_uc_fw *huc_fw)
-{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
-
- GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
- if (!HAS_HUC(dev_priv))
- return;
-
- if (i915_modparams.huc_firmware_path) {
- huc_fw->path = i915_modparams.huc_firmware_path;
- huc_fw->major_ver_wanted = 0;
- huc_fw->minor_ver_wanted = 0;
- } else if (IS_SKYLAKE(dev_priv)) {
- huc_fw->path = I915_SKL_HUC_UCODE;
- huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
- } else if (IS_BROXTON(dev_priv)) {
- huc_fw->path = I915_BXT_HUC_UCODE;
- huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- huc_fw->path = I915_KBL_HUC_UCODE;
- huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- huc_fw->path = I915_GLK_HUC_UCODE;
- huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
- } else if (IS_ICELAKE(dev_priv)) {
- huc_fw->path = I915_ICL_HUC_UCODE;
- huc_fw->major_ver_wanted = ICL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = ICL_HUC_FW_MINOR;
- }
-}
-
-/**
- * intel_huc_fw_init_early() - initializes HuC firmware struct
- * @huc: intel_huc struct
- *
- * On platforms with HuC, selects the firmware for uploading
- */
-void intel_huc_fw_init_early(struct intel_huc *huc)
-{
- struct intel_uc_fw *huc_fw = &huc->fw;
-
- intel_uc_fw_init_early(huc_fw, INTEL_UC_FW_TYPE_HUC);
- huc_fw_select(huc_fw);
-}
-
-static void huc_xfer_rsa(struct intel_huc *huc)
-{
- struct intel_uc_fw *fw = &huc->fw;
- struct sg_table *pages = fw->obj->mm.pages;
-
- /*
- * HuC firmware image is outside GuC accessible range.
- * Copy the RSA signature out of the image into
- * the perma-pinned region set aside for it
- */
- sg_pcopy_to_buffer(pages->sgl, pages->nents,
- huc->rsa_data_vaddr, fw->rsa_size,
- fw->rsa_offset);
-}
-
-static int huc_xfer_ucode(struct intel_huc *huc)
-{
- struct intel_uc_fw *huc_fw = &huc->fw;
- struct drm_i915_private *dev_priv = huc_to_i915(huc);
- struct intel_uncore *uncore = &dev_priv->uncore;
- unsigned long offset = 0;
- u32 size;
- int ret;
-
- GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
-
- intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
- /* Set the source address for the uCode */
- offset = intel_uc_fw_ggtt_offset(huc_fw) +
- huc_fw->header_offset;
- intel_uncore_write(uncore, DMA_ADDR_0_LOW,
- lower_32_bits(offset));
- intel_uncore_write(uncore, DMA_ADDR_0_HIGH,
- upper_32_bits(offset) & 0xFFFF);
-
- /*
- * Hardware doesn't look at destination address for HuC. Set it to 0,
- * but still program the correct address space.
- */
- intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0);
- intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- size = huc_fw->header_size + huc_fw->ucode_size;
- intel_uncore_write(uncore, DMA_COPY_SIZE, size);
-
- /* Start the DMA */
- intel_uncore_write(uncore, DMA_CTRL,
- _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
-
- /* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
-
- DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
-
- /* Disable the bits once DMA is over */
- intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
-
- intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
- return ret;
-}
-
-/**
- * huc_fw_xfer() - DMA's the firmware
- * @huc_fw: the firmware descriptor
- *
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Return: 0 on success, non-zero on failure
- */
-static int huc_fw_xfer(struct intel_uc_fw *huc_fw)
-{
- struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
-
- huc_xfer_rsa(huc);
-
- return huc_xfer_ucode(huc);
-}
-
-/**
- * intel_huc_fw_upload() - load HuC uCode to device
- * @huc: intel_huc structure
- *
- * Called from intel_uc_init_hw() during driver load, resume from sleep and
- * after a GPU reset. Note that HuC must be loaded before GuC.
- *
- * The firmware image should have already been fetched into memory, so only
- * check that fetch succeeded, and then transfer the image to the h/w.
- *
- * Return: non-zero code on error
- */
-int intel_huc_fw_upload(struct intel_huc *huc)
-{
- return intel_uc_fw_upload(&huc->fw, huc_fw_xfer);
-}
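[Reviewer note] Both removed loaders pull the RSA signature out of the firmware object's scatterlist with sg_pcopy_to_buffer(), which copies from a byte offset inside a scattered buffer into a flat one. A minimal sketch of that call (the wrapper and its parameters are placeholders):

#include <linux/scatterlist.h>

/*
 * Copy @len bytes starting at byte @skip of a scatterlist into a
 * contiguous buffer, as the RSA-signature copies above do.
 */
static bool copy_blob_region(struct sg_table *pages, void *dst,
			     size_t len, off_t skip)
{
	return sg_pcopy_to_buffer(pages->sgl, pages->nents,
				  dst, len, skip) == len;
}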
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d9a7a13ce32a..30399b245f07 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1116,6 +1116,8 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
+ cpp = plane_state->base.fb->format->cpp[0];
+
/*
* Not 100% sure which way ELK should go here as the
* spec only says CL/CTG should assume 32bpp and BW
@@ -1129,9 +1131,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
*/
if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
level != G4X_WM_LEVEL_NORMAL)
- cpp = 4;
- else
- cpp = plane_state->base.fb->format->cpp[0];
+ cpp = max(cpp, 4u);
clock = adjusted_mode->crtc_clock;
htotal = adjusted_mode->crtc_htotal;
@@ -1198,8 +1198,8 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
return dirty;
}
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
@@ -1566,13 +1566,13 @@ static void g4x_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
+ crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
g4x_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -2185,13 +2185,13 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
if (!crtc_state->wm.need_postvbl_update)
return;
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
vlv_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -2493,8 +2493,8 @@ struct ilk_wm_maximums {
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 mem_value, bool is_lp)
{
u32 method1, method2;
@@ -2503,19 +2503,19 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
if (mem_value == 0)
return U32_MAX;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->base.fb->format->cpp[0];
- method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
if (!is_lp)
return method1;
- method2 = ilk_wm_method2(cstate->pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->base.dst),
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->base.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->base.dst),
cpp, mem_value);
return min(method1, method2);
@@ -2525,8 +2525,8 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 mem_value)
{
u32 method1, method2;
@@ -2535,15 +2535,15 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
if (mem_value == 0)
return U32_MAX;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->base.fb->format->cpp[0];
- method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
- method2 = ilk_wm_method2(cstate->pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->base.dst),
+ method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value);
+ method2 = ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->base.adjusted_mode.crtc_htotal,
+ drm_rect_width(&plane_state->base.dst),
cpp, mem_value);
return min(method1, method2);
}
@@ -2552,8 +2552,8 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 mem_value)
{
int cpp;
@@ -2561,29 +2561,29 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
if (mem_value == 0)
return U32_MAX;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->base.fb->format->cpp[0];
- return ilk_wm_method2(cstate->pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
- pstate->base.crtc_w, cpp, mem_value);
+ return ilk_wm_method2(crtc_state->pixel_rate,
+ crtc_state->base.adjusted_mode.crtc_htotal,
+ plane_state->base.crtc_w, cpp, mem_value);
}
/* Only for WM_LP. */
-static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
u32 pri_val)
{
int cpp;
- if (!intel_wm_plane_visible(cstate, pstate))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
- cpp = pstate->base.fb->format->cpp[0];
+ cpp = plane_state->base.fb->format->cpp[0];
- return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp);
+ return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->base.dst), cpp);
}
static unsigned int
@@ -2752,7 +2752,7 @@ static bool ilk_validate_wm_level(int level,
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_crtc *intel_crtc,
int level,
- struct intel_crtc_state *cstate,
+ struct intel_crtc_state *crtc_state,
const struct intel_plane_state *pristate,
const struct intel_plane_state *sprstate,
const struct intel_plane_state *curstate,
@@ -2770,30 +2770,30 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
}
if (pristate) {
- result->pri_val = ilk_compute_pri_wm(cstate, pristate,
+ result->pri_val = ilk_compute_pri_wm(crtc_state, pristate,
pri_latency, level);
- result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val);
+ result->fbc_val = ilk_compute_fbc_wm(crtc_state, pristate, result->pri_val);
}
if (sprstate)
- result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency);
+ result->spr_val = ilk_compute_spr_wm(crtc_state, sprstate, spr_latency);
if (curstate)
- result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency);
+ result->cur_val = ilk_compute_cur_wm(crtc_state, curstate, cur_latency);
result->enable = true;
}
static u32
-hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
+hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(cstate->base.state);
+ to_intel_atomic_state(crtc_state->base.state);
const struct drm_display_mode *adjusted_mode =
- &cstate->base.adjusted_mode;
+ &crtc_state->base.adjusted_mode;
u32 linetime, ips_linetime;
- if (!cstate->base.active)
+ if (!crtc_state->base.active)
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
@@ -3101,10 +3101,10 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
}
/* Compute new watermarks for the pipe */
-static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
+static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3116,9 +3116,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
- pipe_wm = &cstate->wm.ilk.optimal;
+ pipe_wm = &crtc_state->wm.ilk.optimal;
- drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) {
const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
@@ -3129,7 +3129,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
curstate = ps;
}
- pipe_wm->pipe_enabled = cstate->base.active;
+ pipe_wm->pipe_enabled = crtc_state->base.active;
if (sprstate) {
pipe_wm->sprites_enabled = sprstate->base.visible;
pipe_wm->sprites_scaled = sprstate->base.visible &&
@@ -3148,11 +3148,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+ ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
+ pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
return -EINVAL;
@@ -3162,7 +3162,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
+ ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -3736,14 +3736,13 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
return 0;
}
-bool intel_can_enable_sagv(struct drm_atomic_state *state)
+bool intel_can_enable_sagv(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct intel_crtc *crtc;
struct intel_plane *plane;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *crtc_state;
enum pipe pipe;
int level, latency;
int sagv_block_time_us;
@@ -3761,27 +3760,27 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
/*
* If there are no active CRTCs, no additional checks need be performed
*/
- if (hweight32(intel_state->active_crtcs) == 0)
+ if (hweight32(state->active_crtcs) == 0)
return true;
/*
* SKL+ workaround: bspec recommends we disable SAGV when we have
* more than one pipe enabled
*/
- if (hweight32(intel_state->active_crtcs) > 1)
+ if (hweight32(state->active_crtcs) > 1)
return false;
/* Since we're now guaranteed to only have one active CRTC... */
- pipe = ffs(intel_state->active_crtcs) - 1;
+ pipe = ffs(state->active_crtcs) - 1;
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- cstate = to_intel_crtc_state(crtc->base.state);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
return false;
for_each_intel_plane_on_crtc(dev, crtc, plane) {
struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane->id];
+ &crtc_state->wm.skl.optimal.planes[plane->id];
/* Skip this plane if it's not enabled */
if (!wm->wm[0].plane_en)
@@ -3812,7 +3811,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
}
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+ const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
const int num_active,
struct skl_ddb_allocation *ddb)
@@ -3826,7 +3825,7 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
- adjusted_mode = &cstate->base.adjusted_mode;
+ adjusted_mode = &crtc_state->base.adjusted_mode;
total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
/*
@@ -3849,23 +3848,22 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *cstate,
+ const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
- struct drm_atomic_state *state = cstate->base.state;
+ struct drm_atomic_state *state = crtc_state->base.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_crtc *for_crtc = cstate->base.crtc;
- const struct drm_crtc_state *crtc_state;
- const struct drm_crtc *crtc;
+ struct drm_crtc *for_crtc = crtc_state->base.crtc;
+ const struct intel_crtc *crtc;
u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
u16 ddb_size;
u32 i;
- if (WARN_ON(!state) || !cstate->base.active) {
+ if (WARN_ON(!state) || !crtc_state->base.active) {
alloc->start = 0;
alloc->end = 0;
*num_active = hweight32(dev_priv->active_crtcs);
@@ -3877,7 +3875,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
else
*num_active = hweight32(dev_priv->active_crtcs);
- ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate,
+ ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
*num_active, ddb);
/*
@@ -3902,16 +3900,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
* framebuffer, so instead of allocating DDB equally among pipes
* distribute DDB based on resolution/width of the display.
*/
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- const struct drm_display_mode *adjusted_mode;
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ enum pipe pipe = crtc->pipe;
int hdisplay, vdisplay;
- enum pipe pipe;
- if (!crtc_state->enable)
+ if (!crtc_state->base.enable)
continue;
- pipe = to_intel_crtc(crtc)->pipe;
- adjusted_mode = &crtc_state->adjusted_mode;
drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
total_width += hdisplay;
@@ -3930,7 +3927,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
u64 modifier, unsigned int rotation,
u32 plane_pixel_rate, struct skl_wm_params *wp,
int color_plane);
-static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
@@ -4062,15 +4059,15 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
* Caller should take care of dividing & rounding off the value.
*/
static uint_fixed_16_16_t
-skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct intel_plane *plane = to_intel_plane(pstate->base.plane);
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
- if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
+ if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
return u32_to_fixed16(0);
/* n.b., src is 16.16 fixed point, dst is whole integer */
@@ -4079,20 +4076,20 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
* Cursors only support 0/180 degree rotation,
* hence no need to account for rotation here.
*/
- src_w = pstate->base.src_w >> 16;
- src_h = pstate->base.src_h >> 16;
- dst_w = pstate->base.crtc_w;
- dst_h = pstate->base.crtc_h;
+ src_w = plane_state->base.src_w >> 16;
+ src_h = plane_state->base.src_h >> 16;
+ dst_w = plane_state->base.crtc_w;
+ dst_h = plane_state->base.crtc_h;
} else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- src_w = drm_rect_width(&pstate->base.src) >> 16;
- src_h = drm_rect_height(&pstate->base.src) >> 16;
- dst_w = drm_rect_width(&pstate->base.dst);
- dst_h = drm_rect_height(&pstate->base.dst);
+ src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->base.dst);
+ dst_h = drm_rect_height(&plane_state->base.dst);
}
fp_w_ratio = div_fixed16(src_w, dst_w);
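[Reviewer note] skl_plane_downscale_amount() works in the driver's uint_fixed_16_16_t type: div_fixed16() produces a ratio scaled by 2^16 and mul_fixed16() multiplies two such ratios. The same arithmetic spelled out on plain integers (a hedged sketch, not the driver's helpers):

#include <linux/math64.h>
#include <linux/types.h>

/* 16.16 fixed point: the u32 value v stands for v / 65536.0 */
static u32 fp16_div(u32 num, u32 den)
{
	return (u32)div_u64((u64)num << 16, den);
}

static u32 fp16_mul(u32 a, u32 b)
{
	return (u32)(((u64)a * b) >> 16);
}

/*
 * e.g. a 2048-wide source scanned out 1024 wide:
 * fp16_div(2048, 1024) == 0x20000, i.e. a 2.0x downscale factor.
 */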
@@ -4137,49 +4134,46 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
}
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct drm_crtc_state *crtc_state = &cstate->base;
- struct drm_atomic_state *state = crtc_state->state;
+ struct drm_atomic_state *state = crtc_state->base.state;
struct drm_plane *plane;
- const struct drm_plane_state *pstate;
- struct intel_plane_state *intel_pstate;
+ const struct drm_plane_state *drm_plane_state;
int crtc_clock, dotclk;
u32 pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale;
uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
- if (!cstate->base.enable)
+ if (!crtc_state->base.enable)
return 0;
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
uint_fixed_16_16_t plane_downscale;
uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8);
int bpp;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(drm_plane_state);
- if (!intel_wm_plane_visible(cstate,
- to_intel_plane_state(pstate)))
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
continue;
- if (WARN_ON(!pstate->fb))
+ if (WARN_ON(!plane_state->base.fb))
return -EINVAL;
- intel_pstate = to_intel_plane_state(pstate);
- plane_downscale = skl_plane_downscale_amount(cstate,
- intel_pstate);
- bpp = pstate->fb->format->cpp[0] * 8;
+ plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state);
+ bpp = plane_state->base.fb->format->cpp[0] * 8;
if (bpp == 64)
plane_downscale = mul_fixed16(plane_downscale,
fp_9_div_8);
max_downscale = max_fixed16(plane_downscale, max_downscale);
}
- pipe_downscale = skl_pipe_downscale_amount(cstate);
+ pipe_downscale = skl_pipe_downscale_amount(crtc_state);
pipe_downscale = mul_fixed16(pipe_downscale, max_downscale);
- crtc_clock = crtc_state->adjusted_mode.crtc_clock;
+ crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
@@ -4196,12 +4190,11 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
}
static u64
-skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
+skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
const int plane)
{
- struct intel_plane *intel_plane =
- to_intel_plane(intel_pstate->base.plane);
+ struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane);
u32 data_rate;
u32 width = 0, height = 0;
struct drm_framebuffer *fb;
@@ -4209,10 +4202,10 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
uint_fixed_16_16_t down_scale_amount;
u64 rate;
- if (!intel_pstate->base.visible)
+ if (!plane_state->base.visible)
return 0;
- fb = intel_pstate->base.fb;
+ fb = plane_state->base.fb;
format = fb->format->format;
if (intel_plane->id == PLANE_CURSOR)
@@ -4225,8 +4218,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- width = drm_rect_width(&intel_pstate->base.src) >> 16;
- height = drm_rect_height(&intel_pstate->base.src) >> 16;
+ width = drm_rect_width(&plane_state->base.src) >> 16;
+ height = drm_rect_height(&plane_state->base.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
if (plane == 1 && is_planar_yuv_format(format)) {
@@ -4236,7 +4229,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
data_rate = width * height;
- down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
+ down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
@@ -4245,35 +4238,32 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
}
static u64
-skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_crtc_state *cstate = &intel_cstate->base;
- struct drm_atomic_state *state = cstate->state;
+ struct drm_atomic_state *state = crtc_state->base.state;
struct drm_plane *plane;
- const struct drm_plane_state *pstate;
+ const struct drm_plane_state *drm_plane_state;
u64 total_data_rate = 0;
if (WARN_ON(!state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+ drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
enum plane_id plane_id = to_intel_plane(plane)->id;
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(drm_plane_state);
u64 rate;
- const struct intel_plane_state *intel_pstate =
- to_intel_plane_state(pstate);
/* packed/y */
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 0);
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
/* uv-plane */
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 1);
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
uv_plane_data_rate[plane_id] = rate;
total_data_rate += rate;
}
@@ -4282,28 +4272,25 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
}
static u64
-icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate)
{
- struct drm_crtc_state *cstate = &intel_cstate->base;
- struct drm_atomic_state *state = cstate->state;
struct drm_plane *plane;
- const struct drm_plane_state *pstate;
+ const struct drm_plane_state *drm_plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!state))
+ if (WARN_ON(!crtc_state->base.state))
return 0;
/* Calculate and cache data rate for each plane */
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
- const struct intel_plane_state *intel_pstate =
- to_intel_plane_state(pstate);
+ drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(drm_plane_state);
enum plane_id plane_id = to_intel_plane(plane)->id;
u64 rate;
- if (!intel_pstate->linked_plane) {
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 0);
+ if (!plane_state->linked_plane) {
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
} else {
@@ -4316,18 +4303,16 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
* NULL if we try get_new_plane_state(), so we
* always calculate from the master.
*/
- if (intel_pstate->slave)
+ if (plane_state->slave)
continue;
/* Y plane rate is calculated on the slave */
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 0);
- y_plane_id = intel_pstate->linked_plane->id;
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0);
+ y_plane_id = plane_state->linked_plane->id;
plane_data_rate[y_plane_id] = rate;
total_data_rate += rate;
- rate = skl_plane_relative_data_rate(intel_cstate,
- intel_pstate, 1);
+ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 1);
plane_data_rate[plane_id] = rate;
total_data_rate += rate;
}
@@ -4337,14 +4322,14 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
}
static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
struct skl_ddb_allocation *ddb /* out */)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct drm_crtc *crtc = cstate->base.crtc;
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
+ struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
@@ -4357,40 +4342,40 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
int level;
/* Clear the partitioning for disabled planes. */
- memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
- memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
+ memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
+ memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
- if (!cstate->base.active) {
+ if (!crtc_state->base.active) {
alloc->start = alloc->end = 0;
return 0;
}
if (INTEL_GEN(dev_priv) >= 11)
total_data_rate =
- icl_get_total_relative_data_rate(cstate,
+ icl_get_total_relative_data_rate(crtc_state,
plane_data_rate);
else
total_data_rate =
- skl_get_total_relative_data_rate(cstate,
+ skl_get_total_relative_data_rate(crtc_state,
plane_data_rate,
uv_plane_data_rate);
- skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+ skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
ddb, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
/* Allocate fixed number of blocks for cursor. */
- total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
+ total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
alloc_size -= total[PLANE_CURSOR];
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
+ crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
alloc->end - total[PLANE_CURSOR];
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+ crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
if (total_data_rate == 0)
return 0;
@@ -4403,7 +4388,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
blocks = 0;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
const struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
if (WARN_ON(wm->wm[level].min_ddb_alloc >
@@ -4438,7 +4423,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
const struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
u64 rate;
u16 extra;
@@ -4477,9 +4462,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
- &cstate->wm.skl.plane_ddb_y[plane_id];
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_ddb_entry *uv_plane_alloc =
- &cstate->wm.skl.plane_ddb_uv[plane_id];
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
if (plane_id == PLANE_CURSOR)
continue;
@@ -4510,7 +4495,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
/*
* We only disable the watermarks for each plane if
@@ -4547,7 +4532,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
struct skl_plane_wm *wm =
- &cstate->wm.skl.optimal.planes[plane_id];
+ &crtc_state->wm.skl.optimal.planes[plane_id];
if (wm->trans_wm.plane_res_b >= total[plane_id])
memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
@@ -4599,43 +4584,43 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
}
static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
{
u32 pixel_rate;
u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
- if (!cstate->base.active)
+ if (!crtc_state->base.active)
return u32_to_fixed16(0);
- pixel_rate = cstate->pixel_rate;
+ pixel_rate = crtc_state->pixel_rate;
if (WARN_ON(pixel_rate == 0))
return u32_to_fixed16(0);
- crtc_htotal = cstate->base.adjusted_mode.crtc_htotal;
+ crtc_htotal = crtc_state->base.adjusted_mode.crtc_htotal;
linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
return linetime_us;
}
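intel_get_linetime_us() above divides crtc_htotal * 1000 by the pipe pixel rate (in kHz) to get microseconds per scanline. A quick standalone check of that formula, using plain floating point instead of the driver's fixed-point helpers, with made-up mode values:

/*
 * Rough userspace check of the line time formula above: with the pipe
 * pixel rate in kHz, htotal * 1000 / pixel_rate yields microseconds
 * per scanline. Values below are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int crtc_htotal = 2200;      /* pixels per line, incl. blanking */
	unsigned int pixel_rate_khz = 148500; /* 1080p60-ish pixel clock */
	double linetime_us = (double)crtc_htotal * 1000.0 / pixel_rate_khz;

	printf("linetime = %.3f us\n", linetime_us); /* ~14.815 us */
	return 0;
}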
static u32
-skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate)
+skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
u64 adjusted_pixel_rate;
uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
- if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
+ if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)))
return 0;
/*
* Adjusted plane pixel rate is just the pipe's adjusted pixel rate
* with additional adjustments for plane-specific scaling.
*/
- adjusted_pixel_rate = cstate->pixel_rate;
- downscale_amount = skl_plane_downscale_amount(cstate, pstate);
+ adjusted_pixel_rate = crtc_state->pixel_rate;
+ downscale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
return mul_round_up_u32_fixed16(adjusted_pixel_rate,
downscale_amount);
@@ -4768,13 +4753,13 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
return level > 0;
}
-static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
@@ -4800,14 +4785,14 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
method1 = skl_wm_method1(dev_priv, wp->plane_pixel_rate,
wp->cpp, latency, wp->dbuf_block_size);
method2 = skl_wm_method2(wp->plane_pixel_rate,
- cstate->base.adjusted_mode.crtc_htotal,
+ crtc_state->base.adjusted_mode.crtc_htotal,
latency,
wp->plane_blocks_per_line);
if (wp->y_tiled) {
selected_result = max_fixed16(method2, wp->y_tile_minimum);
} else {
- if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
+ if ((wp->cpp * crtc_state->base.adjusted_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
@@ -4894,18 +4879,18 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
}
static void
-skl_compute_wm_levels(const struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- skl_compute_plane_wm(cstate, level, wm_params,
+ skl_compute_plane_wm(crtc_state, level, wm_params,
result_prev, result);
result_prev = result;
@@ -4913,14 +4898,14 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
}
static u32
-skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
+ struct drm_atomic_state *state = crtc_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
uint_fixed_16_16_t linetime_us;
u32 linetime_wm;
- linetime_us = intel_get_linetime_us(cstate);
+ linetime_us = intel_get_linetime_us(crtc_state);
linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
/* Display WA #1135: BXT:ALL GLK:ALL */
@@ -4930,11 +4915,11 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
return linetime_wm;
}
-static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
+static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
{
- struct drm_device *dev = cstate->base.crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
u16 trans_min, trans_y_tile_min;
const u16 trans_amount = 10; /* This is a configurable amount */
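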
@@ -5092,13 +5077,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
-static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
+static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- struct drm_crtc_state *crtc_state = &cstate->base;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
struct drm_plane *plane;
- const struct drm_plane_state *pstate;
+ const struct drm_plane_state *drm_plane_state;
int ret;
/*
@@ -5107,19 +5091,20 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
*/
memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes));
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
- const struct intel_plane_state *intel_pstate =
- to_intel_plane_state(pstate);
+ drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state,
+ &crtc_state->base) {
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(drm_plane_state);
if (INTEL_GEN(dev_priv) >= 11)
- ret = icl_build_plane_wm(cstate, intel_pstate);
+ ret = icl_build_plane_wm(crtc_state, plane_state);
else
- ret = skl_build_plane_wm(cstate, intel_pstate);
+ ret = skl_build_plane_wm(crtc_state, plane_state);
if (ret)
return ret;
}
- pipe_wm->linetime = skl_compute_linetime_wm(cstate);
+ pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
return 0;
}
@@ -5273,10 +5258,10 @@ static u32
pipes_modified(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *crtc_state;
u32 i, ret = 0;
- for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
ret |= drm_crtc_mask(&crtc->base);
return ret;
@@ -5652,11 +5637,11 @@ skl_compute_wm(struct intel_atomic_state *state)
}
static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
+ struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
enum pipe pipe = crtc->pipe;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
@@ -5666,9 +5651,9 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
}
static void skl_initial_wm(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *results = &state->wm_results;
@@ -5678,8 +5663,8 @@ static void skl_initial_wm(struct intel_atomic_state *state,
mutex_lock(&dev_priv->wm.wm_mutex);
- if (cstate->base.active_changed)
- skl_atomic_update_crtc_wm(state, cstate);
+ if (crtc_state->base.active_changed)
+ skl_atomic_update_crtc_wm(state, crtc_state);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5735,28 +5720,29 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
}
static void ilk_initial_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
mutex_lock(&dev_priv->wm.wm_mutex);
- intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
+ crtc->wm.active.ilk = crtc_state->wm.ilk.intermediate;
ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_atomic_state *state,
- struct intel_crtc_state *cstate)
+ struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
mutex_lock(&dev_priv->wm.wm_mutex);
- if (cstate->wm.need_postvbl_update) {
- intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
- ilk_program_watermarks(dev_priv);
- }
+ crtc->wm.active.ilk = crtc_state->wm.ilk.optimal;
+ ilk_program_watermarks(dev_priv);
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5812,13 +5798,13 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *crtc_state;
skl_ddb_get_hw_state(dev_priv, ddb);
for_each_intel_crtc(&dev_priv->drm, crtc) {
- cstate = to_intel_crtc_state(crtc->base.state);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
- skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
+ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
if (crtc->active)
hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
@@ -5835,8 +5821,8 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
- struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
enum pipe pipe = crtc->pipe;
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
@@ -6891,9 +6877,10 @@ void gen6_rps_boost(struct i915_request *rq)
/* Serializes with i915_request_retire() */
boost = false;
spin_lock_irqsave(&rq->lock, flags);
- if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) {
+ if (!i915_request_has_waitboost(rq) &&
+ !dma_fence_is_signaled_locked(&rq->fence)) {
boost = !atomic_fetch_inc(&rps->num_waiters);
- rq->waitboost = true;
+ rq->flags |= I915_REQUEST_WAITBOOST;
}
spin_unlock_irqrestore(&rq->lock, flags);
if (!boost)
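The gen6_rps_boost() hunk replaces the dedicated rq->waitboost bool with a bit in rq->flags tested through i915_request_has_waitboost(). A toy sketch of that bool-to-bitflag pattern; the struct and names below are illustrative stand-ins, not the real i915_request:

/*
 * Minimal sketch of the bitflag pattern the hunk above switches to:
 * a bool field becomes one bit in a flags word, tested via a helper.
 * Types and names are illustrative, not the actual i915 structures.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQUEST_WAITBOOST (1u << 0)   /* stands in for I915_REQUEST_WAITBOOST */

struct request {
	unsigned int flags;
};

static bool request_has_waitboost(const struct request *rq)
{
	return rq->flags & REQUEST_WAITBOOST;
}

int main(void)
{
	struct request rq = { .flags = 0 };

	if (!request_has_waitboost(&rq))
		rq.flags |= REQUEST_WAITBOOST;   /* grant the boost once */
	printf("waitboost=%d\n", request_has_waitboost(&rq));
	return 0;
}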
@@ -7175,7 +7162,7 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- if (HAS_GUC(dev_priv))
+ if (HAS_GT_UC(dev_priv))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -7192,7 +7179,7 @@ static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
* next request to execute. If the idle hysteresis is less than that
* interrupt service latency, the hardware will automatically gate
* the power well and we will then incur the wake up cost on top of
* the service latency. A similar guide from intel_pstate is that we
* do not want the enable hysteresis to less than the wakeup latency.
*
* igt/gem_exec_nop/sequential provides a rough estimate for the
@@ -7256,7 +7243,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
- if (HAS_GUC(dev_priv))
+ if (HAS_GT_UC(dev_priv))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -7271,7 +7258,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
* next request to execute. If the idle hysteresis is less than that
* interrupt service latency, the hardware will automatically gate
* the power well and we will then incur the wake up cost on top of
* the service latency. A similar guide from intel_pstate is that we
* do not want the enable hysteresis to less than the wakeup latency.
*
* igt/gem_exec_nop/sequential provides a rough estimate for the
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 1b489fa399e1..e3573e1e16e3 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -10,10 +10,10 @@
#include "i915_reg.h"
-struct drm_atomic_state;
struct drm_device;
struct drm_i915_private;
struct i915_request;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
@@ -52,7 +52,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_atomic_state *state);
+bool intel_can_enable_sagv(struct intel_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8d1aebc3e857..b2a05850ea42 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -592,7 +592,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
pm_runtime_put(kdev);
}
-void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm)
+void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
int count = atomic_read(&rpm->wakeref_count);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 2ee8f9522e05..ae64ff14c642 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -173,7 +173,7 @@ enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
-void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
deleted file mode 100644
index ae45651ac73c..000000000000
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ /dev/null
@@ -1,561 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "gt/intel_reset.h"
-#include "intel_uc.h"
-#include "intel_guc.h"
-#include "intel_guc_ads.h"
-#include "intel_guc_submission.h"
-#include "i915_drv.h"
-
-static void guc_free_load_err_log(struct intel_guc *guc);
-
-/* Reset GuC providing us with fresh state for both GuC and HuC.
- */
-static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
-{
- int ret;
- u32 guc_status;
-
- ret = intel_reset_guc(dev_priv);
- if (ret) {
- DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
- return ret;
- }
-
- guc_status = I915_READ(GUC_STATUS);
- WARN(!(guc_status & GS_MIA_IN_RESET),
- "GuC status: 0x%x, MIA core expected to be in reset\n",
- guc_status);
-
- return ret;
-}
-
-static int __get_platform_enable_guc(struct drm_i915_private *i915)
-{
- struct intel_uc_fw *guc_fw = &i915->guc.fw;
- struct intel_uc_fw *huc_fw = &i915->huc.fw;
- int enable_guc = 0;
-
- /* Default is to use HuC if we know GuC and HuC firmwares */
- if (intel_uc_fw_is_selected(guc_fw) && intel_uc_fw_is_selected(huc_fw))
- enable_guc |= ENABLE_GUC_LOAD_HUC;
-
- /* Any platform specific fine-tuning can be done here */
-
- return enable_guc;
-}
-
-static int __get_default_guc_log_level(struct drm_i915_private *i915)
-{
- int guc_log_level;
-
- if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915))
- guc_log_level = GUC_LOG_LEVEL_DISABLED;
- else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
- IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- guc_log_level = GUC_LOG_LEVEL_MAX;
- else
- guc_log_level = GUC_LOG_LEVEL_NON_VERBOSE;
-
- /* Any platform specific fine-tuning can be done here */
-
- return guc_log_level;
-}
-
-/**
- * sanitize_options_early - sanitize uC related modparam options
- * @i915: device private
- *
- * In case of "enable_guc" option this function will attempt to modify
- * it only if it was initially set to "auto(-1)". Default value for this
- * modparam varies between platforms and it is hardcoded in driver code.
- * Any other modparam value is only monitored against availability of the
- * related hardware or firmware definitions.
- *
- * In case of "guc_log_level" option this function will attempt to modify
- * it only if it was initially set to "auto(-1)" or if initial value was
- * "enable(1..4)" on platforms without the GuC. Default value for this
- * modparam varies between platforms and is usually set to "disable(0)"
- * unless GuC is enabled on given platform and the driver is compiled with
- * debug config when this modparam will default to "enable(1..4)".
- */
-static void sanitize_options_early(struct drm_i915_private *i915)
-{
- struct intel_uc_fw *guc_fw = &i915->guc.fw;
- struct intel_uc_fw *huc_fw = &i915->huc.fw;
-
- /* A negative value means "use platform default" */
- if (i915_modparams.enable_guc < 0)
- i915_modparams.enable_guc = __get_platform_enable_guc(i915);
-
- DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
- i915_modparams.enable_guc,
- yesno(intel_uc_is_using_guc_submission(i915)),
- yesno(intel_uc_is_using_huc(i915)));
-
- /* Verify GuC firmware availability */
- if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "enable_guc", i915_modparams.enable_guc,
- !HAS_GUC(i915) ? "no GuC hardware" :
- "no GuC firmware");
- }
-
- /* Verify HuC firmware availability */
- if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "enable_guc", i915_modparams.enable_guc,
- !HAS_HUC(i915) ? "no HuC hardware" :
- "no HuC firmware");
- }
-
- /* XXX: GuC submission is unavailable for now */
- if (intel_uc_is_using_guc_submission(i915)) {
- DRM_INFO("Incompatible option detected: %s=%d, %s!\n",
- "enable_guc", i915_modparams.enable_guc,
- "GuC submission not supported");
- DRM_INFO("Switching to non-GuC submission mode!\n");
- i915_modparams.enable_guc &= ~ENABLE_GUC_SUBMISSION;
- }
-
- /* A negative value means "use platform/config default" */
- if (i915_modparams.guc_log_level < 0)
- i915_modparams.guc_log_level =
- __get_default_guc_log_level(i915);
-
- if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915_modparams.guc_log_level,
- !HAS_GUC(i915) ? "no GuC hardware" :
- "GuC not enabled");
- i915_modparams.guc_log_level = 0;
- }
-
- if (i915_modparams.guc_log_level > GUC_LOG_LEVEL_MAX) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915_modparams.guc_log_level,
- "verbosity too high");
- i915_modparams.guc_log_level = GUC_LOG_LEVEL_MAX;
- }
-
- DRM_DEBUG_DRIVER("guc_log_level=%d (enabled:%s, verbose:%s, verbosity:%d)\n",
- i915_modparams.guc_log_level,
- yesno(i915_modparams.guc_log_level),
- yesno(GUC_LOG_LEVEL_IS_VERBOSE(i915_modparams.guc_log_level)),
- GUC_LOG_LEVEL_TO_VERBOSITY(i915_modparams.guc_log_level));
-
- /* Make sure that sanitization was done */
- GEM_BUG_ON(i915_modparams.enable_guc < 0);
- GEM_BUG_ON(i915_modparams.guc_log_level < 0);
-}
-
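The deleted sanitize_options_early() treats enable_guc as "platform default" when negative and as a bitmask otherwise, per its kernel-doc above. A standalone sketch of that decode, reusing the ENABLE_GUC_* bit positions the code references (BIT(0) submission, BIT(1) load HuC); everything else is illustrative:

/*
 * Sketch of how the deleted sanitize_options_early() treats the
 * enable_guc modparam: a negative value means "platform default",
 * otherwise it is a bitmask of ENABLE_GUC_* flags.
 */
#include <stdio.h>

#define ENABLE_GUC_SUBMISSION (1 << 0)
#define ENABLE_GUC_LOAD_HUC   (1 << 1)

int main(void)
{
	int enable_guc = -1;              /* modparam as given by the user */
	int platform_default = ENABLE_GUC_LOAD_HUC;

	if (enable_guc < 0)               /* auto: take the platform default */
		enable_guc = platform_default;

	printf("submission:%s huc:%s\n",
	       (enable_guc & ENABLE_GUC_SUBMISSION) ? "yes" : "no",
	       (enable_guc & ENABLE_GUC_LOAD_HUC) ? "yes" : "no");
	return 0;
}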
-void intel_uc_init_early(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- intel_guc_init_early(guc);
- intel_huc_init_early(huc);
-
- sanitize_options_early(i915);
-}
-
-void intel_uc_cleanup_early(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- guc_free_load_err_log(guc);
-}
-
-/**
- * intel_uc_init_mmio - setup uC MMIO access
- * @i915: device private
- *
- * Setup minimal state necessary for MMIO accesses later in the
- * initialization sequence.
- */
-void intel_uc_init_mmio(struct drm_i915_private *i915)
-{
- intel_guc_init_send_regs(&i915->guc);
-}
-
-static void guc_capture_load_err_log(struct intel_guc *guc)
-{
- if (!guc->log.vma || !intel_guc_log_get_level(&guc->log))
- return;
-
- if (!guc->load_err_log)
- guc->load_err_log = i915_gem_object_get(guc->log.vma->obj);
-
- return;
-}
-
-static void guc_free_load_err_log(struct intel_guc *guc)
-{
- if (guc->load_err_log)
- i915_gem_object_put(guc->load_err_log);
-}
-
-static void guc_reset_interrupts(struct intel_guc *guc)
-{
- guc->interrupts.reset(guc_to_i915(guc));
-}
-
-static void guc_enable_interrupts(struct intel_guc *guc)
-{
- guc->interrupts.enable(guc_to_i915(guc));
-}
-
-static void guc_disable_interrupts(struct intel_guc *guc)
-{
- guc->interrupts.disable(guc_to_i915(guc));
-}
-
-static int guc_enable_communication(struct intel_guc *guc)
-{
- guc_enable_interrupts(guc);
-
- return intel_guc_ct_enable(&guc->ct);
-}
-
-static void guc_stop_communication(struct intel_guc *guc)
-{
- intel_guc_ct_stop(&guc->ct);
-
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-}
-
-static void guc_disable_communication(struct intel_guc *guc)
-{
- intel_guc_ct_disable(&guc->ct);
-
- guc_disable_interrupts(guc);
-
- guc->send = intel_guc_send_nop;
- guc->handler = intel_guc_to_host_event_handler_nop;
-}
-
-int intel_uc_init_misc(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
- int ret;
-
- if (!USES_GUC(i915))
- return 0;
-
- ret = intel_guc_init_misc(guc);
- if (ret)
- return ret;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_init_misc(huc);
- if (ret)
- goto err_guc;
- }
-
- return 0;
-
-err_guc:
- intel_guc_fini_misc(guc);
- return ret;
-}
-
-void intel_uc_fini_misc(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- if (!USES_GUC(i915))
- return;
-
- if (USES_HUC(i915))
- intel_huc_fini_misc(huc);
-
- intel_guc_fini_misc(guc);
-}
-
-int intel_uc_init(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
- int ret;
-
- if (!USES_GUC(i915))
- return 0;
-
- if (!HAS_GUC(i915))
- return -ENODEV;
-
- /* XXX: GuC submission is unavailable for now */
- GEM_BUG_ON(USES_GUC_SUBMISSION(i915));
-
- ret = intel_guc_init(guc);
- if (ret)
- return ret;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_init(huc);
- if (ret)
- goto err_guc;
- }
-
- if (USES_GUC_SUBMISSION(i915)) {
- /*
- * This is stuff we need to have available at fw load time
- * if we are planning to enable submission later
- */
- ret = intel_guc_submission_init(guc);
- if (ret)
- goto err_huc;
- }
-
- return 0;
-
-err_huc:
- if (USES_HUC(i915))
- intel_huc_fini(huc);
-err_guc:
- intel_guc_fini(guc);
- return ret;
-}
-
-void intel_uc_fini(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- if (!USES_GUC(i915))
- return;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- if (USES_GUC_SUBMISSION(i915))
- intel_guc_submission_fini(guc);
-
- if (USES_HUC(i915))
- intel_huc_fini(&i915->huc);
-
- intel_guc_fini(guc);
-}
-
-static void __uc_sanitize(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- intel_huc_sanitize(huc);
- intel_guc_sanitize(guc);
-
- __intel_uc_reset_hw(i915);
-}
-
-void intel_uc_sanitize(struct drm_i915_private *i915)
-{
- if (!USES_GUC(i915))
- return;
-
- __uc_sanitize(i915);
-}
-
-int intel_uc_init_hw(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- struct intel_huc *huc = &i915->huc;
- int ret, attempts;
-
- if (!USES_GUC(i915))
- return 0;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- guc_reset_interrupts(guc);
-
- /* WaEnableuKernelHeaderValidFix:skl */
- /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN(i915, 9))
- attempts = 3;
- else
- attempts = 1;
-
- while (attempts--) {
- /*
- * Always reset the GuC just before (re)loading, so
- * that the state and timing are fairly predictable
- */
- ret = __intel_uc_reset_hw(i915);
- if (ret)
- goto err_out;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_fw_upload(huc);
- if (ret)
- goto err_out;
- }
-
- intel_guc_ads_reset(guc);
- intel_guc_init_params(guc);
- ret = intel_guc_fw_upload(guc);
- if (ret == 0)
- break;
-
- DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
- "retry %d more time(s)\n", ret, attempts);
- }
-
- /* Did we succeed or run out of retries? */
- if (ret)
- goto err_log_capture;
-
- ret = guc_enable_communication(guc);
- if (ret)
- goto err_log_capture;
-
- if (USES_HUC(i915)) {
- ret = intel_huc_auth(huc);
- if (ret)
- goto err_communication;
- }
-
- ret = intel_guc_sample_forcewake(guc);
- if (ret)
- goto err_communication;
-
- if (USES_GUC_SUBMISSION(i915)) {
- ret = intel_guc_submission_enable(guc);
- if (ret)
- goto err_communication;
- }
-
- dev_info(i915->drm.dev, "GuC firmware version %u.%u\n",
- guc->fw.major_ver_found, guc->fw.minor_ver_found);
- dev_info(i915->drm.dev, "GuC submission %s\n",
- enableddisabled(USES_GUC_SUBMISSION(i915)));
- dev_info(i915->drm.dev, "HuC %s\n",
- enableddisabled(USES_HUC(i915)));
-
- return 0;
-
- /*
- * We've failed to load the firmware :(
- */
-err_communication:
- guc_disable_communication(guc);
-err_log_capture:
- guc_capture_load_err_log(guc);
-err_out:
- __uc_sanitize(i915);
-
- /*
- * Note that there is no fallback as either the user explicitly asked for
- * the GuC or the driver default option was to run with the GuC enabled.
- */
- if (GEM_WARN_ON(ret == -EIO))
- ret = -EINVAL;
-
- dev_err(i915->drm.dev, "GuC initialization failed %d\n", ret);
- return ret;
-}
-
-void intel_uc_fini_hw(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- GEM_BUG_ON(!HAS_GUC(i915));
-
- if (USES_GUC_SUBMISSION(i915))
- intel_guc_submission_disable(guc);
-
- guc_disable_communication(guc);
- __uc_sanitize(i915);
-}
-
-/**
- * intel_uc_reset_prepare - Prepare for reset
- * @i915: device private
- *
- * Preparing for full gpu reset.
- */
-void intel_uc_reset_prepare(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- guc_stop_communication(guc);
- __uc_sanitize(i915);
-}
-
-void intel_uc_runtime_suspend(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- int err;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- err = intel_guc_suspend(guc);
- if (err)
- DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
-
- guc_disable_communication(guc);
-}
-
-void intel_uc_suspend(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- intel_wakeref_t wakeref;
-
- if (!intel_guc_is_loaded(guc))
- return;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- intel_uc_runtime_suspend(i915);
-}
-
-int intel_uc_resume(struct drm_i915_private *i915)
-{
- struct intel_guc *guc = &i915->guc;
- int err;
-
- if (!intel_guc_is_loaded(guc))
- return 0;
-
- guc_enable_communication(guc);
-
- err = intel_guc_resume(guc);
- if (err) {
- DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
- return err;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
deleted file mode 100644
index f342ddd47df8..000000000000
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ /dev/null
@@ -1,357 +0,0 @@
-/*
- * Copyright © 2016-2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <linux/bitfield.h>
-#include <linux/firmware.h>
-#include <drm/drm_print.h>
-
-#include "intel_uc_fw.h"
-#include "i915_drv.h"
-
-/**
- * intel_uc_fw_fetch - fetch uC firmware
- *
- * @dev_priv: device private
- * @uc_fw: uC firmware
- *
- * Fetch uC firmware into GEM obj.
- */
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- if (!uc_fw->path) {
- dev_info(dev_priv->drm.dev,
- "%s: No firmware was defined for %s!\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_platform_name(INTEL_INFO(dev_priv)->platform));
- return;
- }
-
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- err = request_firmware(&fw, uc_fw->path, &pdev->dev);
- if (err) {
- DRM_DEBUG_DRIVER("%s fw request_firmware err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
-
- DRM_DEBUG_DRIVER("%s fw size %zu ptr %p\n",
- intel_uc_fw_type_repr(uc_fw->type), fw->size, fw);
-
- /* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_WARN("%s: Unexpected firmware size (%zu, min %zu)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- fw->size, sizeof(struct uc_css_header));
- err = -ENODATA;
- goto fail;
- }
-
- css = (struct uc_css_header *)fw->data;
-
- /* Firmware bits always start from header */
- uc_fw->header_offset = 0;
- uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
- css->key_size_dw - css->exponent_size_dw) *
- sizeof(u32);
-
- if (uc_fw->header_size != sizeof(struct uc_css_header)) {
- DRM_WARN("%s: Mismatched firmware header definition\n",
- intel_uc_fw_type_repr(uc_fw->type));
- err = -ENOEXEC;
- goto fail;
- }
-
- /* then, uCode */
- uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
- uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
- /* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_COUNT) {
- DRM_WARN("%s: Mismatched firmware RSA key size (%u)\n",
- intel_uc_fw_type_repr(uc_fw->type), css->key_size_dw);
- err = -ENOEXEC;
- goto fail;
- }
- uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
- /* The blob must be large enough for the header, uCode and RSA combined. */
- size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_WARN("%s: Truncated firmware (%zu, expected %zu)\n",
- intel_uc_fw_type_repr(uc_fw->type), fw->size, size);
- err = -ENOEXEC;
- goto fail;
- }
-
- /* Get version numbers from the CSS header */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR,
- css->sw_version);
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR,
- css->sw_version);
- uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR,
- css->sw_version);
- break;
-
- default:
- MISSING_CASE(uc_fw->type);
- break;
- }
-
- DRM_DEBUG_DRIVER("%s fw version %u.%u (wanted %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
- DRM_NOTE("%s: Skipping firmware version check\n",
- intel_uc_fw_type_repr(uc_fw->type));
- } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("%s: Wrong firmware version (%u.%u, required %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
- }
-
- obj = i915_gem_object_create_shmem_from_data(dev_priv,
- fw->data, fw->size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- DRM_DEBUG_DRIVER("%s fw object_create err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
- goto fail;
- }
-
- uc_fw->obj = obj;
- uc_fw->size = fw->size;
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- release_firmware(fw);
- return;
-
-fail:
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
- DRM_DEBUG_DRIVER("%s fw fetch %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- DRM_WARN("%s: Failed to fetch firmware %s (error %d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- DRM_INFO("%s: Firmware can be downloaded from %s\n",
- intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
-
- release_firmware(fw); /* OK even if fw is NULL */
-}
-
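The removed fetch path slices the firmware blob into header, uCode and RSA regions laid out back to back, with sizes derived from the CSS header's dword counts. A standalone rendition of just that offset arithmetic, with hypothetical dword values:

/*
 * The deleted fetch path above computes three consecutive regions.
 * Only the offset arithmetic mirrors the code; the dword counts are
 * hypothetical sample values.
 */
#include <stdio.h>

int main(void)
{
	/* Hypothetical CSS header fields, in 32-bit dwords; real blobs differ. */
	unsigned int header_size_dw = 161, modulus_size_dw = 64,
		     key_size_dw = 64, exponent_size_dw = 1, size_dw = 32929;

	unsigned int header_size = (header_size_dw - modulus_size_dw -
				    key_size_dw - exponent_size_dw) * 4;
	unsigned int ucode_offset = 0 + header_size;   /* header starts at 0 */
	unsigned int ucode_size = (size_dw - header_size_dw) * 4;
	unsigned int rsa_offset = ucode_offset + ucode_size;
	unsigned int rsa_size = key_size_dw * 4;

	printf("header 0+%u, ucode %u+%u, rsa %u+%u\n",
	       header_size, ucode_offset, ucode_size, rsa_offset, rsa_size);
	return 0;
}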
-static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
- struct i915_vma dummy = {
- .node.start = intel_uc_fw_ggtt_offset(uc_fw),
- .node.size = obj->base.size,
- .pages = obj->mm.pages,
- .vm = &ggtt->vm,
- };
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
-
- /* uc_fw->obj cache domains were not controlled across suspend */
- drm_clflush_sg(dummy.pages);
-
- ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
-}
-
-static void intel_uc_fw_ggtt_unbind(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj = uc_fw->obj;
- struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
- u64 start = intel_uc_fw_ggtt_offset(uc_fw);
-
- ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
-}
-
-/**
- * intel_uc_fw_upload - load uC firmware using custom loader
- * @uc_fw: uC firmware
- * @xfer: custom uC firmware loader function
- *
- * Loads uC firmware using custom loader and updates internal flags.
- *
- * Return: 0 on success, non-zero on failure.
- */
-int intel_uc_fw_upload(struct intel_uc_fw *uc_fw,
- int (*xfer)(struct intel_uc_fw *uc_fw))
-{
- int err;
-
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
-
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -ENOEXEC;
-
- uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->load_status));
-
- /* Call custom loader */
- intel_uc_fw_ggtt_bind(uc_fw);
- err = xfer(uc_fw);
- intel_uc_fw_ggtt_unbind(uc_fw);
- if (err)
- goto fail;
-
- uc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->load_status));
-
- DRM_INFO("%s: Loaded firmware %s (version %u.%u)\n",
- intel_uc_fw_type_repr(uc_fw->type),
- uc_fw->path,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
-
- return 0;
-
-fail:
- uc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
- DRM_DEBUG_DRIVER("%s fw load %s\n",
- intel_uc_fw_type_repr(uc_fw->type),
- intel_uc_fw_status_repr(uc_fw->load_status));
-
- DRM_WARN("%s: Failed to load firmware %s (error %d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
-
- return err;
-}
-
-int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
-{
- int err;
-
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return -ENOEXEC;
-
- err = i915_gem_object_pin_pages(uc_fw->obj);
- if (err)
- DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
- intel_uc_fw_type_repr(uc_fw->type), err);
-
- return err;
-}
-
-void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
-{
- if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
- return;
-
- i915_gem_object_unpin_pages(uc_fw->obj);
-}
-
-u32 intel_uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_private *i915 = to_i915(uc_fw->obj->base.dev);
- struct i915_ggtt *ggtt = &i915->ggtt;
- struct drm_mm_node *node = &ggtt->uc_fw;
-
- GEM_BUG_ON(!node->allocated);
- GEM_BUG_ON(upper_32_bits(node->start));
- GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
-
- return lower_32_bits(node->start);
-}
-
-/**
- * intel_uc_fw_cleanup_fetch - cleanup uC firmware
- *
- * @uc_fw: uC firmware
- *
- * Cleans up uC firmware by releasing the firmware GEM obj.
- */
-void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
-{
- struct drm_i915_gem_object *obj;
-
- obj = fetch_and_zero(&uc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
-
-/**
- * intel_uc_fw_dump - dump information about uC firmware
- * @uc_fw: uC firmware
- * @p: the &drm_printer
- *
- * Pretty printer for uC firmware.
- */
-void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
-{
- drm_printf(p, "%s firmware: %s\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
- drm_printf(p, "\tstatus: fetch %s, load %s\n",
- intel_uc_fw_status_repr(uc_fw->fetch_status),
- intel_uc_fw_status_repr(uc_fw->load_status));
- drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
- uc_fw->major_ver_found, uc_fw->minor_ver_found);
- drm_printf(p, "\theader: offset %u, size %u\n",
- uc_fw->header_offset, uc_fw->header_size);
- drm_printf(p, "\tuCode: offset %u, size %u\n",
- uc_fw->ucode_offset, uc_fw->ucode_size);
- drm_printf(p, "\tRSA: offset %u, size %u\n",
- uc_fw->rsa_offset, uc_fw->rsa_size);
-}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index da33aa672c3d..475ab3d4d91d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -78,6 +78,8 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
+ GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
+ d->uncore->fw_domains_timer |= d->mask;
d->wake_count++;
hrtimer_start_range_ns(&d->timer,
NSEC_PER_MSEC,
@@ -322,7 +324,7 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
/* On VLV, FIFO will be shared by both SW and HW.
* So, we need to read the FREE_ENTRIES every time */
- if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
+ if (IS_VALLEYVIEW(uncore->i915))
n = fifo_free_entries(uncore);
else
n = uncore->fifo_count;
@@ -344,7 +346,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
- struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
+ struct intel_uncore *uncore = domain->uncore;
unsigned long irqflags;
assert_rpm_device_not_suspended(uncore->rpm);
@@ -353,9 +355,10 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
return HRTIMER_RESTART;
spin_lock_irqsave(&uncore->lock, irqflags);
- if (WARN_ON(domain->wake_count == 0))
- domain->wake_count++;
+ uncore->fw_domains_timer &= ~domain->mask;
+
+ GEM_BUG_ON(!domain->wake_count);
if (--domain->wake_count == 0)
uncore->funcs.force_wake_put(uncore, domain->mask);
@@ -485,15 +488,13 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
return ret;
}
-static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
- unsigned int restore_forcewake)
+static void forcewake_early_sanitize(struct intel_uncore *uncore,
+ unsigned int restore_forcewake)
{
- /* clear out unclaimed reg detection bit */
- if (check_for_unclaimed_mmio(uncore))
- DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
/* WaDisableShadowRegForCpd:chv */
- if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
+ if (IS_CHERRYVIEW(uncore->i915)) {
__raw_uncore_write32(uncore, GTFIFOCTL,
__raw_uncore_read32(uncore, GTFIFOCTL) |
GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
@@ -515,6 +516,9 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
void intel_uncore_suspend(struct intel_uncore *uncore)
{
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
+
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
&uncore->pmic_bus_access_nb);
@@ -526,21 +530,24 @@ void intel_uncore_resume_early(struct intel_uncore *uncore)
{
unsigned int restore_forcewake;
+ if (intel_uncore_unclaimed_mmio(uncore))
+ DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");
+
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
+
restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
- __intel_uncore_early_sanitize(uncore, restore_forcewake);
+ forcewake_early_sanitize(uncore, restore_forcewake);
iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
- iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
-}
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
-{
- /* BIOS often leaves RC6 enabled, but disable it for hw init */
- intel_sanitize_gt_powersave(dev_priv);
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
@@ -628,7 +635,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
spin_lock_irq(&uncore->lock);
if (!--uncore->user_forcewake.count) {
if (intel_uncore_unclaimed_mmio(uncore))
- dev_info(uncore_to_i915(uncore)->drm.dev,
+ dev_info(uncore->i915->drm.dev,
"Invalid mmio detected during user access\n");
uncore->unclaimed_mmio_check =
@@ -669,8 +676,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
fw_domains &= uncore->fw_domains;
for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
- if (WARN_ON(domain->wake_count == 0))
- continue;
+ GEM_BUG_ON(!domain->wake_count);
if (--domain->wake_count) {
domain->active = true;
@@ -734,15 +740,42 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore)
void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
+ struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
+ return;
+
if (!uncore->funcs.force_wake_get)
return;
+ spin_lock_irq(&uncore->lock);
+
assert_rpm_wakelock_held(uncore->rpm);
fw_domains &= uncore->fw_domains;
WARN(fw_domains & ~uncore->fw_domains_active,
"Expected %08x fw_domains to be active, but %08x are off\n",
fw_domains, fw_domains & ~uncore->fw_domains_active);
+
+ /*
+ * Check that the caller has an explicit wakeref and we don't mistake
+ * it for the auto wakeref.
+ */
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ unsigned int actual = READ_ONCE(domain->wake_count);
+ unsigned int expect = 1;
+
+ if (uncore->fw_domains_timer & domain->mask)
+ expect++; /* pending automatic release */
+
+ if (WARN(actual < expect,
+ "Expected domain %d to be held awake by caller, count=%d\n",
+ domain->id, actual))
+ break;
+ }
+
+ spin_unlock_irq(&uncore->lock);
}
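The expanded assert_forcewakes_active() expects each domain's wake_count to cover the caller's explicit reference, plus one more when the domain's auto-release timer is still pending (its bit set in fw_domains_timer). A toy model of that expectation check:

/*
 * Toy model of the wake_count expectation in the assertion above:
 * one reference for the caller, one more if the auto-release timer
 * has not fired yet. Values are invented for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int wake_count = 2;      /* caller ref + pending timer ref */
	unsigned int timer_pending = 1;   /* domain bit set in fw_domains_timer */
	unsigned int expect = 1 + timer_pending;

	if (wake_count < expect)
		printf("domain not held awake by caller\n");
	else
		printf("ok: count=%u expect=%u\n", wake_count, expect);
	return 0;
}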
/* We give fast paths for the really cool registers */
@@ -901,6 +934,12 @@ static bool is_gen##x##_shadowed(u32 offset) \
__is_genX_shadowed(8)
__is_genX_shadowed(11)
+static enum forcewake_domains
+gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ return FORCEWAKE_RENDER;
+}
+
#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
@@ -1123,8 +1162,7 @@ static noinline void ___force_wake_auto(struct intel_uncore *uncore,
static inline void __force_wake_auto(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- if (WARN_ON(!fw_domains))
- return;
+ GEM_BUG_ON(!fw_domains);
/* Turn on all requested but inactive supported forcewake domains. */
fw_domains &= uncore->fw_domains;
@@ -1145,26 +1183,23 @@ func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
val = __raw_uncore_read##x(uncore, reg); \
GEN6_READ_FOOTER; \
}
-#define __gen6_read(x) __gen_read(gen6, x)
-#define __fwtable_read(x) __gen_read(fwtable, x)
-#define __gen11_fwtable_read(x) __gen_read(gen11_fwtable, x)
-
-__gen11_fwtable_read(8)
-__gen11_fwtable_read(16)
-__gen11_fwtable_read(32)
-__gen11_fwtable_read(64)
-__fwtable_read(8)
-__fwtable_read(16)
-__fwtable_read(32)
-__fwtable_read(64)
-__gen6_read(8)
-__gen6_read(16)
-__gen6_read(32)
-__gen6_read(64)
-
-#undef __gen11_fwtable_read
-#undef __fwtable_read
-#undef __gen6_read
+
+#define __gen_reg_read_funcs(func) \
+static enum forcewake_domains \
+func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+ return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_read(func, 8) \
+__gen_read(func, 16) \
+__gen_read(func, 32) \
+__gen_read(func, 64)
+
+__gen_reg_read_funcs(gen11_fwtable);
+__gen_reg_read_funcs(fwtable);
+__gen_reg_read_funcs(gen6);
+
+#undef __gen_reg_read_funcs
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
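The read-side refactor above collapses the per-generation accessor boilerplate into __gen_reg_read_funcs(), which token-pastes one prefix into a whole family of functions. A simplified sketch of that pattern with no MMIO or forcewake involved; the names are invented:

/*
 * Token-pasting sketch of the accessor-generating macro pattern used
 * above: one invocation stamps out a family of functions sharing a
 * prefix. Simplified stand-in, not the driver's macros.
 */
#include <stdio.h>

#define DEFINE_READERS(func)                                   \
static unsigned int func##_read8(unsigned int reg)             \
{ return reg & 0xff; }                                         \
static unsigned int func##_read32(unsigned int reg)            \
{ return reg; }

DEFINE_READERS(fwtable)   /* defines fwtable_read8/fwtable_read32 */

int main(void)
{
	printf("%u %u\n", fwtable_read8(0x1234), fwtable_read32(0x1234));
	return 0;
}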
@@ -1225,6 +1260,9 @@ gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace)
__raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
#define __gen_write(func, x) \
static void \
@@ -1237,38 +1275,33 @@ func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trac
__raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
-#define __gen8_write(x) __gen_write(gen8, x)
-#define __fwtable_write(x) __gen_write(fwtable, x)
-#define __gen11_fwtable_write(x) __gen_write(gen11_fwtable, x)
-
-__gen11_fwtable_write(8)
-__gen11_fwtable_write(16)
-__gen11_fwtable_write(32)
-__fwtable_write(8)
-__fwtable_write(16)
-__fwtable_write(32)
-__gen8_write(8)
-__gen8_write(16)
-__gen8_write(32)
-__gen6_write(8)
-__gen6_write(16)
-__gen6_write(32)
-#undef __gen11_fwtable_write
-#undef __fwtable_write
-#undef __gen8_write
-#undef __gen6_write
+#define __gen_reg_write_funcs(func) \
+static enum forcewake_domains \
+func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
+ return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
+} \
+\
+__gen_write(func, 8) \
+__gen_write(func, 16) \
+__gen_write(func, 32)
+
+__gen_reg_write_funcs(gen11_fwtable);
+__gen_reg_write_funcs(fwtable);
+__gen_reg_write_funcs(gen8);
+
+#undef __gen_reg_write_funcs
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
-#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
+#define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
(uncore)->funcs.mmio_writeb = x##_write8; \
(uncore)->funcs.mmio_writew = x##_write16; \
(uncore)->funcs.mmio_writel = x##_write32; \
} while (0)
-#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
+#define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
do { \
(uncore)->funcs.mmio_readb = x##_read8; \
(uncore)->funcs.mmio_readw = x##_read16; \
@@ -1276,24 +1309,39 @@ do { \
(uncore)->funcs.mmio_readq = x##_read64; \
} while (0)
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
+do { \
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
+ (uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
+} while (0)
+
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
+do { \
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
+ (uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
+} while (0)
-static void fw_domain_init(struct intel_uncore *uncore,
- enum forcewake_domain_id domain_id,
- i915_reg_t reg_set,
- i915_reg_t reg_ack)
+static int __fw_domain_init(struct intel_uncore *uncore,
+ enum forcewake_domain_id domain_id,
+ i915_reg_t reg_set,
+ i915_reg_t reg_ack)
{
struct intel_uncore_forcewake_domain *d;
- if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
- return;
+ GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
+ GEM_BUG_ON(uncore->fw_domain[domain_id]);
- d = &uncore->fw_domain[domain_id];
+ if (i915_inject_probe_failure())
+ return -ENOMEM;
- WARN_ON(d->wake_count);
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
WARN_ON(!i915_mmio_reg_valid(reg_set));
WARN_ON(!i915_mmio_reg_valid(reg_ack));
+ d->uncore = uncore;
d->wake_count = 0;
d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
@@ -1310,7 +1358,6 @@ static void fw_domain_init(struct intel_uncore *uncore,
BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
-
d->mask = BIT(domain_id);
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -1319,6 +1366,10 @@ static void fw_domain_init(struct intel_uncore *uncore,
uncore->fw_domains |= BIT(domain_id);
fw_domain_reset(d);
+
+ uncore->fw_domain[domain_id] = d;
+
+ return 0;
}
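__fw_domain_init() now allocates each domain dynamically and can fail, both genuinely (kzalloc) and artificially via i915_inject_probe_failure(), so probe-time error paths actually get exercised. A rough sketch of that fault-injection shape; inject_failure() and the countdown are hypothetical stand-ins for the kernel's hook:

#include <errno.h>
#include <stdlib.h>

struct domain { int id; };

static int fail_countdown = 2;          /* pretend module parameter */

static int inject_failure(void)         /* hypothetical stand-in */
{
        return fail_countdown && --fail_countdown == 0;
}

static int domain_init(struct domain **slot, int id)
{
        struct domain *d;

        if (inject_failure())           /* simulate allocation failure */
                return -ENOMEM;

        d = calloc(1, sizeof(*d));
        if (!d)
                return -ENOMEM;

        d->id = id;
        *slot = d;                      /* publish only on success */
        return 0;
}

int main(void)
{
        struct domain *domains[4] = {0};
        int i, err = 0;

        for (i = 0; i < 4 && !err; i++)
                err = domain_init(&domains[i], i);

        if (err)                        /* unwind whatever succeeded */
                while (i--)
                        free(domains[i]);
        return err ? 1 : 0;
}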
static void fw_domain_fini(struct intel_uncore *uncore,
@@ -1326,30 +1377,41 @@ static void fw_domain_fini(struct intel_uncore *uncore,
{
struct intel_uncore_forcewake_domain *d;
- if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
- return;
+ GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
- d = &uncore->fw_domain[domain_id];
+ d = fetch_and_zero(&uncore->fw_domain[domain_id]);
+ if (!d)
+ return;
+ uncore->fw_domains &= ~BIT(domain_id);
WARN_ON(d->wake_count);
WARN_ON(hrtimer_cancel(&d->timer));
- memset(d, 0, sizeof(*d));
+ kfree(d);
+}
- uncore->fw_domains &= ~BIT(domain_id);
+static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
+{
+ struct intel_uncore_forcewake_domain *d;
+ int tmp;
+
+ for_each_fw_domain(d, uncore, tmp)
+ fw_domain_fini(uncore, d->id);
}
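fw_domain_fini() uses fetch_and_zero() to take ownership of the pointer and clear the slot in one expression, which makes a second fini call a harmless no-op. A sketch of that idiom outside the kernel, using the same GNU C statement-expression trick the kernel's macro relies on:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* take the current value and leave zero behind, in one expression */
#define fetch_and_zero(ptr) ({                  \
        __typeof__(*(ptr)) __v = *(ptr);        \
        memset((ptr), 0, sizeof(*(ptr)));       \
        __v;                                    \
})

static void domain_fini(char **slot)
{
        char *d = fetch_and_zero(slot);

        if (!d)                 /* second call becomes a no-op */
                return;
        free(d);
}

int main(void)
{
        char *d = strdup("render");

        domain_fini(&d);
        domain_fini(&d);        /* safe: slot already NULL */
        printf("%p\n", (void *)d);
        return 0;
}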
-static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
+static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct drm_i915_private *i915 = uncore->i915;
+ int ret = 0;
- if (!intel_uncore_has_forcewake(uncore))
- return;
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
+
+#define fw_domain_init(uncore__, id__, set__, ack__) \
+ (ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
if (INTEL_GEN(i915) >= 11) {
int i;
- uncore->funcs.force_wake_get =
- fw_domains_get_with_fallback;
+ uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
uncore->funcs.force_wake_put = fw_domains_put;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
@@ -1357,6 +1419,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
+
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(i915, _VCS(i)))
continue;
@@ -1374,8 +1437,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
} else if (IS_GEN_RANGE(i915, 9, 10)) {
- uncore->funcs.force_wake_get =
- fw_domains_get_with_fallback;
+ uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
uncore->funcs.force_wake_put = fw_domains_put;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
@@ -1424,8 +1486,10 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
__raw_uncore_write32(uncore, FORCEWAKE, 0);
__raw_posting_read(uncore, ECOBUS);
- fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
- FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+ ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
+ FORCEWAKE_MT, FORCEWAKE_MT_ACK);
+ if (ret)
+ goto out;
spin_lock_irq(&uncore->lock);
fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
@@ -1436,6 +1500,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
+ fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
@@ -1447,8 +1512,16 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE, FORCEWAKE_ACK);
}
+#undef fw_domain_init
+
/* All future platforms are expected to require complex power gating */
- WARN_ON(uncore->fw_domains == 0);
+ WARN_ON(!ret && uncore->fw_domains == 0);
+
+out:
+ if (ret)
+ intel_uncore_fw_domains_fini(uncore);
+
+ return ret;
}
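Inside intel_uncore_fw_domains_init(), the local fw_domain_init() wrapper expands to `ret ?: (ret = __fw_domain_init(...))`, so the long table-like run of init calls keeps its flat shape while latching the first error and skipping everything after it. The same trick in isolation, using the GNU `?:` shorthand the kernel uses (names hypothetical):

#include <stdio.h>

static int step(int id, int fail_at, int *calls)
{
        ++*calls;
        return id == fail_at ? -5 : 0;
}

/* evaluate the call only while no error has been latched yet */
#define TRY(ret, call) ((ret) ?: ((ret) = (call)))

int main(void)
{
        int ret = 0, calls = 0;

        TRY(ret, step(0, 2, &calls));
        TRY(ret, step(1, 2, &calls));
        TRY(ret, step(2, 2, &calls));   /* fails, latches -5 */
        TRY(ret, step(3, 2, &calls));   /* short-circuited, never runs */

        printf("ret=%d calls=%d\n", ret, calls);   /* ret=-5 calls=3 */
        return 0;
}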
#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
@@ -1493,7 +1566,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
static int uncore_mmio_setup(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct drm_i915_private *i915 = uncore->i915;
struct pci_dev *pdev = i915->drm.pdev;
int mmio_bar;
int mmio_size;
@@ -1523,49 +1596,46 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
static void uncore_mmio_cleanup(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = uncore->i915->drm.pdev;
pci_iounmap(pdev, uncore->regs);
}
-void intel_uncore_init_early(struct intel_uncore *uncore)
+void intel_uncore_init_early(struct intel_uncore *uncore,
+ struct drm_i915_private *i915)
{
spin_lock_init(&uncore->lock);
+ uncore->i915 = i915;
+ uncore->rpm = &i915->runtime_pm;
}
-int intel_uncore_init_mmio(struct intel_uncore *uncore)
+static void uncore_raw_init(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- int ret;
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
- ret = uncore_mmio_setup(uncore);
- if (ret)
- return ret;
-
- i915_check_vgpu(i915);
+ if (IS_GEN(uncore->i915, 5)) {
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
+ } else {
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
+ }
+}
- if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
- uncore->flags |= UNCORE_HAS_FORCEWAKE;
+static int uncore_forcewake_init(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+ int ret;
- intel_uncore_fw_domains_init(uncore);
- __intel_uncore_early_sanitize(uncore, 0);
+ GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
- uncore->unclaimed_mmio_check = 1;
- uncore->pmic_bus_access_nb.notifier_call =
- i915_pmic_bus_access_notifier;
+ ret = intel_uncore_fw_domains_init(uncore);
+ if (ret)
+ return ret;
- uncore->rpm = &i915->runtime_pm;
+ forcewake_early_sanitize(uncore, 0);
- if (!intel_uncore_has_forcewake(uncore)) {
- if (IS_GEN(i915, 5)) {
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
- } else {
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
- }
- } else if (IS_GEN_RANGE(i915, 6, 7)) {
+ if (IS_GEN_RANGE(i915, 6, 7)) {
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
if (IS_VALLEYVIEW(i915)) {
@@ -1579,7 +1649,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
-
} else {
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
@@ -1594,6 +1663,40 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
}
+ uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+
+ return 0;
+}
+
+int intel_uncore_init_mmio(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+ int ret;
+
+ ret = uncore_mmio_setup(uncore);
+ if (ret)
+ return ret;
+
+ if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+ uncore->flags |= UNCORE_HAS_FORCEWAKE;
+
+ uncore->unclaimed_mmio_check = 1;
+
+ if (!intel_uncore_has_forcewake(uncore)) {
+ uncore_raw_init(uncore);
+ } else {
+ ret = uncore_forcewake_init(uncore);
+ if (ret)
+ goto out_mmio_cleanup;
+ }
+
+ /* make sure fw funcs are set if and only if we have fw*/
+ /* make sure fw funcs are set if and only if we have fw */
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
+
if (HAS_FPGA_DBG_UNCLAIMED(i915))
uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
@@ -1603,9 +1706,16 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
if (IS_GEN_RANGE(i915, 6, 7))
uncore->flags |= UNCORE_HAS_FIFO;
- iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+ /* clear out unclaimed reg detection bit */
+ if (check_for_unclaimed_mmio(uncore))
+ DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
return 0;
+
+out_mmio_cleanup:
+ uncore_mmio_cleanup(uncore);
+
+ return ret;
}
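intel_uncore_init_mmio() now unwinds the mmio mapping through a goto label when forcewake init fails, the usual kernel cleanup-ladder shape: each setup step gets a label that undoes only what already succeeded. A compact sketch of that ladder (helper names hypothetical):

#include <stdlib.h>

static int setup_mmio(void **p)   { *p = malloc(16); return *p ? 0 : -12; }
static void cleanup_mmio(void *p) { free(p); }
static int setup_fw(int fail)     { return fail ? -12 : 0; }

static int device_init(int fail_fw)
{
        void *mmio;
        int ret;

        ret = setup_mmio(&mmio);
        if (ret)
                return ret;

        ret = setup_fw(fail_fw);
        if (ret)
                goto out_mmio_cleanup;  /* undo only what succeeded */

        return 0;

out_mmio_cleanup:
        cleanup_mmio(mmio);
        return ret;
}

int main(void)
{
        return device_init(1) ? 0 : 1;
}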
/*
@@ -1615,45 +1725,46 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
*/
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct drm_i915_private *i915 = uncore->i915;
+ enum forcewake_domains fw_domains = uncore->fw_domains;
+ enum forcewake_domain_id domain_id;
+ int i;
- if (INTEL_GEN(i915) >= 11) {
- enum forcewake_domains fw_domains = uncore->fw_domains;
- enum forcewake_domain_id domain_id;
- int i;
+ if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
+ return;
- for (i = 0; i < I915_MAX_VCS; i++) {
- domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
- if (HAS_ENGINE(i915, _VCS(i)))
- continue;
+ if (HAS_ENGINE(i915, _VCS(i)))
+ continue;
- if (fw_domains & BIT(domain_id))
- fw_domain_fini(uncore, domain_id);
- }
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(uncore, domain_id);
+ }
- for (i = 0; i < I915_MAX_VECS; i++) {
- domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
- if (HAS_ENGINE(i915, _VECS(i)))
- continue;
+ if (HAS_ENGINE(i915, _VECS(i)))
+ continue;
- if (fw_domains & BIT(domain_id))
- fw_domain_fini(uncore, domain_id);
- }
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(uncore, domain_id);
}
}
void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
- /* Paranoia: make sure we have disabled everything before we exit. */
- intel_uncore_sanitize(uncore_to_i915(uncore));
+ if (intel_uncore_has_forcewake(uncore)) {
+ iosf_mbi_punit_acquire();
+ iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+ &uncore->pmic_bus_access_nb);
+ intel_uncore_forcewake_reset(uncore);
+ intel_uncore_fw_domains_fini(uncore);
+ iosf_mbi_punit_release();
+ }
- iosf_mbi_punit_acquire();
- iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
- &uncore->pmic_bus_access_nb);
- intel_uncore_forcewake_reset(uncore);
- iosf_mbi_punit_release();
uncore_mmio_cleanup(uncore);
}
@@ -1871,62 +1982,6 @@ out:
return ret;
}
-static enum forcewake_domains
-intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
- i915_reg_t reg)
-{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- u32 offset = i915_mmio_reg_offset(reg);
- enum forcewake_domains fw_domains;
-
- if (INTEL_GEN(i915) >= 11) {
- fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
- } else if (HAS_FWTABLE(i915)) {
- fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
- } else if (INTEL_GEN(i915) >= 6) {
- fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
- } else {
- /* on devices with FW we expect to hit one of the above cases */
- if (intel_uncore_has_forcewake(uncore))
- MISSING_CASE(INTEL_GEN(i915));
-
- fw_domains = 0;
- }
-
- WARN_ON(fw_domains & ~uncore->fw_domains);
-
- return fw_domains;
-}
-
-static enum forcewake_domains
-intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
- i915_reg_t reg)
-{
- struct drm_i915_private *i915 = uncore_to_i915(uncore);
- u32 offset = i915_mmio_reg_offset(reg);
- enum forcewake_domains fw_domains;
-
- if (INTEL_GEN(i915) >= 11) {
- fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
- } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
- fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
- } else if (IS_GEN(i915, 8)) {
- fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
- } else if (IS_GEN_RANGE(i915, 6, 7)) {
- fw_domains = FORCEWAKE_RENDER;
- } else {
- /* on devices with FW we expect to hit one of the above cases */
- if (intel_uncore_has_forcewake(uncore))
- MISSING_CASE(INTEL_GEN(i915));
-
- fw_domains = 0;
- }
-
- WARN_ON(fw_domains & ~uncore->fw_domains);
-
- return fw_domains;
-}
-
/**
* intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
* a register
@@ -1953,10 +2008,12 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
return 0;
if (op & FW_REG_READ)
- fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
+ fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
if (op & FW_REG_WRITE)
- fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
+ fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
+
+ WARN_ON(fw_domains & ~uncore->fw_domains);
return fw_domains;
}
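With read_fw_domains/write_fw_domains stored in uncore->funcs at init time, intel_uncore_forcewake_for_reg() becomes a straight indirect call instead of re-deciding the platform generation on every register lookup, which is why the two per-call helper functions above could be deleted. A sketch of that decide-once/dispatch-many shape (all names hypothetical):

#include <stdio.h>

struct dev;

struct funcs {
        unsigned int (*domains_for)(const struct dev *d, unsigned int reg);
};

struct dev {
        int gen;
        struct funcs funcs;
};

static unsigned int gen_new(const struct dev *d, unsigned int reg)
{
        (void)d;
        return reg >> 16;
}

static unsigned int gen_old(const struct dev *d, unsigned int reg)
{
        (void)d; (void)reg;
        return 1;
}

static void dev_init(struct dev *d, int gen)
{
        d->gen = gen;
        /* decide once, at init, instead of on every register access */
        d->funcs.domains_for = gen >= 11 ? gen_new : gen_old;
}

int main(void)
{
        struct dev d;

        dev_init(&d, 11);
        printf("%u\n", d.funcs.domains_for(&d, 0x20000));
        return 0;
}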
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 804a0faacc91..2f6ffa309669 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -70,6 +70,11 @@ struct intel_uncore_funcs {
void (*force_wake_put)(struct intel_uncore *uncore,
enum forcewake_domains domains);
+ enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
+ i915_reg_t r);
+ enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
+ i915_reg_t r);
+
u8 (*mmio_readb)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
u16 (*mmio_readw)(struct intel_uncore *uncore,
@@ -97,6 +102,7 @@ struct intel_forcewake_range {
struct intel_uncore {
void __iomem *regs;
+ struct drm_i915_private *i915;
struct intel_runtime_pm *rpm;
spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -117,9 +123,11 @@ struct intel_uncore {
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
+ enum forcewake_domains fw_domains_timer;
enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
struct intel_uncore_forcewake_domain {
+ struct intel_uncore *uncore;
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned int wake_count;
@@ -127,7 +135,7 @@ struct intel_uncore {
struct hrtimer timer;
u32 __iomem *reg_set;
u32 __iomem *reg_ack;
- } fw_domain[FW_DOMAIN_ID_COUNT];
+ } *fw_domain[FW_DOMAIN_ID_COUNT];
struct {
unsigned int count;
@@ -141,18 +149,12 @@ struct intel_uncore {
/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
- for (tmp__ = (mask__); \
- tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+ for (tmp__ = (mask__); tmp__ ;) \
+ for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])
#define for_each_fw_domain(domain__, uncore__, tmp__) \
for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
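The reworked for_each_fw_domain_masked() walks the set bits of a mask with __mask_next_bit(), popping the lowest set bit on each pass so only initialised domains are visited. A userspace equivalent of that bit-popping loop using the ctz builtin (names hypothetical):

#include <stdio.h>

/* pop and return the index of the lowest set bit of *mask */
static int mask_next_bit(unsigned int *mask)
{
        int bit = __builtin_ctz(*mask);

        *mask &= *mask - 1;     /* clear that bit */
        return bit;
}

int main(void)
{
        unsigned int fw_domains = 0x2d;         /* bits 0, 2, 3, 5 */
        unsigned int tmp = fw_domains;

        while (tmp)
                printf("domain %d\n", mask_next_bit(&tmp));
        return 0;
}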
-static inline struct intel_uncore *
-forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
-{
- return container_of(d, struct intel_uncore, fw_domain[d->id]);
-}
-
static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
@@ -177,8 +179,8 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
-void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init_early(struct intel_uncore *uncore);
+void intel_uncore_init_early(struct intel_uncore *uncore,
+ struct drm_i915_private *i915);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 3db6fa682823..06bd8b215cc2 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -5,7 +5,7 @@
*/
#include "intel_runtime_pm.h"
-#include "i915_gem.h"
+#include "intel_wakeref.h"
static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
{
@@ -17,7 +17,7 @@ static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
intel_runtime_pm_put(rpm, wakeref);
- GEM_BUG_ON(!wakeref);
+ INTEL_WAKEREF_BUG_ON(!wakeref);
}
int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
@@ -48,6 +48,7 @@ int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
atomic_inc(&wf->count);
mutex_unlock(&wf->mutex);
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
return 0;
}
@@ -115,7 +116,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
if (!refcount_inc_not_zero(&wf->count)) {
spin_lock_irqsave(&wf->lock, flags);
if (!refcount_inc_not_zero(&wf->count)) {
- GEM_BUG_ON(wf->wakeref);
+ INTEL_WAKEREF_BUG_ON(wf->wakeref);
wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
refcount_set(&wf->count, 1);
}
@@ -134,5 +135,5 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
intel_wakeref_auto(wf, 0);
- GEM_BUG_ON(wf->wakeref);
+ INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 38275310b196..1d6f5986e4e5 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -13,6 +13,12 @@
#include <linux/stackdepot.h>
#include <linux/timer.h>
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
+#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
+#else
+#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
+#endif
+
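INTEL_WAKEREF_BUG_ON() follows the familiar dual-definition pattern: a hard BUG_ON() on CONFIG_DRM_I915_DEBUG builds, and BUILD_BUG_ON_INVALID() otherwise, so the expression is still parsed and type-checked without emitting any runtime code. A sketch of that compile-away assert (macro names hypothetical):

#include <assert.h>
#include <stdio.h>

#ifdef MY_DEBUG
#define MY_BUG_ON(expr) assert(!(expr))
#else
/* still parses and type-checks expr, generates no code */
#define MY_BUG_ON(expr) ((void)sizeof(!!(expr)))
#endif

int main(void)
{
        int count = 1;

        MY_BUG_ON(count <= 0);  /* free on release builds */
        printf("%d\n", count);
        return 0;
}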
struct intel_runtime_pm;
typedef depot_stack_handle_t intel_wakeref_t;
@@ -101,6 +107,7 @@ intel_wakeref_put(struct intel_runtime_pm *rpm,
struct intel_wakeref *wf,
int (*fn)(struct intel_wakeref *wf))
{
+ INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
return __intel_wakeref_put_last(rpm, wf, fn);
@@ -136,13 +143,13 @@ intel_wakeref_unlock(struct intel_wakeref *wf)
}
/**
- * intel_wakeref_active: Query whether the wakeref is currently held
+ * intel_wakeref_is_active: Query whether the wakeref is currently held
* @wf: the wakeref
*
* Returns: true if the wakeref is currently held.
*/
static inline bool
-intel_wakeref_active(struct intel_wakeref *wf)
+intel_wakeref_is_active(const struct intel_wakeref *wf)
{
return READ_ONCE(wf->wakeref);
}
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 7b4ba84b9fb8..0e86a9e85b49 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -74,7 +74,7 @@ void intel_wopcm_init_early(struct intel_wopcm *wopcm)
{
struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- if (!HAS_GUC(i915))
+ if (!HAS_GT_UC(i915))
return;
if (INTEL_GEN(i915) >= 11)
@@ -164,8 +164,8 @@ static inline int check_hw_restriction(struct drm_i915_private *i915,
int intel_wopcm_init(struct intel_wopcm *wopcm)
{
struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
- u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw);
- u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw);
+ u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->gt.uc.guc.fw);
+ u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->gt.uc.huc.fw);
u32 ctx_rsvd = context_reserved_size(i915);
u32 guc_wopcm_base;
u32 guc_wopcm_size;
@@ -177,7 +177,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
GEM_BUG_ON(!wopcm->size);
- if (i915_inject_load_failure())
+ if (i915_inject_probe_failure())
return -E2BIG;
if (guc_fw_size >= wopcm->size) {
@@ -225,17 +225,18 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
return 0;
}
-static inline int write_and_verify(struct drm_i915_private *dev_priv,
- i915_reg_t reg, u32 val, u32 mask,
- u32 locked_bit)
+static int
+write_and_verify(struct intel_gt *gt,
+ i915_reg_t reg, u32 val, u32 mask, u32 locked_bit)
{
+ struct intel_uncore *uncore = gt->uncore;
u32 reg_val;
GEM_BUG_ON(val & ~mask);
- I915_WRITE(reg, val);
+ intel_uncore_write(uncore, reg, val);
- reg_val = I915_READ(reg);
+ reg_val = intel_uncore_read(uncore, reg);
return (reg_val & mask) != (val | locked_bit) ? -EIO : 0;
}
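write_and_verify() programs a lockable register and immediately reads it back, returning -EIO if the hardware did not latch the value with the lock bit set. A sketch of that readback check against a fake register file; the mask constants are illustrative, not the GuC WOPCM values:

#include <stdio.h>

#define SIZE_MASK   0x00000fffu
#define SIZE_LOCKED 0x80000000u

static unsigned int fake_reg;   /* stands in for the mmio register */

static void reg_write(unsigned int val) { fake_reg = val | SIZE_LOCKED; }
static unsigned int reg_read(void)      { return fake_reg; }

static int write_and_verify(unsigned int val, unsigned int mask,
                            unsigned int locked_bit)
{
        unsigned int reg_val;

        reg_write(val);
        reg_val = reg_read();

        /* readback must be the value with the lock bit now set */
        return (reg_val & mask) != (val | locked_bit) ? -5 /* -EIO */ : 0;
}

int main(void)
{
        printf("%d\n", write_and_verify(0x123, SIZE_MASK | SIZE_LOCKED,
                                        SIZE_LOCKED));
        return 0;
}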
@@ -243,6 +244,7 @@ static inline int write_and_verify(struct drm_i915_private *dev_priv,
/**
* intel_wopcm_init_hw() - Setup GuC WOPCM registers.
* @wopcm: pointer to intel_wopcm.
+ * @gt: pointer to the containing GT
*
* Setup the GuC WOPCM size and offset registers with the calculated values. It
* will verify the register values to make sure the registers are locked with
@@ -250,29 +252,30 @@ static inline int write_and_verify(struct drm_i915_private *dev_priv,
*
* Return: 0 on success. -EIO if registers were locked with incorrect values.
*/
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
+int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt)
{
- struct drm_i915_private *dev_priv = wopcm_to_i915(wopcm);
+ struct drm_i915_private *i915 = wopcm_to_i915(wopcm);
+ struct intel_uncore *uncore = gt->uncore;
u32 huc_agent;
u32 mask;
int err;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
+ GEM_BUG_ON(!HAS_GT_UC(i915));
GEM_BUG_ON(!wopcm->guc.size);
GEM_BUG_ON(!wopcm->guc.base);
- err = write_and_verify(dev_priv, GUC_WOPCM_SIZE, wopcm->guc.size,
+ err = write_and_verify(gt, GUC_WOPCM_SIZE, wopcm->guc.size,
GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED,
GUC_WOPCM_SIZE_LOCKED);
if (err)
goto err_out;
- huc_agent = USES_HUC(dev_priv) ? HUC_LOADING_AGENT_GUC : 0;
+ huc_agent = USES_HUC(i915) ? HUC_LOADING_AGENT_GUC : 0;
mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
- err = write_and_verify(dev_priv, DMA_GUC_WOPCM_OFFSET,
+ err = write_and_verify(gt, DMA_GUC_WOPCM_OFFSET,
wopcm->guc.base | huc_agent, mask,
GUC_WOPCM_OFFSET_VALID);
if (err)
@@ -283,8 +286,9 @@ int intel_wopcm_init_hw(struct intel_wopcm *wopcm)
err_out:
DRM_ERROR("Failed to init WOPCM registers:\n");
DRM_ERROR("DMA_GUC_WOPCM_OFFSET=%#x\n",
- I915_READ(DMA_GUC_WOPCM_OFFSET));
- DRM_ERROR("GUC_WOPCM_SIZE=%#x\n", I915_READ(GUC_WOPCM_SIZE));
+ intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
+ DRM_ERROR("GUC_WOPCM_SIZE=%#x\n",
+ intel_uncore_read(uncore, GUC_WOPCM_SIZE));
return err;
}
diff --git a/drivers/gpu/drm/i915/intel_wopcm.h b/drivers/gpu/drm/i915/intel_wopcm.h
index 114401971520..56aaed4d64ff 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.h
+++ b/drivers/gpu/drm/i915/intel_wopcm.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
+struct intel_gt;
+
/**
* struct intel_wopcm - Overall WOPCM info and WOPCM regions.
* @size: Size of overall WOPCM.
@@ -41,6 +43,6 @@ static inline u32 intel_wopcm_guc_size(struct intel_wopcm *wopcm)
void intel_wopcm_init_early(struct intel_wopcm *wopcm);
int intel_wopcm_init(struct intel_wopcm *wopcm);
-int intel_wopcm_init_hw(struct intel_wopcm *wopcm);
+int intel_wopcm_init_hw(struct intel_wopcm *wopcm, struct intel_gt *gt);
#endif
diff --git a/drivers/gpu/drm/i915/oa/Makefile b/drivers/gpu/drm/i915/oa/Makefile
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/i915/oa/Makefile
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
index 4acdb94555b7..4acdb94555b7 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.c
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
index 0e667f1a8aa1..b5ed68882588 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bdw.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_BDW_H__
#define __I915_OA_BDW_H__
-extern void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_bdw(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
index a44195c39923..a44195c39923 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.c
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
index 679e92cf4f1d..43c3e4ab030a 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_bxt.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_BXT_H__
#define __I915_OA_BXT_H__
-extern void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_bxt(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
index 7f60d51b8761..7f60d51b8761 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.c
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
index 4d6025559bbe..1b4b563bc585 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt2.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_CFLGT2_H__
#define __I915_OA_CFLGT2_H__
-extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
index a92c38e3a0ce..a92c38e3a0ce 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.c
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
index 0697f4077402..500565e055cd 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cflgt3.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_CFLGT3_H__
#define __I915_OA_CFLGT3_H__
-extern void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
index 71ec889a0114..71ec889a0114 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.c
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
index 0986eae3135f..ad85d6a6a573 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_chv.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_CHV_H__
#define __I915_OA_CHV_H__
-extern void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_chv(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
index 5c23d883d6c9..5c23d883d6c9 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.c
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
index e830a406aff2..9faaca38b587 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_cnl.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_CNL_H__
#define __I915_OA_CNL_H__
-extern void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
index 4bdda66df7d2..4bdda66df7d2 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.c
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
index 06dedf991edb..cc13a1e9fd3e 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_glk.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_GLK_H__
#define __I915_OA_GLK_H__
-extern void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_glk(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
index cc6526fdd2bd..cc6526fdd2bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.c
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
index 3d0c870cd0bd..f0ddcc79c761 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_hsw.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_HSW_H__
#define __I915_OA_HSW_H__
-extern void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_hsw(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
index baa51427a543..baa51427a543 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.c
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
index 24eaa97d61ba..e501651d385b 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_icl.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_ICL_H__
#define __I915_OA_ICL_H__
-extern void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_icl(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
index 168e49ab0d4d..168e49ab0d4d 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.c
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
index a55398a904de..dc460e6e0fae 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt2.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_KBLGT2_H__
#define __I915_OA_KBLGT2_H__
-extern void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_kblgt2(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
index 6ffa553c388e..6ffa553c388e 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.c
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
index 3ddd3483b7cc..5926992b735a 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_kblgt3.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_KBLGT3_H__
#define __I915_OA_KBLGT3_H__
-extern void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_kblgt3(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
index 7ce6ee851d43..7ce6ee851d43 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.c
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
index be6256037239..353db35b36c1 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt2.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_SKLGT2_H__
#define __I915_OA_SKLGT2_H__
-extern void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_sklgt2(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
index 086ca2631e1c..086ca2631e1c 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.c
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
index 650beb068e56..52f94c674b62 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt3.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_SKLGT3_H__
#define __I915_OA_SKLGT3_H__
-extern void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_sklgt3(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
index b291a6eb8a87..b291a6eb8a87 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.c
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
index 8dcf849d131e..8e364820cc63 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/oa/i915_oa_sklgt4.h
@@ -10,6 +10,6 @@
#ifndef __I915_OA_SKLGT4_H__
#define __I915_OA_SKLGT4_H__
-extern void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
+void i915_perf_load_test_config_sklgt4(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index c0b3537a5fa6..e5cd5d47e380 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -4,7 +4,10 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/kref.h>
+
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -13,37 +16,86 @@
struct live_active {
struct i915_active base;
+ struct kref ref;
bool retired;
};
-static void __live_active_retire(struct i915_active *base)
+static void __live_get(struct live_active *active)
+{
+ kref_get(&active->ref);
+}
+
+static void __live_free(struct live_active *active)
+{
+ i915_active_fini(&active->base);
+ kfree(active);
+}
+
+static void __live_release(struct kref *ref)
+{
+ struct live_active *active = container_of(ref, typeof(*active), ref);
+
+ __live_free(active);
+}
+
+static void __live_put(struct live_active *active)
+{
+ kref_put(&active->ref, __live_release);
+}
+
+static int __live_active(struct i915_active *base)
+{
+ struct live_active *active = container_of(base, typeof(*active), base);
+
+ __live_get(active);
+ return 0;
+}
+
+static void __live_retire(struct i915_active *base)
{
struct live_active *active = container_of(base, typeof(*active), base);
active->retired = true;
+ __live_put(active);
+}
+
+static struct live_active *__live_alloc(struct drm_i915_private *i915)
+{
+ struct live_active *active;
+
+ active = kzalloc(sizeof(*active), GFP_KERNEL);
+ if (!active)
+ return NULL;
+
+ kref_init(&active->ref);
+ i915_active_init(i915, &active->base, __live_active, __live_retire);
+
+ return active;
}
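The selftest now heap-allocates live_active and ties its lifetime to a kref: the active callback takes a reference, retire drops it, and the last put runs the release which frees the object. A reduced, single-threaded stand-in for that refcount-with-release pattern (real kref is atomic; this sketch is not):

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refs;
        int retired;
};

static struct obj *obj_alloc(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (o)
                o->refs = 1;            /* caller owns the first ref */
        return o;
}

static void obj_get(struct obj *o) { o->refs++; }

static void obj_put(struct obj *o)
{
        if (--o->refs == 0) {           /* release runs on the last put */
                printf("released (retired=%d)\n", o->retired);
                free(o);
        }
}

int main(void)
{
        struct obj *o = obj_alloc();

        obj_get(o);                     /* "active" callback */
        o->retired = 1;                 /* "retire" callback ... */
        obj_put(o);                     /* ... drops its ref */
        obj_put(o);                     /* caller's final put frees */
        return 0;
}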
-static int __live_active_setup(struct drm_i915_private *i915,
- struct live_active *active)
+static struct live_active *
+__live_active_setup(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
struct i915_sw_fence *submit;
+ struct live_active *active;
enum intel_engine_id id;
unsigned int count = 0;
int err = 0;
- submit = heap_fence_create(GFP_KERNEL);
- if (!submit)
- return -ENOMEM;
+ active = __live_alloc(i915);
+ if (!active)
+ return ERR_PTR(-ENOMEM);
- i915_active_init(i915, &active->base, __live_active_retire);
- active->retired = false;
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ kfree(active);
+ return ERR_PTR(-ENOMEM);
+ }
- if (!i915_active_acquire(&active->base)) {
- pr_err("First i915_active_acquire should report being idle\n");
- err = -EINVAL;
+ err = i915_active_acquire(&active->base);
+ if (err)
goto out;
- }
for_each_engine(engine, i915, id) {
struct i915_request *rq;
@@ -74,74 +126,92 @@ static int __live_active_setup(struct drm_i915_private *i915,
pr_err("i915_active retired before submission!\n");
err = -EINVAL;
}
- if (active->base.count != count) {
+ if (atomic_read(&active->base.count) != count) {
pr_err("i915_active not tracking all requests, found %d, expected %d\n",
- active->base.count, count);
+ atomic_read(&active->base.count), count);
err = -EINVAL;
}
out:
i915_sw_fence_commit(submit);
heap_fence_put(submit);
+ if (err) {
+ __live_put(active);
+ active = ERR_PTR(err);
+ }
- return err;
+ return active;
}
static int live_active_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct live_active active;
+ struct live_active *active;
intel_wakeref_t wakeref;
- int err;
+ int err = 0;
/* Check that we get a callback when requests retire upon waiting */
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- err = __live_active_setup(i915, &active);
+ active = __live_active_setup(i915);
+ if (IS_ERR(active)) {
+ err = PTR_ERR(active);
+ goto err;
+ }
- i915_active_wait(&active.base);
- if (!active.retired) {
+ i915_active_wait(&active->base);
+ if (!active->retired) {
pr_err("i915_active not retired after waiting!\n");
err = -EINVAL;
}
- i915_active_fini(&active.base);
+ __live_put(active);
+
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
+err:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
+
return err;
}
static int live_active_retire(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct live_active active;
+ struct live_active *active;
intel_wakeref_t wakeref;
- int err;
+ int err = 0;
/* Check that we get a callback when requests are indirectly retired */
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- err = __live_active_setup(i915, &active);
+ active = __live_active_setup(i915);
+ if (IS_ERR(active)) {
+ err = PTR_ERR(active);
+ goto err;
+ }
/* waits for & retires all requests */
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- if (!active.retired) {
+ if (!active->retired) {
pr_err("i915_active not retired after flushing!\n");
err = -EINVAL;
}
- i915_active_fini(&active.base);
+ __live_put(active);
+
+err:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
+
return err;
}
@@ -152,7 +222,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_retire),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index c6a01a6e87f1..bb6dd54a6ff3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -8,6 +8,7 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -115,7 +116,7 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(&i915->gt, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
}
@@ -154,8 +155,6 @@ static int igt_gem_suspend(void *arg)
mutex_lock(&i915->drm.struct_mutex);
err = switch_to_context(i915, ctx);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
out:
mock_file_free(i915, file);
@@ -195,8 +194,6 @@ static int igt_gem_hibernate(void *arg)
mutex_lock(&i915->drm.struct_mutex);
err = switch_to_context(i915, ctx);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
out:
mock_file_free(i915, file);
@@ -210,8 +207,8 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_hibernate),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index a3cb0aade6f1..b6449d0a8c17 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -25,6 +25,7 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -557,7 +558,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 1a60b9fe8221..31a51ca1ddcb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -208,9 +208,7 @@ static int igt_ppgtt_alloc(void *arg)
}
err_ppgtt_cleanup:
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_vm_put(&ppgtt->vm);
- mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -1195,7 +1193,7 @@ static int igt_ggtt_page(void *arg)
iowrite32(n, vaddr + n);
io_mapping_unmap_atomic(vaddr);
}
- i915_gem_flush_ggtt_writes(i915);
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index d5dc4427d664..2b31a4ee0b4c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -12,7 +12,7 @@
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(workarounds, intel_workarounds_live_selftests)
-selftest(timelines, i915_timeline_live_selftests)
+selftest(timelines, intel_timeline_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 510eb176bb2c..b55da4d9ccba 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -15,7 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
-selftest(timelines, i915_timeline_mock_selftests)
+selftest(timelines, intel_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(phys, i915_gem_phys_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 298bb7116c51..86c299663934 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -27,6 +27,8 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
+
#include "i915_random.h"
#include "i915_selftest.h"
#include "igt_live_test.h"
@@ -73,55 +75,58 @@ static int igt_wait_request(void *arg)
err = -ENOMEM;
goto out_unlock;
}
+ i915_request_get(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_completed(request)) {
pr_err("request completed before submit!!\n");
- goto out_unlock;
+ goto out_request;
}
i915_request_add(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_completed(request)) {
pr_err("request completed immediately!\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out!\n");
- goto out_unlock;
+ goto out_request;
}
if (!i915_request_completed(request)) {
pr_err("request not complete after waiting!\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
- goto out_unlock;
+ goto out_request;
}
err = 0;
+out_request:
+ i915_request_put(request);
out_unlock:
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
@@ -366,14 +371,16 @@ static int __igt_breadcrumbs_smoketest(void *arg)
if (!wait_event_timeout(wait->wait,
i915_sw_fence_done(wait),
- HZ / 2)) {
+ 5 * HZ)) {
struct i915_request *rq = requests[count - 1];
- pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n",
- count,
+ pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n",
+ atomic_read(&wait->pending), count,
rq->fence.context, rq->fence.seqno,
t->engine->name);
- i915_gem_set_wedged(t->engine->i915);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(t->engine->gt);
GEM_BUG_ON(!i915_request_completed(rq));
i915_sw_fence_wait(wait);
err = -EIO;
@@ -622,7 +629,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(&i915->gt);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -791,7 +798,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(&i915->gt);
return vma;
@@ -809,7 +816,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(batch->vm->i915);
+ intel_gt_chipset_flush(batch->vm->gt);
i915_gem_object_unpin_map(batch->obj);
@@ -1031,7 +1038,7 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
i915_gem_object_unpin_map(request[id]->batch->obj);
}
@@ -1227,7 +1234,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_breadcrumbs_smoketest),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index b18eaefef798..db9c645bbdfe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -26,6 +26,8 @@
#include "../i915_drv.h"
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+
struct i915_selftest i915_selftest __read_mostly = {
.timeout_ms = 1000,
};
@@ -240,7 +242,61 @@ static bool apply_subtest_filter(const char *caller, const char *name)
return result;
}
+int __i915_nop_setup(void *data)
+{
+ return 0;
+}
+
+int __i915_nop_teardown(int err, void *data)
+{
+ return err;
+}
+
+int __i915_live_setup(void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ return intel_gt_terminally_wedged(&i915->gt);
+}
+
+int __i915_live_teardown(int err, void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(i915);
+
+ return err;
+}
+
+int __intel_gt_live_setup(void *data)
+{
+ struct intel_gt *gt = data;
+
+ return intel_gt_terminally_wedged(gt);
+}
+
+int __intel_gt_live_teardown(int err, void *data)
+{
+ struct intel_gt *gt = data;
+
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(gt->i915);
+
+ return err;
+}
+
int __i915_subtests(const char *caller,
+ int (*setup)(void *data),
+ int (*teardown)(int err, void *data),
const struct i915_subtest *st,
unsigned int count,
void *data)
@@ -255,10 +311,17 @@ int __i915_subtests(const char *caller,
if (!apply_subtest_filter(caller, st->name))
continue;
+ err = setup(data);
+ if (err) {
+ pr_err(DRIVER_NAME "/%s: setup failed for %s\n",
+ caller, st->name);
+ return err;
+ }
+
pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
GEM_TRACE("Running %s/%s\n", caller, st->name);
- err = st->func(data);
+ err = teardown(st->func(data), data);
if (err && err != -EINTR) {
pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
caller, st->name, err);
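__i915_subtests() now brackets every subtest with injected setup() and teardown() callbacks (with nop variants for the mock path), so common work such as wedged-GPU detection and post-test flushing lives in one place instead of being repeated in each subtest. A minimal sketch of that harness shape (names hypothetical):

#include <stdio.h>

struct subtest {
        const char *name;
        int (*func)(void *data);
};

static int nop_setup(void *data)             { (void)data; return 0; }
static int nop_teardown(int err, void *data) { (void)data; return err; }

static int run_subtests(int (*setup)(void *),
                        int (*teardown)(int, void *),
                        const struct subtest *st, int count, void *data)
{
        int i, err;

        for (i = 0; i < count; i++) {
                err = setup(data);
                if (err)
                        return err;

                /* teardown sees the subtest result and may override it */
                err = teardown(st[i].func(data), data);
                if (err)
                        return err;
        }
        return 0;
}

static int dummy(void *data) { (void)data; puts("ran"); return 0; }

int main(void)
{
        struct subtest tests[] = { { "dummy", dummy } };

        return run_subtests(nop_setup, nop_teardown, tests, 1, NULL);
}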
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fbc79b14823a..a5bec0a4cdcc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -193,6 +193,8 @@ static int igt_vma_create(void *arg)
list_del_init(&ctx->link);
mock_context_close(ctx);
}
+
+ cond_resched();
}
end:
@@ -341,6 +343,8 @@ static int igt_vma_pin1(void *arg)
goto out;
}
}
+
+ cond_resched();
}
err = 0;
@@ -597,6 +601,8 @@ static int igt_vma_rotate_remap(void *arg)
}
i915_vma_unpin(vma);
+
+ cond_resched();
}
}
}
@@ -752,6 +758,8 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
nvma++;
+
+ cond_resched();
}
}
@@ -961,6 +969,8 @@ static int igt_vma_remapped_gtt(void *arg)
}
}
i915_vma_unpin_iomap(vma);
+
+ cond_resched();
}
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 5bfd1b2626a2..d3b5eb402d33 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -5,6 +5,7 @@
*/
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_selftest.h"
@@ -13,7 +14,7 @@
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
{
- int ret = i915_terminally_wedged(i915) ? -EIO : 0;
+ int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
int repeat = !!(flags & I915_WAIT_LOCKED);
cond_resched();
@@ -27,7 +28,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
__builtin_return_address(0));
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
repeat = 0;
ret = -EIO;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 587df6fd4ffe..7ec8f8b049c6 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -7,47 +7,45 @@
#include "igt_reset.h"
#include "gt/intel_engine.h"
+#include "gt/intel_gt.h"
#include "../i915_drv.h"
-void igt_global_reset_lock(struct drm_i915_private *i915)
+void igt_global_reset_lock(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- pr_debug("%s: current gpu_error=%08lx\n",
- __func__, i915->gpu_error.flags);
+ pr_debug("%s: current gpu_error=%08lx\n", __func__, gt->reset.flags);
- while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
+ while (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags))
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
while (test_and_set_bit(I915_RESET_ENGINE + id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_ENGINE + id,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id,
TASK_UNINTERRUPTIBLE);
}
}
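igt_global_reset_lock() serialises against real resets by contending on the same I915_RESET_BACKOFF bit the reset path uses, sleeping on the reset queue between attempts. A busy-waiting userspace analog of that bit-lock loop with C11 atomics, minus the wait queue (all names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint flags;
#define RESET_BACKOFF 0u

/* returns the previous value of the bit, like test_and_set_bit() */
static int test_and_set_bit(unsigned int bit, atomic_uint *addr)
{
        return (atomic_fetch_or(addr, 1u << bit) >> bit) & 1;
}

static void clear_bit(unsigned int bit, atomic_uint *addr)
{
        atomic_fetch_and(addr, ~(1u << bit));
}

int main(void)
{
        while (test_and_set_bit(RESET_BACKOFF, &flags))
                ;       /* the kernel sleeps on a wait queue here */

        puts("reset lock held");
        clear_bit(RESET_BACKOFF, &flags);
        return 0;
}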
-void igt_global_reset_unlock(struct drm_i915_private *i915)
+void igt_global_reset_unlock(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ for_each_engine(engine, gt->i915, id)
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
+ clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
+ wake_up_all(&gt->reset.queue);
}
-bool igt_force_reset(struct drm_i915_private *i915)
+bool igt_force_reset(struct intel_gt *gt)
{
- i915_gem_set_wedged(i915);
- i915_reset(i915, 0, NULL);
+ intel_gt_set_wedged(gt);
+ intel_gt_reset(gt, 0, NULL);
- return !i915_reset_failed(i915);
+ return !intel_gt_is_wedged(gt);
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
index 363bd853e50f..851873b67ab3 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.h
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -7,10 +7,12 @@
#ifndef __I915_SELFTESTS_IGT_RESET_H__
#define __I915_SELFTESTS_IGT_RESET_H__
-#include "../i915_drv.h"
+#include <linux/types.h>
-void igt_global_reset_lock(struct drm_i915_private *i915);
-void igt_global_reset_unlock(struct drm_i915_private *i915);
-bool igt_force_reset(struct drm_i915_private *i915);
+struct intel_gt;
+
+void igt_global_reset_lock(struct intel_gt *gt);
+void igt_global_reset_unlock(struct intel_gt *gt);
+bool igt_force_reset(struct intel_gt *gt);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 1e59b543cf27..89b6552a6497 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,6 +3,7 @@
*
* Copyright © 2018 Intel Corporation
*/
+#include "gt/intel_gt.h"
#include "gem/selftests/igt_gem_utils.h"
@@ -18,6 +19,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
memset(spin, 0, sizeof(*spin));
spin->i915 = i915;
+ spin->gt = &i915->gt;
spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(spin->hws)) {
@@ -94,6 +96,8 @@ igt_spinner_create_request(struct igt_spinner *spin,
u32 *batch;
int err;
+ spin->gt = engine->gt;
+
vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
@@ -138,7 +142,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = upper_32_bits(vma->node.start);
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
- i915_gem_chipset_flush(spin->i915);
+ intel_gt_chipset_flush(engine->gt);
if (engine->emit_init_breadcrumb &&
rq->timeline->has_initial_breadcrumb) {
@@ -172,7 +176,7 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
void igt_spinner_end(struct igt_spinner *spin)
{
*spin->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(spin->i915);
+ intel_gt_chipset_flush(spin->gt);
}
void igt_spinner_fini(struct igt_spinner *spin)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index 34a88ac9b47a..1bfc39efa773 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -14,8 +14,11 @@
#include "i915_request.h"
#include "i915_selftest.h"
+struct intel_gt;
+
struct igt_spinner {
struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
u32 *batch;
diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
deleted file mode 100644
index 08e5ff11bbd9..000000000000
--- a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- */
-
-#ifndef IGT_WEDGE_ME_H
-#define IGT_WEDGE_ME_H
-
-#include <linux/workqueue.h>
-
-#include "../i915_gem.h"
-
-struct drm_i915_private;
-
-struct igt_wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const char *name;
-};
-
-static void __igt_wedge_me(struct work_struct *work)
-{
- struct igt_wedge_me *w = container_of(work, typeof(*w), work.work);
-
- pr_err("%s timed out, cancelling test.\n", w->name);
-
- GEM_TRACE("%s timed out.\n", w->name);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(w->i915);
-}
-
-static void __igt_init_wedge(struct igt_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name)
-{
- w->i915 = i915;
- w->name = name;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __igt_fini_wedge(struct igt_wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \
- (W)->i915; \
- __igt_fini_wedge((W)))
-
-#endif /* IGT_WEDGE_ME_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 64bc51400ae7..fd4cc4809eb8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,6 +25,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "gt/intel_gt.h"
#include "gt/mock_engine.h"
#include "mock_request.h"
@@ -67,7 +68,7 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_contexts_fini(i915);
mutex_unlock(&i915->drm.struct_mutex);
- i915_timelines_fini(i915);
+ intel_timelines_fini(i915);
drain_workqueue(i915->wq);
i915_gem_drain_freed_objects(i915);
@@ -179,14 +180,9 @@ struct drm_i915_private *mock_gem_device(void)
mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
- intel_gt_pm_init(i915);
+ intel_gt_init_early(&i915->gt, i915);
atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
- init_waitqueue_head(&i915->gpu_error.wait_queue);
- init_waitqueue_head(&i915->gpu_error.reset_queue);
- init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
- mutex_init(&i915->gpu_error.wedge_mutex);
-
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
goto err_drv;
@@ -198,11 +194,7 @@ struct drm_i915_private *mock_gem_device(void)
i915->gt.awake = true;
- i915_timelines_init(i915);
-
- INIT_LIST_HEAD(&i915->gt.active_rings);
- INIT_LIST_HEAD(&i915->gt.closed_vma);
- spin_lock_init(&i915->gt.closed_lock);
+ intel_timelines_init(i915);
mutex_lock(&i915->drm.struct_mutex);
@@ -233,7 +225,7 @@ err_engine:
mock_engine_free(i915->engine[RCS0]);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
- i915_timelines_fini(i915);
+ intel_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index f625c307a406..e62a67e0f79c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -98,6 +98,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
{
memset(ggtt, 0, sizeof(*ggtt));
+ ggtt->vm.gt = &i915->gt;
ggtt->vm.i915 = i915;
ggtt->vm.is_ggtt = true;
@@ -116,6 +117,8 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+ intel_gt_init_hw(i915);
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index ff8999c63a12..49585f16d4a2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -41,6 +41,6 @@ __nop_read(64)
void mock_uncore_init(struct intel_uncore *uncore)
{
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
- ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
}
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index 06393cd1067d..5a3ad6fc8ea7 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -3,19 +3,21 @@
*
* derived from imx-hdmi.c(renamed to bridge/dw_hdmi.c now)
*/
-#include <linux/module.h>
-#include <linux/platform_device.h>
+
#include <linux/component.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
-#include <drm/bridge/dw_hdmi.h>
-#include <video/imx-ipu-v3.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <drm/drm_of.h>
-#include <drm/drmP.h>
+
+#include <video/imx-ipu-v3.h>
+
+#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_of.h>
#include "imx-drm.h"
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 3e8bece620df..da87c70e413b 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -4,14 +4,18 @@
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
*/
+
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
+#include <video/imx-ipu-v3.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -19,7 +23,7 @@
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <video/imx-ipu-v3.h>
+#include <drm/drm_vblank.h>
#include "imx-drm.h"
#include "ipuv3-plane.h"
@@ -147,16 +151,13 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
};
static struct drm_driver imx_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
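
Dropping DRIVER_PRIME together with the explicit .gem_prime_import/.gem_prime_export hooks recurs in almost every driver below: the DRM core of this era treats PRIME support as unconditional and falls back to drm_gem_prime_import()/drm_gem_prime_export() when the hooks are left NULL, so only the non-default callbacks remain. A trimmed declaration (the driver name is illustrative):

    static struct drm_driver example_driver = {
            .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
            /* import/export hooks omitted: core defaults apply */
            .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
            .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
            .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
            .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
    };
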
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 383733302280..db461b6a257f 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -5,25 +5,27 @@
* Copyright (C) 2012 Sascha Hauer, Pengutronix
*/
-#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_probe_helper.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
-#include <video/of_display_timing.h>
-#include <video/of_videomode.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>
+#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "imx-drm.h"
#define DRIVER_NAME "imx-ldb"
@@ -460,9 +462,10 @@ static int imx_ldb_register(struct drm_device *drm,
*/
drm_connector_helper_add(&imx_ldb_ch->connector,
&imx_ldb_connector_helper_funcs);
- drm_connector_init(drm, &imx_ldb_ch->connector,
- &imx_ldb_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
+ drm_connector_init_with_ddc(drm, &imx_ldb_ch->connector,
+ &imx_ldb_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS,
+ imx_ldb_ch->ddc);
drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
}
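
drm_connector_init_with_ddc() is drm_connector_init() plus one extra argument: the I2C adapter over which the connector's EDID is read. Recording it in connector->ddc lets the core and userspace discover which bus carries the display data, and a NULL adapter degrades to plain drm_connector_init() behaviour. The conversion is mechanical wherever the driver already holds the adapter, as in the hunk above:

    drm_connector_init_with_ddc(drm, &imx_ldb_ch->connector,
                                &imx_ldb_connector_funcs,
                                DRM_MODE_CONNECTOR_LVDS,
                                imx_ldb_ch->ddc);   /* may be NULL */
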
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index e725af8a0025..5bbfaa2cd0f4 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -5,20 +5,22 @@
* Copyright (C) 2013 Philipp Zabel, Pengutronix
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
-#include <drm/drmP.h>
+
+#include <video/imx-ipu-v3.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
-#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
@@ -482,8 +484,10 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
drm_connector_helper_add(&tve->connector,
&imx_tve_connector_helper_funcs);
- drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ drm_connector_init_with_ddc(drm, &tve->connector,
+ &imx_tve_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ tve->ddc);
drm_connector_attach_encoder(&tve->connector, &tve->encoder);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index c436a28d50e4..63c0284f8b3c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -4,21 +4,25 @@
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
*/
+
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
+#include <video/imx-ipu-v3.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
-#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
#include "ipuv3-plane.h"
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 2a1e071d39ee..28826c0aa24a 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -5,15 +5,16 @@
* Copyright (C) 2013 Philipp Zabel, Pengutronix
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
-#include "video/imx-ipu-v3.h"
+#include <video/imx-ipu-v3.h>
+
#include "imx-drm.h"
#include "ipuv3-plane.h"
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 1a76de1e8e7b..2e51b2fade75 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -7,14 +7,16 @@
#include <linux/component.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include <video/of_display_timing.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
-#include <linux/videodev2.h>
-#include <video/of_display_timing.h>
#include "imx-drm.h"
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index e9f9e9fb9b17..ce1fae3a78a9 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -166,6 +166,8 @@ struct ingenic_drm {
struct ingenic_dma_hwdesc *dma_hwdesc;
dma_addr_t dma_hwdesc_phys;
+
+ bool panel_is_sharp;
};
static const u32 ingenic_drm_primary_formats[] = {
@@ -283,6 +285,13 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
regmap_write(priv->map, JZ_REG_LCD_DAV,
vds << JZ_LCD_DAV_VDS_OFFSET |
vde << JZ_LCD_DAV_VDE_OFFSET);
+
+ if (priv->panel_is_sharp) {
+ regmap_write(priv->map, JZ_REG_LCD_PS, hde << 16 | (hde + 1));
+ regmap_write(priv->map, JZ_REG_LCD_CLS, hde << 16 | (hde + 1));
+ regmap_write(priv->map, JZ_REG_LCD_SPL, hpe << 16 | (hpe + 1));
+ regmap_write(priv->map, JZ_REG_LCD_REV, mode->htotal << 16);
+ }
}
static void ingenic_drm_crtc_update_ctrl(struct ingenic_drm *priv,
@@ -378,11 +387,18 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
{
struct ingenic_drm *priv = drm_encoder_get_priv(encoder);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
- struct drm_display_info *info = &conn_state->connector->display_info;
- unsigned int cfg = JZ_LCD_CFG_PS_DISABLE
- | JZ_LCD_CFG_CLS_DISABLE
- | JZ_LCD_CFG_SPL_DISABLE
- | JZ_LCD_CFG_REV_DISABLE;
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_display_info *info = &conn->display_info;
+ unsigned int cfg;
+
+ priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS;
+
+ if (priv->panel_is_sharp) {
+ cfg = JZ_LCD_CFG_MODE_SPECIAL_TFT_1 | JZ_LCD_CFG_REV_POLARITY;
+ } else {
+ cfg = JZ_LCD_CFG_PS_DISABLE | JZ_LCD_CFG_CLS_DISABLE
+ | JZ_LCD_CFG_SPL_DISABLE | JZ_LCD_CFG_REV_DISABLE;
+ }
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW;
@@ -393,24 +409,29 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder,
if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE;
- if (conn_state->connector->connector_type == DRM_MODE_CONNECTOR_TV) {
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- cfg |= JZ_LCD_CFG_MODE_TV_OUT_I;
- else
- cfg |= JZ_LCD_CFG_MODE_TV_OUT_P;
- } else {
- switch (*info->bus_formats) {
- case MEDIA_BUS_FMT_RGB565_1X16:
- cfg |= JZ_LCD_CFG_MODE_GENERIC_16BIT;
- break;
- case MEDIA_BUS_FMT_RGB666_1X18:
- cfg |= JZ_LCD_CFG_MODE_GENERIC_18BIT;
- break;
- case MEDIA_BUS_FMT_RGB888_1X24:
- cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT;
- break;
- default:
- break;
+ if (!priv->panel_is_sharp) {
+ if (conn->connector_type == DRM_MODE_CONNECTOR_TV) {
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ cfg |= JZ_LCD_CFG_MODE_TV_OUT_I;
+ else
+ cfg |= JZ_LCD_CFG_MODE_TV_OUT_P;
+ } else {
+ switch (*info->bus_formats) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ cfg |= JZ_LCD_CFG_MODE_GENERIC_16BIT;
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ cfg |= JZ_LCD_CFG_MODE_GENERIC_18BIT;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT;
+ break;
+ case MEDIA_BUS_FMT_RGB888_3X8:
+ cfg |= JZ_LCD_CFG_MODE_8BIT_SERIAL;
+ break;
+ default:
+ break;
+ }
}
}
@@ -433,6 +454,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_RGB666_1X18:
case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_3X8:
return 0;
default:
return -EINVAL;
@@ -484,8 +506,7 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops);
static struct drm_driver ingenic_drm_driver_data = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
- | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "ingenic-drm",
.desc = "DRM module for Ingenic SoCs",
.date = "20190422",
@@ -581,7 +602,6 @@ static int ingenic_drm_probe(struct platform_device *pdev)
struct drm_bridge *bridge;
struct drm_panel *panel;
struct drm_device *drm;
- struct resource *mem;
void __iomem *base;
long parent_rate;
int ret, irq;
@@ -615,8 +635,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
drm->mode_config.max_height = 600;
drm->mode_config.funcs = &ingenic_drm_mode_config_funcs;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, mem);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "Failed to get memory resource");
return PTR_ERR(base);
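
devm_platform_ioremap_resource(pdev, n) is an exact shorthand for the two-call sequence it replaces here: look up the n-th IORESOURCE_MEM resource of the platform device and devm_ioremap_resource() it. Before and after:

    /* before */
    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(dev, mem);

    /* after */
    base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(base))
            return PTR_ERR(base);
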
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index 570d0e93f9a9..d86b8d81a483 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -80,26 +80,23 @@ const char *lima_ip_name(struct lima_ip *ip)
static int lima_clk_init(struct lima_device *dev)
{
int err;
- unsigned long bus_rate, gpu_rate;
dev->clk_bus = devm_clk_get(dev->dev, "bus");
if (IS_ERR(dev->clk_bus)) {
- dev_err(dev->dev, "get bus clk failed %ld\n", PTR_ERR(dev->clk_bus));
- return PTR_ERR(dev->clk_bus);
+ err = PTR_ERR(dev->clk_bus);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get bus clk failed %d\n", err);
+ return err;
}
dev->clk_gpu = devm_clk_get(dev->dev, "core");
if (IS_ERR(dev->clk_gpu)) {
- dev_err(dev->dev, "get core clk failed %ld\n", PTR_ERR(dev->clk_gpu));
- return PTR_ERR(dev->clk_gpu);
+ err = PTR_ERR(dev->clk_gpu);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get core clk failed %d\n", err);
+ return err;
}
- bus_rate = clk_get_rate(dev->clk_bus);
- dev_info(dev->dev, "bus rate = %lu\n", bus_rate);
-
- gpu_rate = clk_get_rate(dev->clk_gpu);
- dev_info(dev->dev, "mod rate = %lu", gpu_rate);
-
err = clk_prepare_enable(dev->clk_bus);
if (err)
return err;
@@ -111,11 +108,17 @@ static int lima_clk_init(struct lima_device *dev)
dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
if (IS_ERR(dev->reset)) {
err = PTR_ERR(dev->reset);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get reset controller failed %d\n",
+ err);
goto error_out1;
} else if (dev->reset != NULL) {
err = reset_control_deassert(dev->reset);
- if (err)
+ if (err) {
+ dev_err(dev->dev,
+ "reset controller deassert failed %d\n", err);
goto error_out1;
+ }
}
return 0;
@@ -145,7 +148,8 @@ static int lima_regulator_init(struct lima_device *dev)
dev->regulator = NULL;
if (ret == -ENODEV)
return 0;
- dev_err(dev->dev, "failed to get regulator: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev->dev, "failed to get regulator: %d\n", ret);
return ret;
}
@@ -291,16 +295,12 @@ int lima_device_init(struct lima_device *ldev)
dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
err = lima_clk_init(ldev);
- if (err) {
- dev_err(ldev->dev, "clk init fail %d\n", err);
+ if (err)
return err;
- }
err = lima_regulator_init(ldev);
- if (err) {
- dev_err(ldev->dev, "regulator init fail %d\n", err);
+ if (err)
goto err_out0;
- }
ldev->empty_vm = lima_vm_create(ldev);
if (!ldev->empty_vm) {
@@ -343,6 +343,9 @@ int lima_device_init(struct lima_device *ldev)
if (err)
goto err_out5;
+ dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus));
+ dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu));
+
return 0;
err_out5:
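
The pattern introduced throughout lima_clk_init()/lima_regulator_init() is the standard probe-path idiom: -EPROBE_DEFER means "try again later", so it is the one error code not worth logging at error level. Later kernels wrap exactly this idiom in dev_err_probe(); under that helper (available since v5.9, so not usable here) the same lookup would collapse to the sketch below:

    dev->clk_bus = devm_clk_get(dev->dev, "bus");
    if (IS_ERR(dev->clk_bus))
            return dev_err_probe(dev->dev, PTR_ERR(dev->clk_bus),
                                 "get bus clk failed\n");
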
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index b29c26cd13b2..75ec703d22e0 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -231,13 +231,13 @@ static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *f
}
static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
- DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW),
};
static const struct file_operations lima_drm_driver_fops = {
@@ -252,7 +252,7 @@ static const struct file_operations lima_drm_driver_fops = {
};
static struct drm_driver lima_drm_driver = {
- .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME | DRIVER_SYNCOBJ,
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = lima_drm_driver_open,
.postclose = lima_drm_driver_postclose,
.ioctls = lima_drm_driver_ioctls,
@@ -307,10 +307,8 @@ static int lima_pdev_probe(struct platform_device *pdev)
ldev->ddev = ddev;
err = lima_device_init(ldev);
- if (err) {
- dev_err(&pdev->dev, "Fatal error during GPU init\n");
+ if (err)
goto err_out1;
- }
/*
* Register the DRM device with the core and the connectors with
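
Dropping DRM_AUTH from ioctls that are already DRM_RENDER_ALLOW is a consistency fix rather than a loosening: render-node clients never authenticate, so the flag only restricted the very same ioctls when reached through the primary node. Removing it gives both paths identical semantics:

    /* reachable unauthenticated via the render node anyway, so
     * requiring auth on the primary node added no protection */
    DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create,
                      DRM_RENDER_ALLOW),
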
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 477c0f766663..fd1a024703d2 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -24,7 +24,7 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
struct lima_bo *bo;
struct lima_device *ldev = to_lima_dev(dev);
- bo = lima_bo_create(ldev, size, flags, NULL, NULL);
+ bo = lima_bo_create(ldev, size, flags, NULL);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
index 9c6d9f1dba55..e3eb251e0a12 100644
--- a/drivers/gpu/drm/lima/lima_gem_prime.c
+++ b/drivers/gpu/drm/lima/lima_gem_prime.c
@@ -18,8 +18,7 @@ struct drm_gem_object *lima_gem_prime_import_sg_table(
struct lima_device *ldev = to_lima_dev(dev);
struct lima_bo *bo;
- bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt,
- attach->dmabuf->resv);
+ bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt);
if (IS_ERR(bo))
return ERR_CAST(bo);
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
index 5c41f859a72f..87123b1d083c 100644
--- a/drivers/gpu/drm/lima/lima_object.c
+++ b/drivers/gpu/drm/lima/lima_object.c
@@ -33,8 +33,7 @@ void lima_bo_destroy(struct lima_bo *bo)
kfree(bo);
}
-static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags,
- struct reservation_object *resv)
+static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags)
{
struct lima_bo *bo;
int err;
@@ -47,7 +46,6 @@ static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size,
mutex_init(&bo->lock);
INIT_LIST_HEAD(&bo->va);
- bo->gem.resv = resv;
err = drm_gem_object_init(dev->ddev, &bo->gem, size);
if (err) {
@@ -59,14 +57,13 @@ static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size,
}
struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt,
- struct reservation_object *resv)
+ u32 flags, struct sg_table *sgt)
{
int i, err;
size_t npages;
struct lima_bo *bo, *ret;
- bo = lima_bo_create_struct(dev, size, flags, resv);
+ bo = lima_bo_create_struct(dev, size, flags);
if (IS_ERR(bo))
return bo;
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
index 6738724afb7b..31ca2d8dc0a1 100644
--- a/drivers/gpu/drm/lima/lima_object.h
+++ b/drivers/gpu/drm/lima/lima_object.h
@@ -27,8 +27,7 @@ to_lima_bo(struct drm_gem_object *obj)
}
struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
- u32 flags, struct sg_table *sgt,
- struct reservation_object *resv);
+ u32 flags, struct sg_table *sgt);
void lima_bo_destroy(struct lima_bo *bo);
void *lima_bo_vmap(struct lima_bo *bo);
void lima_bo_vunmap(struct lima_bo *bo);
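
The reservation-object parameter can disappear because the GEM core of this series owns it now: drm_gem_object_init() gives every object its own embedded reservation object unless one was already set, and the PRIME import path points the imported object at the dma-buf's. Paraphrasing the core behaviour this relies on (field names as in the 5.3-era struct drm_gem_object):

    /* drm_gem_private_object_init(): default to the embedded resv */
    if (!obj->resv)
            obj->resv = &obj->_resv;

    /* drm_gem_prime_import_dev(): imports share the exporter's resv */
    obj->resv = dma_buf->resv;
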
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index caee2f8a29b4..e0bdedcf14dd 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -15,9 +15,9 @@
#define LIMA_VM_NUM_PT_PER_BT (1 << LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_NUM_BT (LIMA_PAGE_ENT_NUM >> LIMA_VM_NUM_PT_PER_BT_SHIFT)
-#define LIMA_VA_RESERVE_START 0xFFF00000
+#define LIMA_VA_RESERVE_START 0x0FFF00000ULL
#define LIMA_VA_RESERVE_DLBU LIMA_VA_RESERVE_START
-#define LIMA_VA_RESERVE_END 0x100000000
+#define LIMA_VA_RESERVE_END 0x100000000ULL
struct lima_device;
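
The ULL suffixes are about constant types, not values: unsuffixed, 0xFFF00000 is a 32-bit unsigned int while 0x100000000 is promoted to the first wider type that fits it, so arithmetic mixing the two crosses widths. Forcing both to unsigned long long keeps all reserved-window math in one 64-bit type on 32-bit and 64-bit builds alike:

    /* both operands are unsigned long long on every target */
    u64 reserved = LIMA_VA_RESERVE_END - LIMA_VA_RESERVE_START;
                                            /* == 0x100000 */
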
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index baf63fb6850a..982fe8485a61 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -237,7 +237,7 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver mcde_drm_driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.release = mcde_release,
.lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
@@ -254,8 +254,6 @@ static struct drm_driver mcde_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
@@ -319,7 +317,7 @@ static int mcde_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct drm_device *drm;
struct mcde *mcde;
- struct component_match *match;
+ struct component_match *match = NULL;
struct resource *res;
u32 pid;
u32 val;
@@ -485,6 +483,10 @@ static int mcde_probe(struct platform_device *pdev)
}
put_device(p);
}
+ if (!match) {
+ dev_err(dev, "no matching components\n");
+ return -ENODEV;
+ }
if (IS_ERR(match)) {
dev_err(dev, "could not create component match\n");
ret = PTR_ERR(match);
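
The two mcde fixes belong together: component_match_add() only writes through the match pointer when it adds an entry (or fails to allocate one, storing an ERR_PTR), so a probe loop that matched zero components would previously have handed an uninitialized pointer to component_master_add_with_match(). Initializing to NULL and rejecting the empty case first makes all three outcomes explicit:

    struct component_match *match = NULL;

    /* ... loop over children calling component_match_add() ... */

    if (!match)                 /* the loop added nothing */
            return -ENODEV;
    if (IS_ERR(match))          /* allocation failed inside the helper */
            return PTR_ERR(match);
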
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index f33d98b356d6..59de2a46aa49 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -3,9 +3,9 @@
* Copyright (c) 2017 MediaTek Inc.
*/
-#include <drm/drmP.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index c4f07c28c74f..21851756c579 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -3,9 +3,9 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <drm/drmP.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 9a6f0a29e43c..405afef31407 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -3,9 +3,9 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <drm/drmP.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index bacd989cc9aa..be6d95c5ff25 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -3,21 +3,23 @@
* Copyright (c) 2014 MediaTek Inc.
* Author: Jie Qiu <jie.qiu@mediatek.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_of.h>
-#include <linux/kernel.h>
+
+#include <linux/clk.h>
#include <linux/component.h>
-#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
-#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
-#include <linux/clk.h>
+
#include <video/videomode.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_of.h>
+
#include "mtk_dpi_regs.h"
#include "mtk_drm_ddp_comp.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index a9007210dda1..34a731755791 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -3,14 +3,16 @@
* Copyright (c) 2015 MediaTek Inc.
*/
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+
#include <asm/barrier.h>
-#include <drm/drmP.h>
+#include <soc/mediatek/smi.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <soc/mediatek/smi.h>
+#include <drm/drm_vblank.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index b38963f1f2ec..efa85973e46b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -12,7 +12,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
#include "mtk_drm_drv.h"
#include "mtk_drm_plane.h"
#include "mtk_drm_ddp_comp.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 95fdbd0fbcac..2ee809a6f3dc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -4,22 +4,26 @@
* Author: YT SHEN <yt.shen@mediatek.com>
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include <linux/component.h>
-#include <linux/iommu.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/pm_runtime.h>
+#include <drm/drm_vblank.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp.h"
+#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_drv.h"
#include "mtk_drm_fb.h"
@@ -38,22 +42,12 @@ static void mtk_atomic_schedule(struct mtk_drm_private *private,
schedule_work(&private->commit.work);
}
-static void mtk_atomic_wait_for_fences(struct drm_atomic_state *state)
-{
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- int i;
-
- for_each_new_plane_in_state(state, plane, new_plane_state, i)
- mtk_fb_wait(new_plane_state->fb);
-}
-
static void mtk_atomic_complete(struct mtk_drm_private *private,
struct drm_atomic_state *state)
{
struct drm_device *drm = private->drm;
- mtk_atomic_wait_for_fences(state);
+ drm_atomic_helper_wait_for_fences(drm, state, false);
/*
* Mediatek drm supports runtime PM, so plane registers cannot be
@@ -321,8 +315,7 @@ static const struct file_operations mtk_drm_fops = {
};
static struct drm_driver mtk_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.gem_free_object_unlocked = mtk_drm_gem_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -330,8 +323,6 @@ static struct drm_driver mtk_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.gem_prime_mmap = mtk_drm_gem_mmap_buf,
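
mtk_atomic_wait_for_fences() could be deleted because the core helper already does the same walk: drm_atomic_helper_wait_for_fences() iterates every new plane state and waits on plane_state->fence. The third argument selects an interruptible wait for the pre-swap case; this commit tail runs after the state swap, so it passes false. The driver's one remaining obligation is to populate plane_state->fence, which the .prepare_fb change further down handles:

    /* in the commit tail, before programming the hardware */
    drm_atomic_helper_wait_for_fences(drm, state, false);
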
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
index 4c3ad7de2d3b..ae40b080ae47 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c
@@ -3,13 +3,14 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <drm/drmP.h>
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
#include <drm/drm_modeset_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <linux/dma-buf.h>
-#include <linux/reservation.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_fb.h"
@@ -49,34 +50,6 @@ static struct drm_framebuffer *mtk_drm_framebuffer_init(struct drm_device *dev,
return fb;
}
-/*
- * Wait for any exclusive fence in fb's gem object's reservation object.
- *
- * Returns -ERESTARTSYS if interrupted, else 0.
- */
-int mtk_fb_wait(struct drm_framebuffer *fb)
-{
- struct drm_gem_object *gem;
- struct reservation_object *resv;
- long ret;
-
- if (!fb)
- return 0;
-
- gem = fb->obj[0];
- if (!gem || !gem->dma_buf || !gem->dma_buf->resv)
- return 0;
-
- resv = gem->dma_buf->resv;
- ret = reservation_object_wait_timeout_rcu(resv, false, true,
- MAX_SCHEDULE_TIMEOUT);
- /* MAX_SCHEDULE_TIMEOUT on success, -ERESTARTSYS if interrupted */
- if (WARN_ON(ret < 0))
- return ret;
-
- return 0;
-}
-
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.h b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
index 6b80c28e33cf..eb64d26001c6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_fb.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.h
@@ -6,7 +6,6 @@
#ifndef MTK_DRM_FB_H
#define MTK_DRM_FB_H
-int mtk_fb_wait(struct drm_framebuffer *fb);
struct drm_framebuffer *mtk_drm_mode_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *cmd);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 0d69698f8173..ca672f1d140d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -3,10 +3,13 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
#include <linux/dma-buf.h>
+#include <drm/drm.h>
+#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index f2ef83aed6f9..584a9ecadce6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -4,10 +4,11 @@
* Author: CK Hu <ck.hu@mediatek.com>
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -146,6 +147,7 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = mtk_plane_atomic_check,
.atomic_update = mtk_plane_atomic_update,
.atomic_disable = mtk_plane_atomic_disable,
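
This is the other half of the fence change above: drm_gem_fb_prepare_fb() takes the exclusive (implicit) fence of the framebuffer's first GEM object and attaches it to the plane state, which is exactly what drm_atomic_helper_wait_for_fences() consumes in the commit path. Wiring it up is one line per plane type:

    static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
            .prepare_fb = drm_gem_fb_prepare_fb,    /* attach implicit fence */
            .atomic_check = mtk_plane_atomic_check,
            .atomic_update = mtk_plane_atomic_update,
            .atomic_disable = mtk_plane_atomic_disable,
    };
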
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index b91c4616644a..224afb666881 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -3,12 +3,6 @@
* Copyright (c) 2015 MediaTek Inc.
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
@@ -17,9 +11,17 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+
#include <video/mipi_display.h>
#include <video/videomode.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "mtk_drm_ddp_comp.h"
#define DSI_START 0x00
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5d6a9f094df5..ce91b61364eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -3,11 +3,7 @@
* Copyright (c) 2014 MediaTek Inc.
* Author: Jie Qiu <jie.qiu@mediatek.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_edid.h>
+
#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -23,7 +19,15 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+
#include <sound/hdmi-codec.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "mtk_cec.h"
#include "mtk_hdmi.h"
#include "mtk_hdmi_regs.h"
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index aa8ea107524e..3320a74e67fa 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -9,23 +9,21 @@
* Jasper St. Pierre <jstpierre@mecheye.net>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
#include <linux/bitfield.h>
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
+#include <linux/soc/amlogic/meson-canvas.h>
+
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_flip_work.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "meson_crtc.h"
#include "meson_plane.h"
+#include "meson_registers.h"
#include "meson_venc.h"
-#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_registers.h"
+#include "meson_vpp.h"
#define MESON_G12A_VIU_OFFSET 0x5ec0
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2310c96fff46..42af49afdd75 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -8,35 +8,30 @@
* Jasper St. Pierre <jstpierre@mecheye.net>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/soc/amlogic/meson-canvas.h>
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_flip_work.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_rect.h>
+#include <drm/drm_vblank.h>
+#include "meson_crtc.h"
#include "meson_drv.h"
-#include "meson_plane.h"
#include "meson_overlay.h"
-#include "meson_crtc.h"
+#include "meson_plane.h"
+#include "meson_registers.h"
#include "meson_venc_cvbs.h"
-
-#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_venc.h"
-#include "meson_registers.h"
+#include "meson_vpp.h"
#define DRIVER_NAME "meson"
#define DRIVER_DESC "Amlogic Meson DRM driver"
@@ -93,9 +88,7 @@ static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver meson_driver = {
- .driver_features = DRIVER_GEM |
- DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
/* IRQ */
.irq_handler = meson_irq,
@@ -103,8 +96,6 @@ static struct drm_driver meson_driver = {
/* PRIME Ops */
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 7b6593f33dfe..c9aaec1a846e 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -7,11 +7,14 @@
#ifndef __MESON_DRV_H
#define __MESON_DRV_H
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
+#include <linux/device.h>
#include <linux/of.h>
-#include <linux/soc/amlogic/meson-canvas.h>
-#include <drm/drmP.h>
+#include <linux/regmap.h>
+
+struct drm_crtc;
+struct drm_device;
+struct drm_plane;
+struct meson_drm;
struct meson_drm {
struct device *dev;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index df3f9ddd2234..9f0b08eaf003 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -5,29 +5,30 @@
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
+#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
-#include <linux/reset.h>
-#include <linux/clk.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
-#include <drm/drmP.h>
+#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include <drm/bridge/dw_hdmi.h>
+#include <drm/drm_print.h>
-#include <uapi/linux/media-bus-format.h>
-#include <uapi/linux/videodev2.h>
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
#include "meson_drv.h"
-#include "meson_venc.h"
-#include "meson_vclk.h"
#include "meson_dw_hdmi.h"
#include "meson_registers.h"
+#include "meson_vclk.h"
+#include "meson_venc.h"
#define DRIVER_NAME "meson-dw-hdmi"
#define DRIVER_DESC "Amlogic Meson HDMI-TX DRM driver"
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index cc7c6ae3013d..5aa9dcb4b35e 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -5,24 +5,21 @@
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/bitfield.h>
-#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_rect.h>
#include "meson_overlay.h"
-#include "meson_vpp.h"
-#include "meson_viu.h"
#include "meson_registers.h"
+#include "meson_viu.h"
+#include "meson_vpp.h"
/* VD1_IF0_GEN_REG */
#define VD_URGENT_CHROMA BIT(28)
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 7a7e88dadd0b..80b8d70c4d75 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -9,24 +9,20 @@
* Jasper St. Pierre <jstpierre@mecheye.net>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/bitfield.h>
-#include <linux/platform_device.h>
-#include <drm/drmP.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_rect.h>
+#include <drm/drm_plane_helper.h>
#include "meson_plane.h"
-#include "meson_vpp.h"
-#include "meson_viu.h"
#include "meson_registers.h"
+#include "meson_viu.h"
/* OSD_SCI_WH_M1 */
#define SCI_WH_M1_W(w) FIELD_PREP(GENMASK(28, 16), w)
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 410e324d6f93..057453ce027c 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -6,6 +6,8 @@
#ifndef __MESON_REGISTERS_H
#define __MESON_REGISTERS_H
+#include <linux/io.h>
+
/* Shift all registers by 2 */
#define _REG(reg) ((reg) << 2)
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 26732f038d19..8abff51f937d 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -5,9 +5,10 @@
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/export.h>
+
+#include <drm/drm_print.h>
+
#include "meson_drv.h"
#include "meson_vclk.h"
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index ed993d20abda..b62125540aef 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -9,6 +9,10 @@
#ifndef __MESON_VCLK_H
#define __MESON_VCLK_H
+#include <drm/drm_modes.h>
+
+struct meson_drm;
+
enum {
MESON_VCLK_TARGET_CVBS = 0,
MESON_VCLK_TARGET_HDMI = 1,
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 7b7a0d8d737c..3d4791798ae0 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -5,14 +5,14 @@
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/export.h>
+
+#include <drm/drm_modes.h>
+
#include "meson_drv.h"
+#include "meson_registers.h"
#include "meson_venc.h"
#include "meson_vpp.h"
-#include "meson_vclk.h"
-#include "meson_registers.h"
/**
* DOC: Video Encoder
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 985642a1678e..576768bdd08d 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -14,6 +14,8 @@
#ifndef __MESON_VENC_H
#define __MESON_VENC_H
+struct drm_display_mode;
+
enum {
MESON_VENC_MODE_NONE = 0,
MESON_VENC_MODE_CVBS_PAL,
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 6313a519f257..45a467f10b9b 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -9,19 +9,18 @@
* Jasper St. Pierre <jstpierre@mecheye.net>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_print.h>
-#include "meson_venc_cvbs.h"
-#include "meson_venc.h"
-#include "meson_vclk.h"
#include "meson_registers.h"
+#include "meson_vclk.h"
+#include "meson_venc_cvbs.h"
/* HHI VDAC Registers */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 4b2b3024d371..9f8a450d50d5 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -6,13 +6,10 @@
* Copyright (C) 2014 Endless Mobile
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/export.h>
+
#include "meson_drv.h"
#include "meson_viu.h"
-#include "meson_vpp.h"
-#include "meson_venc.h"
#include "meson_registers.h"
/**
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index bfee30fa6e34..cbe6cf46e541 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -6,12 +6,11 @@
* Copyright (C) 2014 Endless Mobile
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+#include <linux/export.h>
+
#include "meson_drv.h"
-#include "meson_vpp.h"
#include "meson_registers.h"
+#include "meson_vpp.h"
/**
* DOC: Video Post Processing
diff --git a/drivers/gpu/drm/meson/meson_vpp.h b/drivers/gpu/drm/meson/meson_vpp.h
index 9fc82db8a12d..afc9553ed8d3 100644
--- a/drivers/gpu/drm/meson/meson_vpp.h
+++ b/drivers/gpu/drm/meson/meson_vpp.h
@@ -9,6 +9,9 @@
#ifndef __MESON_VPP_H
#define __MESON_VPP_H
+struct drm_rect;
+struct meson_drm;
+
/* Mux VIU/VPP to ENCI */
#define MESON_VIU_VPP_MUX_ENCI 0x5
/* Mux VIU/VPP to ENCP */
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 1ffdafea27e4..85c74364ce24 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -35,8 +35,8 @@
* \author Gareth Hughes <gareth@valinux.com>
*/
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
+#include <linux/delay.h>
+
#include "mga_drv.h"
#define MGA_DEFAULT_USEC_TIMEOUT 10000
@@ -62,7 +62,7 @@ int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
MGA_WRITE8(MGA_CRTC_INDEX, 0);
return 0;
}
- DRM_UDELAY(1);
+ udelay(1);
}
#if MGA_DMA_DEBUG
@@ -114,7 +114,7 @@ void mga_do_dma_flush(drm_mga_private_t *dev_priv)
status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
if (status == MGA_ENDPRDMASTS)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
if (primary->tail == primary->last_flush) {
@@ -1120,7 +1120,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
*/
if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- DRM_CURRENTPID, d->send_count);
+ task_pid_nr(current), d->send_count);
return -EINVAL;
}
@@ -1128,7 +1128,8 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
*/
if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- DRM_CURRENTPID, d->request_count, dma->buf_count);
+ task_pid_nr(current), d->request_count,
+ dma->buf_count);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 6e1d1054ad06..71128e6f6ae9 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -31,12 +31,11 @@
#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
-#include "mga_drv.h"
-
+#include <drm/drm_drv.h>
#include <drm/drm_pciids.h>
+#include "mga_drv.h"
+
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
};
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index a45bb22275a7..d5deecb93975 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -31,7 +31,20 @@
#ifndef __MGA_DRV_H__
#define __MGA_DRV_H__
+#include <linux/irqreturn.h>
+#include <linux/slab.h>
+
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_irq.h>
#include <drm/drm_legacy.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_print.h>
+#include <drm/drm_sarea.h>
+#include <drm/drm_vblank.h>
+#include <drm/mga_drm.h>
/* General customization:
*/
@@ -188,7 +201,7 @@ extern int mga_warp_init(drm_mga_private_t *dev_priv);
extern int mga_enable_vblank(struct drm_device *dev, unsigned int pipe);
extern void mga_disable_vblank(struct drm_device *dev, unsigned int pipe);
extern u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
+extern void mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
extern void mga_driver_irq_preinstall(struct drm_device *dev);
@@ -199,10 +212,14 @@ extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
#define mga_flush_write_combine() wmb()
-#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
-#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
-#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
-#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
+#define MGA_READ8(reg) \
+ readb(((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define MGA_READ(reg) \
+ readl(((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define MGA_WRITE8(reg, val) \
+ writeb(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
+#define MGA_WRITE(reg, val) \
+ writel(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
#define DWGREG0 0x1c00
#define DWGREG0_END 0x1dff
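
The DRM_READ*/DRM_WRITE* macros being replaced were thin wrappers over the ordinary MMIO accessors; open-coding readb()/readl()/writeb()/writel() removes another drmP.h dependency and makes the __iomem cast visible at the definition. Usage stays unchanged at every call site:

    /* dev_priv->mmio->handle is the ioremapped register BAR */
    status = MGA_READ(MGA_STATUS);          /* readl() underneath */
    MGA_WRITE8(MGA_CRTC_INDEX, 0);          /* writeb() underneath */
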
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 245fb2e359cf..6ccd270789c6 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -30,10 +30,9 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
+
#include <linux/compat.h>
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
#include "mga_drv.h"
typedef struct drm32_mga_init {
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index 693ba708cfed..a7e6ffc80a78 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -31,8 +31,6 @@
* Eric Anholt <anholt@FreeBSD.org>
*/
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
#include "mga_drv.h"
u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
@@ -118,23 +116,21 @@ void mga_disable_vblank(struct drm_device *dev, unsigned int pipe)
/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
-int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
+void mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
- int ret = 0;
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using fences.
*/
- DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * HZ,
+ wait_event_timeout(dev_priv->fence_queue,
(((cur_fence = atomic_read(&dev_priv->last_fence_retired))
- - *sequence) <= (1 << 23)));
+ - *sequence) <= (1 << 23)),
+ msecs_to_jiffies(3000));
*sequence = cur_fence;
-
- return ret;
}
void mga_driver_irq_preinstall(struct drm_device *dev)
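
DRM_WAIT_ON() was an interruptible wait that reported -EBUSY or -EINTR, but the only caller of mga_driver_fence_wait() ignores the result (see mga_state.c below), so the conversion to wait_event_timeout() also drops the return value. wait_event_timeout() returns 0 only if the condition is still false when the timeout expires, and the (cur_fence - *sequence) <= (1 << 23) test treats any sequence within half the wrap space as already retired. Standalone shape (the debug print is illustrative; the original stays silent on timeout):

    if (!wait_event_timeout(dev_priv->fence_queue,
                            (atomic_read(&dev_priv->last_fence_retired)
                             - *sequence) <= (1 << 23),
                            msecs_to_jiffies(3000)))
            DRM_DEBUG("fence wait timed out\n");
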
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index e5f6b735f575..77a0b006f066 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -32,8 +32,6 @@
* Gareth Hughes <gareth@valinux.com>
*/
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
#include "mga_drv.h"
/* ================================================================
@@ -1016,7 +1014,7 @@ int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
}
- DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+ DRM_DEBUG("pid=%d\n", task_pid_nr(current));
switch (param->param) {
case MGA_PARAM_IRQ_NR:
@@ -1048,7 +1046,7 @@ static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *fi
return -EINVAL;
}
- DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+ DRM_DEBUG("pid=%d\n", task_pid_nr(current));
/* I would normally do this assignment in the declaration of fence,
* but dev_priv may be NULL.
@@ -1077,7 +1075,7 @@ file_priv)
return -EINVAL;
}
- DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+ DRM_DEBUG("pid=%d\n", task_pid_nr(current));
mga_driver_fence_wait(dev, fence);
return 0;
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index 0b76352260a9..b5ef1d2c8b1c 100644
--- a/drivers/gpu/drm/mga/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -29,11 +29,9 @@
#include <linux/firmware.h>
#include <linux/ihex.h>
-#include <linux/platform_device.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
-#include <drm/drmP.h>
-#include <drm/mga_drm.h>
#include "mga_drv.h"
#define FIRMWARE_G200 "matrox/g200_warp.fw"
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index 98d204408bd0..04b281bcf655 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
mgag200-y := mgag200_main.o mgag200_mode.o mgag200_cursor.o \
- mgag200_drv.o mgag200_fb.o mgag200_i2c.o mgag200_ttm.o
+ mgag200_drv.o mgag200_i2c.o mgag200_ttm.o
obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index f0c61a92351c..289ce3e29032 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -5,7 +5,8 @@
* Author: Christopher Harvey <charvey@matrox.com>
*/
-#include <drm/drmP.h>
+#include <drm/drm_pci.h>
+
#include "mgag200_drv.h"
static bool warn_transparent = true;
@@ -98,11 +99,12 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
}
/* Pin and map up-coming buffer to write colour indices */
- ret = drm_gem_vram_pin(pixels_next, 0);
- if (ret)
+ ret = drm_gem_vram_pin(pixels_next, DRM_GEM_VRAM_PL_FLAG_VRAM);
+ if (ret) {
dev_err(&dev->pdev->dev,
"failed to pin cursor buffer: %d\n", ret);
goto err_drm_gem_vram_kunmap_src;
+ }
dst = drm_gem_vram_kmap(pixels_next, true, NULL);
if (IS_ERR(dst)) {
ret = PTR_ERR(dst);
@@ -110,7 +112,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
"failed to kmap cursor updates: %d\n", ret);
goto err_drm_gem_vram_unpin_dst;
}
- gpu_addr = drm_gem_vram_offset(pixels_2);
+ gpu_addr = drm_gem_vram_offset(pixels_next);
if (gpu_addr < 0) {
ret = (int)gpu_addr;
dev_err(&dev->pdev->dev,
@@ -211,7 +213,6 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc,
mdev->cursor.pixels_current = pixels_next;
drm_gem_vram_kunmap(pixels_next);
- drm_gem_vram_unpin(pixels_next);
drm_gem_vram_kunmap(gbo);
drm_gem_vram_unpin(gbo);
drm_gem_object_put_unlocked(obj);
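
Three fixes share this cursor hunk: the pin now requests VRAM placement explicitly (the hardware scans the cursor image out of video memory, so a system-memory placement would be wrong), the GPU offset is queried on pixels_next, the buffer actually being programmed, instead of the stale pixels_2, and the unpin of pixels_next on the success path is dropped, since the buffer has to stay pinned for as long as the hardware displays it. The resulting lifetime, in sketch form:

    ret = drm_gem_vram_pin(pixels_next, DRM_GEM_VRAM_PL_FLAG_VRAM);
    if (ret)
            goto err;
    gpu_addr = drm_gem_vram_offset(pixels_next);    /* valid while pinned */
    /* ... program the cursor base address, kunmap ... */
    /* no unpin here: pixels_next is now the visible cursor buffer */
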
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index aafa1cb31f50..afd9119b6cf1 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -5,14 +5,18 @@
* Authors: Matthew Garrett
* Dave Airlie
*/
+
#include <linux/module.h>
#include <linux/console.h>
-#include <drm/drmP.h>
-
-#include "mgag200_drv.h"
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
+#include "mgag200_drv.h"
+
/*
* This is the generic driver code. This binds the driver to the drm core,
* which then performs further device association and calls our graphics init
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index c47671ce6c48..1c93f8dc08c7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -10,19 +10,17 @@
#ifndef __MGAG200_DRV_H__
#define __MGAG200_DRV_H__
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
#include <video/vga.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
-
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
-
#include <drm/drm_vram_mm_helper.h>
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-
#include "mgag200_reg.h"
#define DRIVER_AUTHOR "Matthew Garrett"
@@ -100,21 +98,6 @@
#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
#define to_mga_connector(x) container_of(x, struct mga_connector, base)
-#define to_mga_framebuffer(x) container_of(x, struct mga_framebuffer, base)
-
-struct mga_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
-};
-
-struct mga_fbdev {
- struct drm_fb_helper helper; /* must be first */
- struct mga_framebuffer mfb;
- void *sysram;
- int size;
- int x1, y1, x2, y2; /* dirty rect */
- spinlock_t dirty_lock;
-};
struct mga_crtc {
struct drm_crtc base;
@@ -189,7 +172,6 @@ struct mga_device {
struct mga_mc mc;
struct mga_mode_info mode_info;
- struct mga_fbdev *mfbdev;
struct mga_cursor cursor;
bool suspended;
@@ -210,25 +192,9 @@ struct mga_device {
int mgag200_modeset_init(struct mga_device *mdev);
void mgag200_modeset_fini(struct mga_device *mdev);
- /* mgag200_fb.c */
-int mgag200_fbdev_init(struct mga_device *mdev);
-void mgag200_fbdev_fini(struct mga_device *mdev);
-
/* mgag200_main.c */
-int mgag200_framebuffer_init(struct drm_device *dev,
- struct mga_framebuffer *mfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
-
-
int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
void mgag200_driver_unload(struct drm_device *dev);
-int mgag200_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj);
-int mgag200_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
/* mgag200_i2c.c */
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
deleted file mode 100644
index 8adb33228732..000000000000
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ /dev/null
@@ -1,315 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2010 Matt Turner.
- * Copyright 2012 Red Hat
- *
- * Authors: Matthew Garrett
- * Matt Turner
- * Dave Airlie
- */
-#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/drm_util.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "mgag200_drv.h"
-
-static void mga_dirty_update(struct mga_fbdev *mfbdev,
- int x, int y, int width, int height)
-{
- int i;
- struct drm_gem_object *obj;
- struct drm_gem_vram_object *gbo;
- int src_offset, dst_offset;
- int bpp = mfbdev->mfb.base.format->cpp[0];
- int ret;
- u8 *dst;
- bool unmap = false;
- bool store_for_later = false;
- int x2, y2;
- unsigned long flags;
-
- obj = mfbdev->mfb.obj;
- gbo = drm_gem_vram_of_gem(obj);
-
- if (drm_can_sleep()) {
- /* We pin the BO so it won't be moved during the
- * update. The actual location, video RAM or system
- * memory, is not important.
- */
- ret = drm_gem_vram_pin(gbo, 0);
- if (ret) {
- if (ret != -EBUSY)
- return;
- store_for_later = true;
- }
- } else {
- store_for_later = true;
- }
-
- x2 = x + width - 1;
- y2 = y + height - 1;
- spin_lock_irqsave(&mfbdev->dirty_lock, flags);
-
- if (mfbdev->y1 < y)
- y = mfbdev->y1;
- if (mfbdev->y2 > y2)
- y2 = mfbdev->y2;
- if (mfbdev->x1 < x)
- x = mfbdev->x1;
- if (mfbdev->x2 > x2)
- x2 = mfbdev->x2;
-
- if (store_for_later) {
- mfbdev->x1 = x;
- mfbdev->x2 = x2;
- mfbdev->y1 = y;
- mfbdev->y2 = y2;
- spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
- return;
- }
-
- mfbdev->x1 = mfbdev->y1 = INT_MAX;
- mfbdev->x2 = mfbdev->y2 = 0;
- spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
-
- dst = drm_gem_vram_kmap(gbo, false, NULL);
- if (IS_ERR(dst)) {
- DRM_ERROR("failed to kmap fb updates\n");
- goto out;
- } else if (!dst) {
- dst = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(dst)) {
- DRM_ERROR("failed to kmap fb updates\n");
- goto out;
- }
- unmap = true;
- }
-
- for (i = y; i <= y2; i++) {
- /* assume equal stride for now */
- src_offset = dst_offset =
- i * mfbdev->mfb.base.pitches[0] + (x * bpp);
- memcpy_toio(dst + dst_offset, mfbdev->sysram + src_offset,
- (x2 - x + 1) * bpp);
- }
-
- if (unmap)
- drm_gem_vram_kunmap(gbo);
-
-out:
- drm_gem_vram_unpin(gbo);
-}
-
-static void mga_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct mga_fbdev *mfbdev = info->par;
- drm_fb_helper_sys_fillrect(info, rect);
- mga_dirty_update(mfbdev, rect->dx, rect->dy, rect->width,
- rect->height);
-}
-
-static void mga_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct mga_fbdev *mfbdev = info->par;
- drm_fb_helper_sys_copyarea(info, area);
- mga_dirty_update(mfbdev, area->dx, area->dy, area->width,
- area->height);
-}
-
-static void mga_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct mga_fbdev *mfbdev = info->par;
- drm_fb_helper_sys_imageblit(info, image);
- mga_dirty_update(mfbdev, image->dx, image->dy, image->width,
- image->height);
-}
-
-
-static struct fb_ops mgag200fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = mga_fillrect,
- .fb_copyarea = mga_copyarea,
- .fb_imageblit = mga_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int mgag200fb_create_object(struct mga_fbdev *afbdev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct drm_device *dev = afbdev->helper.dev;
- u32 size;
- struct drm_gem_object *gobj;
- int ret = 0;
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- ret = mgag200_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
- return ret;
-}
-
-static int mgag200fb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct mga_fbdev *mfbdev =
- container_of(helper, struct mga_fbdev, helper);
- struct drm_device *dev = mfbdev->helper.dev;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct mga_device *mdev = dev->dev_private;
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_gem_object *gobj = NULL;
- int ret;
- void *sysram;
- int size;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- size = mode_cmd.pitches[0] * mode_cmd.height;
-
- ret = mgag200fb_create_object(mfbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- sysram = vmalloc(size);
- if (!sysram) {
- ret = -ENOMEM;
- goto err_sysram;
- }
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_alloc_fbi;
- }
-
- ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
- if (ret)
- goto err_alloc_fbi;
-
- mfbdev->sysram = sysram;
- mfbdev->size = size;
-
- fb = &mfbdev->mfb.base;
-
- /* setup helper */
- mfbdev->helper.fb = fb;
-
- info->fbops = &mgag200fb_ops;
-
- /* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
- info->apertures->ranges[0].size = mdev->mc.vram_size;
-
- drm_fb_helper_fill_info(info, &mfbdev->helper, sizes);
-
- info->screen_base = sysram;
- info->screen_size = size;
- info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
- DRM_DEBUG_KMS("allocated %dx%d\n",
- fb->width, fb->height);
-
- return 0;
-
-err_alloc_fbi:
- vfree(sysram);
-err_sysram:
- drm_gem_object_put_unlocked(gobj);
-
- return ret;
-}
-
-static int mga_fbdev_destroy(struct drm_device *dev,
- struct mga_fbdev *mfbdev)
-{
- struct mga_framebuffer *mfb = &mfbdev->mfb;
-
- drm_fb_helper_unregister_fbi(&mfbdev->helper);
-
- if (mfb->obj) {
- drm_gem_object_put_unlocked(mfb->obj);
- mfb->obj = NULL;
- }
- drm_fb_helper_fini(&mfbdev->helper);
- vfree(mfbdev->sysram);
- drm_framebuffer_unregister_private(&mfb->base);
- drm_framebuffer_cleanup(&mfb->base);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs mga_fb_helper_funcs = {
- .fb_probe = mgag200fb_create,
-};
-
-int mgag200_fbdev_init(struct mga_device *mdev)
-{
- struct mga_fbdev *mfbdev;
- int ret;
- int bpp_sel = 32;
-
- /* prefer 16bpp on low end gpus with limited VRAM */
- if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
- bpp_sel = 16;
-
- mfbdev = devm_kzalloc(mdev->dev->dev, sizeof(struct mga_fbdev), GFP_KERNEL);
- if (!mfbdev)
- return -ENOMEM;
-
- mdev->mfbdev = mfbdev;
- spin_lock_init(&mfbdev->dirty_lock);
-
- drm_fb_helper_prepare(mdev->dev, &mfbdev->helper, &mga_fb_helper_funcs);
-
- ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
- MGAG200FB_CONN_LIMIT);
- if (ret)
- goto err_fb_helper;
-
- ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
- if (ret)
- goto err_fb_setup;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(mdev->dev);
-
- ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
- if (ret)
- goto err_fb_setup;
-
- return 0;
-
-err_fb_setup:
- drm_fb_helper_fini(&mfbdev->helper);
-err_fb_helper:
- mdev->mfbdev = NULL;
-
- return ret;
-}
-
-void mgag200_fbdev_fini(struct mga_device *mdev)
-{
- if (!mdev->mfbdev)
- return;
-
- mga_fbdev_destroy(mdev->dev, mdev->mfbdev);
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 77d1c4771786..51d4037f00d4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -25,10 +25,12 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
+
#include <linux/export.h>
-#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <drm/drmP.h>
+#include <linux/i2c.h>
+
+#include <drm/drm_pci.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index dd61ccc5af5c..a9773334dedf 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -7,70 +7,15 @@
* Matt Turner
* Dave Airlie
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include "mgag200_drv.h"
-static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
-
- drm_gem_object_put_unlocked(mga_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
-}
-
-static const struct drm_framebuffer_funcs mga_fb_funcs = {
- .destroy = mga_user_framebuffer_destroy,
-};
-
-int mgag200_framebuffer_init(struct drm_device *dev,
- struct mga_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd);
- gfb->obj = obj;
- ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
- if (ret) {
- DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static struct drm_framebuffer *
-mgag200_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_gem_object *obj;
- struct mga_framebuffer *mga_fb;
- int ret;
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- mga_fb = kzalloc(sizeof(*mga_fb), GFP_KERNEL);
- if (!mga_fb) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(-ENOMEM);
- }
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_pci.h>
- ret = mgag200_framebuffer_init(dev, mga_fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_put_unlocked(obj);
- kfree(mga_fb);
- return ERR_PTR(ret);
- }
- return &mga_fb->base;
-}
+#include "mgag200_drv.h"
static const struct drm_mode_config_funcs mga_mode_funcs = {
- .fb_create = mgag200_user_framebuffer_create,
+ .fb_create = drm_gem_fb_create,
};
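With struct mga_framebuffer gone, the driver can point .fb_create at the generic GEM-backed framebuffer helper. A minimal sketch of the wiring, assuming framebuffers that are plain GEM objects with no driver-specific validation:

#include <drm/drm_gem_framebuffer_helper.h>

static const struct drm_mode_config_funcs sketch_mode_funcs = {
	/* drm_gem_fb_create() looks up the GEM handles named in the
	 * userspace request and wraps them in a drm_framebuffer. */
	.fb_create = drm_gem_fb_create,
};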
static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
@@ -217,7 +162,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024))
dev->mode_config.preferred_depth = 16;
else
- dev->mode_config.preferred_depth = 24;
+ dev->mode_config.preferred_depth = 32;
dev->mode_config.prefer_shadow = 1;
r = mgag200_modeset_init(mdev);
@@ -241,6 +186,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
}
mdev->cursor.pixels_current = NULL;
+ r = drm_fbdev_generic_setup(mdev->dev, 0);
+ if (r)
+ goto err_modeset;
+
return 0;
err_modeset:
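This one call is what replaces the 315 lines of mgag200_fb.c deleted above; the generic fbdev emulation takes over the shadow buffering and dirty-rectangle handling the driver used to open-code. A sketch of the hook-up at the end of device init:

#include <drm/drm_fb_helper.h>

static int sketch_setup_fbdev(struct drm_device *ddev)
{
	/* A preferred_bpp of 0 falls back to the value derived from
	 * ddev->mode_config.preferred_depth set during driver load. */
	return drm_fbdev_generic_setup(ddev, 0);
}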
@@ -259,32 +208,7 @@ void mgag200_driver_unload(struct drm_device *dev)
if (mdev == NULL)
return;
mgag200_modeset_fini(mdev);
- mgag200_fbdev_fini(mdev);
drm_mode_config_cleanup(dev);
mgag200_mm_fini(mdev);
dev->dev_private = NULL;
}
-
-int mgag200_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, size, 0, false);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
- *obj = &gbo->gem;
- return 0;
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index a25054015e8c..5e778b5f1a10 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -10,8 +10,9 @@
#include <linux/delay.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_pci.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -859,28 +860,16 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
{
- struct mga_device *mdev = crtc->dev->dev_private;
- struct drm_gem_object *obj;
- struct mga_framebuffer *mga_fb;
struct drm_gem_vram_object *gbo;
int ret;
s64 gpu_addr;
- void *base;
if (!atomic && fb) {
- mga_fb = to_mga_framebuffer(fb);
- obj = mga_fb->obj;
- gbo = drm_gem_vram_of_gem(obj);
-
- /* unmap if console */
- if (&mdev->mfbdev->mfb == mga_fb)
- drm_gem_vram_kunmap(gbo);
+ gbo = drm_gem_vram_of_gem(fb->obj[0]);
drm_gem_vram_unpin(gbo);
}
- mga_fb = to_mga_framebuffer(crtc->primary->fb);
- obj = mga_fb->obj;
- gbo = drm_gem_vram_of_gem(obj);
+ gbo = drm_gem_vram_of_gem(crtc->primary->fb->obj[0]);
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
@@ -891,15 +880,6 @@ static int mga_crtc_do_set_base(struct drm_crtc *crtc,
goto err_drm_gem_vram_unpin;
}
- if (&mdev->mfbdev->mfb == mga_fb) {
- /* if pushing console in kmap it */
- base = drm_gem_vram_kmap(gbo, true, NULL);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- DRM_ERROR("failed to kmap fbcon\n");
- }
- }
-
mga_set_start_address(crtc, (u32)gpu_addr);
return 0;
@@ -1423,14 +1403,9 @@ static void mga_crtc_disable(struct drm_crtc *crtc)
DRM_DEBUG_KMS("\n");
mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
- struct mga_device *mdev = crtc->dev->dev_private;
- struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->primary->fb);
- struct drm_gem_object *obj = mga_fb->obj;
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(obj);
-
- /* unmap if console */
- if (&mdev->mfbdev->mfb == mga_fb)
- drm_gem_vram_kunmap(gbo);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct drm_gem_vram_object *gbo =
+ drm_gem_vram_of_gem(fb->obj[0]);
drm_gem_vram_unpin(gbo);
}
crtc->primary->fb = NULL;
@@ -1703,18 +1678,19 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
return NULL;
connector = &mga_connector->base;
+ mga_connector->i2c = mgag200_i2c_create(dev);
+ if (!mga_connector->i2c)
+ DRM_ERROR("failed to add ddc bus\n");
- drm_connector_init(dev, connector,
- &mga_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
+ drm_connector_init_with_ddc(dev, connector,
+ &mga_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &mga_connector->i2c->adapter);
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
drm_connector_register(connector);
- mga_connector->i2c = mgag200_i2c_create(dev);
- if (!mga_connector->i2c)
- DRM_ERROR("failed to add ddc bus\n");
-
return connector;
}
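The reorder above is required because drm_connector_init_with_ddc() takes the DDC adapter at initialization time, so the i2c bus now has to exist before the connector does. Sketch of the call, with the adapter passed as a parameter for illustration:

static int sketch_vga_connector_init(struct drm_device *dev,
				     struct drm_connector *connector,
				     struct i2c_adapter *ddc)
{
	/* Associates the connector with its DDC bus up front, instead
	 * of wiring up the i2c channel after drm_connector_init(). */
	return drm_connector_init_with_ddc(dev, connector,
					   &mga_vga_connector_funcs,
					   DRM_MODE_CONNECTOR_VGA,
					   ddc);
}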
@@ -1723,7 +1699,6 @@ int mgag200_modeset_init(struct mga_device *mdev)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
- int ret;
mdev->mode_info.mode_config_initialized = true;
@@ -1748,12 +1723,6 @@ int mgag200_modeset_init(struct mga_device *mdev)
drm_connector_attach_encoder(connector, encoder);
- ret = mgag200_fbdev_init(mdev);
- if (ret) {
- DRM_ERROR("mga_fbdev_init failed\n");
- return ret;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 59294c0fd24a..73a6b848601c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -25,7 +25,8 @@
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_pci.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 45bfac9e3af7..8cf0b8a4ed03 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -12,6 +12,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "dpu_kms.h"
@@ -764,8 +765,6 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
struct dpu_hw_fmt_layout layout;
- struct drm_gem_object *obj;
- struct dma_fence *fence;
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
@@ -782,10 +781,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
* we can use msm_atomic_prepare_fb() instead of doing the
* implicit fence and fb prepare by hand here.
*/
- obj = msm_framebuffer_bo(new_state->fb, 0);
- fence = reservation_object_get_excl_rcu(obj->resv);
- if (fence)
- drm_atomic_set_fence_for_plane(new_state, fence);
+ drm_gem_fb_prepare_fb(plane, new_state);
if (pstate->aspace) {
ret = msm_framebuffer_prepare(new_state->fb,
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index dd16babdd8c0..169d5f915e68 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "msm_gem.h"
@@ -37,16 +38,11 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
{
struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
- struct drm_gem_object *obj;
- struct dma_fence *fence;
if (!new_state->fb)
return 0;
- obj = msm_framebuffer_bo(new_state->fb, 0);
- fence = reservation_object_get_excl_rcu(obj->resv);
-
- drm_atomic_set_fence_for_plane(new_state, fence);
+ drm_gem_fb_prepare_fb(plane, new_state);
return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}
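drm_gem_fb_prepare_fb() is, in essence, the code both msm call sites just deleted: fetch the exclusive fence of the framebuffer's first GEM object and hand it to the plane state. Roughly, under this kernel's reservation_object API:

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem_framebuffer_helper.h>

static int sketch_prepare_fb(struct drm_plane *plane,
			     struct drm_plane_state *state)
{
	struct drm_gem_object *obj;
	struct dma_fence *fence;

	if (!state->fb)
		return 0;

	obj = drm_gem_fb_get_obj(state->fb, 0);
	fence = reservation_object_get_excl_rcu(obj->resv);
	drm_atomic_set_fence_for_plane(state, fence);

	return 0;
}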
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c356f5ccf253..ee031c086805 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -984,17 +984,17 @@ static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
}
static const struct drm_ioctl_desc msm_ioctls[] = {
- DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
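Dropping DRM_AUTH is safe here because every one of these ioctls carries DRM_RENDER_ALLOW: they were already reachable without authentication through the render node, so requiring auth on the primary node added friction without adding protection. The resulting declarations keep only the render flag, e.g.:

/* No DRM_AUTH: callable from render nodes and unauthenticated
 * primary-node clients alike. */
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_RENDER_ALLOW),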
static const struct vm_operations_struct vm_ops = {
@@ -1017,7 +1017,6 @@ static const struct file_operations fops = {
static struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM |
- DRIVER_PRIME |
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET,
@@ -1036,8 +1035,6 @@ static struct drm_driver msm_driver = {
.dumb_map_offset = msm_gem_dumb_map_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
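The removed .gem_prime_export/.gem_prime_import hooks pointed at the core's own helpers; the PRIME core now falls back to drm_gem_prime_export()/drm_gem_prime_import() when a driver leaves them NULL, which is why they can simply be dropped here (and in the other drivers below, along with the DRIVER_PRIME flag). Sketch of the trimmed struct:

static struct drm_driver sketch_driver = {
	.driver_features = DRIVER_GEM | DRIVER_RENDER |
			   DRIVER_ATOMIC | DRIVER_MODESET,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	/* .gem_prime_export/.gem_prime_import left unset: the core
	 * substitutes its default implementations. */
};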
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8cf6362e64bf..8cc70026c358 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -975,7 +975,6 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
- struct reservation_object *resv,
struct drm_gem_object **obj,
bool struct_mutex_locked)
{
@@ -1002,9 +1001,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
- if (resv)
- msm_obj->base.resv = resv;
-
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
@@ -1046,7 +1042,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
if (size == 0)
return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
+ ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
if (ret)
goto fail;
@@ -1123,7 +1119,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
+ ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4edb874548b3..f7308d68c5ed 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -95,7 +95,8 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
*/
gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
- &msm_devfreq_profile, "simple_ondemand", NULL);
+ &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ NULL);
if (IS_ERR(gpu->devfreq.devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
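DEVFREQ_GOV_SIMPLE_ONDEMAND is the devfreq core's canonical spelling of the governor name; using the macro instead of a bare string literal avoids typo-prone duplication. Its definition in include/linux/devfreq.h is simply:

#define DEVFREQ_GOV_SIMPLE_ONDEMAND	"simple_ondemand"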
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 93f413345e0d..12421567af89 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -8,21 +8,23 @@
* Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
-#include <drm/drmP.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+
+#include <video/videomode.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <linux/clk.h>
-#include <linux/iopoll.h>
-#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
-#include <video/videomode.h>
+#include <drm/drm_vblank.h>
#include "mxsfb_drv.h"
#include "mxsfb_regs.h"
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 6fafc90da4ec..878ef6822812 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -8,29 +8,32 @@
* Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved.
*/
-#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/dma-mapping.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
#include <linux/reservation.h>
+#include <linux/spinlock.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
#include "mxsfb_drv.h"
#include "mxsfb_regs.h"
@@ -313,8 +316,7 @@ static irqreturn_t mxsfb_irq_handler(int irq, void *data)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver mxsfb_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = mxsfb_irq_handler,
.irq_preinstall = mxsfb_irq_preinstall,
.irq_uninstall = mxsfb_irq_preinstall,
@@ -323,8 +325,6 @@ static struct drm_driver mxsfb_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index 91e76f9cead6..231d016c6f47 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -15,7 +15,6 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drmP.h>
#include "mxsfb_drv.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 283ff690350e..89f8e76a2d7d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -457,7 +457,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
+ asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.base.resv);
asyw->image.offset[0] = fb->nvbo->bo.offset;
if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index c3fd5dd39ed9..e2bae1424502 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -139,7 +139,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
if (chan->ntfy) {
nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
- drm_gem_object_put_unlocked(&chan->ntfy->gem);
+ drm_gem_object_put_unlocked(&chan->ntfy->bo.base);
}
if (chan->heap.block_size)
@@ -245,12 +245,6 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
}
int
-nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
-{
- return -EINVAL;
-}
-
-int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_channel_alloc *init = data;
@@ -345,7 +339,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
goto done;
}
- ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
+ ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
&init->notifier_handle);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 195546719bfe..70f6aa5c9dd1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -6,7 +6,6 @@
struct drm_device *dev, void *data, struct drm_file *file_priv
int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
-int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 34a998012bf6..99e391be9370 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -136,7 +136,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- if (unlikely(nvbo->gem.filp))
+ if (unlikely(nvbo->bo.base.filp))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
WARN_ON(nvbo->pin_refcnt > 0);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -299,6 +299,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
type, &nvbo->placement,
align >> PAGE_SHIFT, false, acc_size, sg,
robj, nouveau_bo_del_ttm);
+
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
@@ -1323,7 +1324,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
- struct dma_fence *fence = reservation_object_get_excl(bo->resv);
+ struct dma_fence *fence = reservation_object_get_excl(bo->base.resv);
nv10_bo_put_tile_region(dev, *old_tile, fence);
*old_tile = new_tile;
@@ -1400,7 +1401,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
- return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+ return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
filp->private_data);
}
@@ -1654,7 +1655,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
- struct reservation_object *resv = nvbo->bo.resv;
+ struct reservation_object *resv = nvbo->bo.base.resv;
if (exclusive)
reservation_object_add_excl_fence(resv, &fence->base);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 383ac36d5869..d675efe8e7f9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -35,11 +35,6 @@ struct nouveau_bo {
struct nouveau_drm_tile *tile;
- /* Only valid if allocated via nouveau_gem_new() and iff you hold a
- * gem reference to it! For debugging, use gem.filp != NULL to test
- * whether it is valid. */
- struct drm_gem_object gem;
-
/* protect by the ttm reservation lock */
int pin_refcnt;
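struct ttm_buffer_object now embeds a struct drm_gem_object as its .base member, which makes nouveau's private copy redundant; every former nvbo->gem (and nvbo->bo.resv) access becomes nvbo->bo.base throughout the hunks below. The accessor pattern, mirroring the nouveau_gem.h change further down:

static inline struct nouveau_bo *
sketch_gem_to_nouveau_bo(struct drm_gem_object *gem)
{
	/* The GEM object lives at bo.base inside struct nouveau_bo. */
	return gem ? container_of(gem, struct nouveau_bo, bo.base) : NULL;
}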
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8f15281faa79..330d7d29a6e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1349,7 +1349,7 @@ nouveau_connector_create(struct drm_device *dev,
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- nv_connector->aux.dev = dev->dev;
+ nv_connector->aux.dev = connector->kdev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
dcbe->hasht, dcbe->hashm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 832da8e0020d..98afc50162e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -201,7 +201,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
if (fb->nvbo)
- drm_gem_object_put_unlocked(&fb->nvbo->gem);
+ drm_gem_object_put_unlocked(&fb->nvbo->bo.base);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -214,7 +214,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
+ return drm_gem_handle_create(file_priv, &fb->nvbo->bo.base, handle);
}
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -660,8 +660,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
- drm_gem_object_put_unlocked(&bo->gem);
+ ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
+ drm_gem_object_put_unlocked(&bo->bo.base);
return ret;
}
@@ -675,7 +675,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
- *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
+ *poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
drm_gem_object_put_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7c2fcaba42d6..7e045580a3a4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1046,20 +1046,20 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
static const struct drm_ioctl_desc
nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_INIT, nouveau_svmm_init, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_SVM_BIND, nouveau_svmm_bind, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_RENDER_ALLOW),
};
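drm_invalid_op() is the DRM core's stock handler for retired ioctls, so the dedicated nouveau_abi16_ioctl_setparam() stub removed earlier (which did nothing but fail) can go without any ABI change. For reference, the core helper is essentially:

int drm_invalid_op(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	return -EINVAL;
}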
long
@@ -1105,7 +1105,7 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features =
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER
#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
| DRIVER_KMS_LEGACY_CONTEXT
#endif
@@ -1130,10 +1130,7 @@ driver_stub = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = nouveau_gem_prime_pin,
- .gem_prime_res_obj = nouveau_gem_prime_res_obj,
.gem_prime_unpin = nouveau_gem_prime_unpin,
.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index d4964f3397a1..e5f249ab216a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -335,7 +335,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
{
struct nouveau_fence_chan *fctx = chan->fence;
struct dma_fence *fence;
- struct reservation_object *resv = nvbo->bo.resv;
+ struct reservation_object *resv = nvbo->bo.base.resv;
struct reservation_object_list *fobj;
struct nouveau_fence *f;
int ret = 0, i;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b4bda716564d..c7368aa0bdec 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -205,13 +205,13 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
- ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size);
+ ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, nvbo->bo.mem.size);
if (ret) {
nouveau_bo_ref(NULL, pnvbo);
return -ENOMEM;
}
- nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
+ nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
return 0;
}
@@ -240,7 +240,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
+ rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
rep->tile_mode = nvbo->mode;
rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
@@ -268,15 +268,16 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
if (ret)
return ret;
- ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
+ ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
+ &req->info.handle);
if (ret == 0) {
- ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
+ ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
if (ret)
drm_gem_handle_delete(file_priv, req->info.handle);
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->bo.base);
return ret;
}
@@ -355,7 +356,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_put_unlocked(&nvbo->gem);
+ drm_gem_object_put_unlocked(&nvbo->bo.base);
}
}
@@ -493,7 +494,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
+ ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
@@ -886,7 +887,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
+ lret = reservation_object_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 03371204a47c..40ba0f1ba5aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -10,7 +10,7 @@
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
- return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
+ return gem ? container_of(gem, struct nouveau_bo, bo.base) : NULL;
}
/* nouveau_gem.c */
@@ -33,7 +33,6 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_prime_pin(struct drm_gem_object *);
-struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *);
extern void nouveau_gem_prime_unpin(struct drm_gem_object *);
extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 1fefc93af1d7..e86ad7ae622b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -68,10 +68,10 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
flags = TTM_PL_FLAG_TT;
- ww_mutex_lock(&robj->lock, NULL);
+ reservation_object_lock(robj, NULL);
ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
sg, robj, &nvbo);
- ww_mutex_unlock(&robj->lock);
+ reservation_object_unlock(robj);
if (ret)
return ERR_PTR(ret);
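reservation_object_lock()/unlock() are thin inline wrappers around the object's embedded ww_mutex, so this hunk is a readability change rather than a behavioural one. Approximately, from linux/reservation.h of this era:

static inline int
reservation_object_lock(struct reservation_object *obj,
			struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}

static inline void
reservation_object_unlock(struct reservation_object *obj)
{
	ww_mutex_unlock(&obj->lock);
}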
@@ -79,13 +79,13 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
- ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+ ret = drm_gem_object_init(dev, &nvbo->bo.base, nvbo->bo.mem.size);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
return ERR_PTR(-ENOMEM);
}
- return &nvbo->gem;
+ return &nvbo->bo.base;
}
int nouveau_gem_prime_pin(struct drm_gem_object *obj)
@@ -107,10 +107,3 @@ void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
nouveau_bo_unpin(nvbo);
}
-
-struct reservation_object *nouveau_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct nouveau_bo *nvbo = nouveau_gem_object(obj);
-
- return nvbo->bo.resv;
-}
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index f9ac9afc5641..3c5ddbf30e97 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -4,12 +4,14 @@
* Author: Rob Clark <rob@ti.com>
*/
+#include <linux/math64.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mode.h>
#include <drm/drm_plane_helper.h>
-#include <linux/math64.h>
+#include <drm/drm_vblank.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 2b283f68fab7..34dfb33145b4 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -7,6 +7,8 @@
#include <linux/seq_file.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
#include <drm/drm_fb_helper.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 288c59dae56a..9f652d2e7af1 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -4,15 +4,21 @@
* Author: Rob Clark <rob@ti.com>
*/
-#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_probe_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_panel.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -466,19 +472,19 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, drm_invalid_op,
DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
/* Deprecated, to be removed. */
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, drm_noop,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
/* Deprecated, to be removed. */
DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, drm_noop,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
};
/*
@@ -513,7 +519,7 @@ static const struct file_operations omapdriver_fops = {
};
static struct drm_driver omap_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
.open = dev_open,
.lastclose = drm_fb_helper_lastclose,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 025bd57081d5..7c4b66efcaa7 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -11,12 +11,11 @@
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <drm/drmP.h>
+#include "dss/omapdss.h"
+
#include <drm/drm_gem.h>
#include <drm/omap_drm.h>
-#include "dss/omapdss.h"
-
#include "omap_connector.h"
#include "omap_crtc.h"
#include "omap_encoder.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 7e89e5cb4068..1b8b5108caf8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -4,10 +4,10 @@
* Author: Rob Clark <rob@ti.com>
*/
-#include <linux/seq_file.h>
+#include <linux/dma-mapping.h>
-#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include "omap_dmm_tiler.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 561c4812545b..58f53946ee4d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -7,6 +7,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_util.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include "omap_drv.h"
@@ -76,8 +78,6 @@ static struct fb_ops omap_fb_ops = {
.fb_setcmap = drm_fb_helper_setcmap,
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = omap_fbdev_pan_display,
- .fb_debug_enter = drm_fb_helper_debug_enter,
- .fb_debug_leave = drm_fb_helper_debug_leave,
.fb_ioctl = drm_fb_helper_ioctl,
.fb_read = drm_fb_helper_sys_read,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 37378dbc50d0..08f539efddfb 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -4,11 +4,13 @@
* Author: Rob Clark <rob.clark@linaro.org>
*/
+#include <linux/dma-mapping.h>
#include <linux/seq_file.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>
+#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index 31cf345bf8ae..729b7812a815 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -65,8 +65,7 @@ u64 omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
/* PRIME Interface */
-struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags);
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index 07c0b1b486f7..e8c3ae7ac77e 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -5,6 +5,9 @@
*/
#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+
+#include <drm/drm_prime.h>
#include "omap_drv.h"
@@ -125,8 +128,7 @@ static const struct dma_buf_ops omap_dmabuf_ops = {
.mmap = omap_gem_dmabuf_mmap,
};
-struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags)
+struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -135,7 +137,7 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
exp_info.flags = flags;
exp_info.priv = obj;
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
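The PRIME export hook lost its drm_device parameter tree-wide; the device is always reachable as obj->dev, which is exactly what the new call uses. The updated shape of such an export function, sketched in full:

struct dma_buf *sketch_gem_prime_export(struct drm_gem_object *obj,
					int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &omap_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	/* The drm_device is derived from the object itself now. */
	return drm_gem_dmabuf_export(obj->dev, &exp_info);
}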
/* -----------------------------------------------------------------------------
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 726a013e7988..382bcdc72ac0 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -4,6 +4,8 @@
* Author: Rob Clark <rob.clark@linaro.org>
*/
+#include <drm/drm_vblank.h>
+
#include "omap_drv.h"
struct omap_irq_wait {
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index d9d931aa6e26..eaecd40cc32e 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -111,6 +111,15 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for the LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_NOVATEK_NT39016
+ tristate "Novatek NT39016 RGB/SPI panel"
+ depends on OF && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select REGMAP_SPI
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Novatek NT39016 display controller.
+
config DRM_PANEL_OLIMEX_LCD_OLINUXINO
tristate "Olimex LCD-OLinuXino panel"
depends on OF
@@ -159,6 +168,15 @@ config DRM_PANEL_RASPBERRYPI_TOUCHSCREEN
Pi 7" Touchscreen. To compile this driver as a module,
choose M here.
+config DRM_PANEL_RAYDIUM_RM67191
+ tristate "Raydium RM67191 FHD 1080x1920 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Raydium RM67191 FHD
+ (1080x1920) DSI panel.
+
config DRM_PANEL_RAYDIUM_RM68200
tristate "Raydium RM68200 720x1280 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index fb0cb3aaa9e6..62dae45f8f74 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -9,11 +9,13 @@ obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS) += panel-osd-osd101t2587-53ts.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
+obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67191) += panel-raydium-rm67191.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_ROCKTECH_JH057N00900) += panel-rocktech-jh057n00900.o
obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 1ec57d0806a8..ad47cc95459e 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -147,8 +147,11 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
int ret;
ret = of_get_display_timing(np, "panel-timing", &timing);
- if (ret < 0)
+ if (ret < 0) {
+ dev_err(lvds->dev, "%pOF: problems parsing panel-timing (%d)\n",
+ np, ret);
return ret;
+ }
videomode_from_timing(&timing, &lvds->video_mode);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
new file mode 100644
index 000000000000..2ad1063b068d
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Novatek NT39016 TFT LCD panel driver
+ *
+ * Copyright (C) 2017, Maarten ter Huurne <maarten@treewalker.org>
+ * Copyright (C) 2019, Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+enum nt39016_regs {
+ NT39016_REG_SYSTEM,
+ NT39016_REG_TIMING,
+ NT39016_REG_OP,
+ NT39016_REG_DATA_IN,
+ NT39016_REG_SRC_TIMING_DELAY,
+ NT39016_REG_GATE_TIMING_DELAY,
+ NT39016_REG_RESERVED,
+ NT39016_REG_INITIAL_FUNC,
+ NT39016_REG_CONTRAST,
+ NT39016_REG_BRIGHTNESS,
+ NT39016_REG_HUE_SATURATION,
+ NT39016_REG_RB_SUBCONTRAST,
+ NT39016_REG_R_SUBBRIGHTNESS,
+ NT39016_REG_B_SUBBRIGHTNESS,
+ NT39016_REG_VCOMDC,
+ NT39016_REG_VCOMAC,
+ NT39016_REG_VGAM2,
+ NT39016_REG_VGAM34,
+ NT39016_REG_VGAM56,
+ NT39016_REG_VCOMDC_TRIM = 0x1e,
+ NT39016_REG_DISPLAY_MODE = 0x20,
+};
+
+#define NT39016_SYSTEM_RESET_N BIT(0)
+#define NT39016_SYSTEM_STANDBY BIT(1)
+
+struct nt39016_panel_info {
+ struct drm_display_mode display_mode;
+ u16 width_mm, height_mm;
+ u32 bus_format, bus_flags;
+};
+
+struct nt39016 {
+ struct drm_panel drm_panel;
+ struct device *dev;
+ struct regmap *map;
+ struct regulator *supply;
+ const struct nt39016_panel_info *panel_info;
+
+ struct gpio_desc *reset_gpio;
+
+ struct backlight_device *backlight;
+};
+
+static inline struct nt39016 *to_nt39016(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt39016, drm_panel);
+}
+
+#define RV(REG, VAL) { .reg = (REG), .def = (VAL), .delay_us = 2 }
+static const struct reg_sequence nt39016_panel_regs[] = {
+ RV(NT39016_REG_SYSTEM, 0x00),
+ RV(NT39016_REG_TIMING, 0x00),
+ RV(NT39016_REG_OP, 0x03),
+ RV(NT39016_REG_DATA_IN, 0xCC),
+ RV(NT39016_REG_SRC_TIMING_DELAY, 0x46),
+ RV(NT39016_REG_GATE_TIMING_DELAY, 0x05),
+ RV(NT39016_REG_RESERVED, 0x00),
+ RV(NT39016_REG_INITIAL_FUNC, 0x00),
+ RV(NT39016_REG_CONTRAST, 0x08),
+ RV(NT39016_REG_BRIGHTNESS, 0x40),
+ RV(NT39016_REG_HUE_SATURATION, 0x88),
+ RV(NT39016_REG_RB_SUBCONTRAST, 0x88),
+ RV(NT39016_REG_R_SUBBRIGHTNESS, 0x20),
+ RV(NT39016_REG_B_SUBBRIGHTNESS, 0x20),
+ RV(NT39016_REG_VCOMDC, 0x67),
+ RV(NT39016_REG_VCOMAC, 0xA4),
+ RV(NT39016_REG_VGAM2, 0x04),
+ RV(NT39016_REG_VGAM34, 0x24),
+ RV(NT39016_REG_VGAM56, 0x24),
+ RV(NT39016_REG_DISPLAY_MODE, 0x00),
+};
+
+#undef RV
+
+static const struct regmap_range nt39016_regmap_no_ranges[] = {
+ regmap_reg_range(0x13, 0x1D),
+ regmap_reg_range(0x1F, 0x1F),
+};
+
+static const struct regmap_access_table nt39016_regmap_access_table = {
+ .no_ranges = nt39016_regmap_no_ranges,
+ .n_no_ranges = ARRAY_SIZE(nt39016_regmap_no_ranges),
+};
+
+static const struct regmap_config nt39016_regmap_config = {
+ .reg_bits = 6,
+ .pad_bits = 2,
+ .val_bits = 8,
+
+ .max_register = NT39016_REG_DISPLAY_MODE,
+ .wr_table = &nt39016_regmap_access_table,
+ .write_flag_mask = 0x02,
+
+ .cache_type = REGCACHE_FLAT,
+};
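+
+/*
+ * With this configuration, regmap-spi should frame each write as two bytes
+ * on the 3-wire bus: the 6-bit register address shifted above the two pad
+ * bits and OR'd with the 0x02 write flag, followed by the 8-bit value,
+ * i.e. { (reg << 2) | 0x02, val }.
+ */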
+
+static int nt39016_prepare(struct drm_panel *drm_panel)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+ int err;
+
+ err = regulator_enable(panel->supply);
+ if (err) {
+ dev_err(panel->dev, "Failed to enable power supply: %d", err);
+ return err;
+ }
+
+ /*
+ * Reset the NT39016.
+ * The documentation says the reset pulse should be at least 40 us to
+ * pass the glitch filter, but in testing a 70 us delay made some
+ * resets fail and others succeed, so we use 100 us instead.
+ */
+ gpiod_set_value_cansleep(panel->reset_gpio, 1);
+ usleep_range(100, 1000);
+ gpiod_set_value_cansleep(panel->reset_gpio, 0);
+ udelay(2);
+
+ /* Init all registers. */
+ err = regmap_multi_reg_write(panel->map, nt39016_panel_regs,
+ ARRAY_SIZE(nt39016_panel_regs));
+ if (err) {
+ dev_err(panel->dev, "Failed to init registers: %d", err);
+ goto err_disable_regulator;
+ }
+
+ return 0;
+
+err_disable_regulator:
+ regulator_disable(panel->supply);
+ return err;
+}
+
+static int nt39016_unprepare(struct drm_panel *drm_panel)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+
+ gpiod_set_value_cansleep(panel->reset_gpio, 1);
+
+ regulator_disable(panel->supply);
+
+ return 0;
+}
+
+static int nt39016_enable(struct drm_panel *drm_panel)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+ int ret;
+
+ ret = regmap_write(panel->map, NT39016_REG_SYSTEM,
+ NT39016_SYSTEM_RESET_N | NT39016_SYSTEM_STANDBY);
+ if (ret) {
+ dev_err(panel->dev, "Unable to enable panel: %d", ret);
+ return ret;
+ }
+
+ if (panel->backlight) {
+ /* Wait for the picture to be ready before enabling backlight */
+ msleep(150);
+
+ ret = backlight_enable(panel->backlight);
+ }
+
+ return ret;
+}
+
+static int nt39016_disable(struct drm_panel *drm_panel)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+ int err;
+
+ backlight_disable(panel->backlight);
+
+ err = regmap_write(panel->map, NT39016_REG_SYSTEM,
+ NT39016_SYSTEM_RESET_N);
+ if (err) {
+ dev_err(panel->dev, "Unable to disable panel: %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int nt39016_get_modes(struct drm_panel *drm_panel)
+{
+ struct nt39016 *panel = to_nt39016(drm_panel);
+ const struct nt39016_panel_info *panel_info = panel->panel_info;
+ struct drm_connector *connector = drm_panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(drm_panel->drm, &panel_info->display_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = panel_info->width_mm;
+ connector->display_info.height_mm = panel_info->height_mm;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &panel_info->bus_format, 1);
+ connector->display_info.bus_flags = panel_info->bus_flags;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs nt39016_funcs = {
+ .prepare = nt39016_prepare,
+ .unprepare = nt39016_unprepare,
+ .enable = nt39016_enable,
+ .disable = nt39016_disable,
+ .get_modes = nt39016_get_modes,
+};
+
+static int nt39016_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct nt39016 *panel;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->dev = dev;
+ spi_set_drvdata(spi, panel);
+
+ panel->panel_info = of_device_get_match_data(dev);
+ if (!panel->panel_info)
+ return -EINVAL;
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply)) {
+ dev_err(dev, "Failed to get power supply");
+ return PTR_ERR(panel->supply);
+ }
+
+ panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->reset_gpio)) {
+ dev_err(dev, "Failed to get reset GPIO");
+ return PTR_ERR(panel->reset_gpio);
+ }
+
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_3 | SPI_3WIRE;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(dev, "Failed to setup SPI");
+ return err;
+ }
+
+ panel->map = devm_regmap_init_spi(spi, &nt39016_regmap_config);
+ if (IS_ERR(panel->map)) {
+ dev_err(dev, "Failed to init regmap");
+ return PTR_ERR(panel->map);
+ }
+
+ panel->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(panel->backlight)) {
+ err = PTR_ERR(panel->backlight);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get backlight handle");
+ return err;
+ }
+
+ drm_panel_init(&panel->drm_panel);
+ panel->drm_panel.dev = dev;
+ panel->drm_panel.funcs = &nt39016_funcs;
+
+ err = drm_panel_add(&panel->drm_panel);
+ if (err < 0) {
+ dev_err(dev, "Failed to register panel");
+ return err;
+ }
+
+ return 0;
+}
+
+static int nt39016_remove(struct spi_device *spi)
+{
+ struct nt39016 *panel = spi_get_drvdata(spi);
+
+ drm_panel_remove(&panel->drm_panel);
+
+ nt39016_disable(&panel->drm_panel);
+ nt39016_unprepare(&panel->drm_panel);
+
+ return 0;
+}
+
+static const struct nt39016_panel_info kd035g6_info = {
+ .display_mode = {
+ .clock = 6000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 10,
+ .hsync_end = 320 + 10 + 50,
+ .htotal = 320 + 10 + 50 + 20,
+ .vdisplay = 240,
+ .vsync_start = 240 + 5,
+ .vsync_end = 240 + 5 + 1,
+ .vtotal = 240 + 5 + 1 + 4,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ .width_mm = 71,
+ .height_mm = 53,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
+static const struct of_device_id nt39016_of_match[] = {
+ { .compatible = "kingdisplay,kd035g6-54nt", .data = &kd035g6_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nt39016_of_match);
+
+static struct spi_driver nt39016_driver = {
+ .driver = {
+ .name = "nt39016",
+ .of_match_table = nt39016_of_match,
+ },
+ .probe = nt39016_probe,
+ .remove = nt39016_remove,
+};
+
+module_spi_driver(nt39016_driver);
+
+MODULE_AUTHOR("Maarten ter Huurne <maarten@treewalker.org>");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 28c0620dfe0f..b5b14aa059ea 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -399,7 +399,13 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
/* Look up the DSI host. It needs to probe before we do. */
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!endpoint)
+ return -ENODEV;
+
dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!dsi_host_node)
+ goto error;
+
host = of_find_mipi_dsi_host_by_node(dsi_host_node);
of_node_put(dsi_host_node);
if (!host) {
@@ -408,6 +414,9 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
}
info.node = of_graph_get_remote_port(endpoint);
+ if (!info.node)
+ goto error;
+
of_node_put(endpoint);
ts->dsi = mipi_dsi_device_register_full(host, &info);
@@ -428,6 +437,10 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c,
return ret;
return 0;
+
+error:
+ of_node_put(endpoint);
+ return -ENODEV;
}
static int rpi_touchscreen_remove(struct i2c_client *i2c)
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
new file mode 100644
index 000000000000..6a5d37006103
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -0,0 +1,668 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Raydium RM67191 MIPI-DSI panel driver
+ *
+ * Copyright 2019 NXP
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* Panel specific color-format bits */
+#define COL_FMT_16BPP 0x55
+#define COL_FMT_18BPP 0x66
+#define COL_FMT_24BPP 0x77
+
+/* Write Manufacture Command Set Control */
+#define WRMAUCCTR 0xFE
+
+/* Manufacturer Command Set pages (CMD2) */
+struct cmd_set_entry {
+ u8 cmd;
+ u8 param;
+};
+
+/*
+ * There is no description of these commands in the Reference Manual.
+ * We received them from the vendor, so just use them as-is.
+ */
+static const struct cmd_set_entry manufacturer_cmd_set[] = {
+ {0xFE, 0x0B},
+ {0x28, 0x40},
+ {0x29, 0x4F},
+ {0xFE, 0x0E},
+ {0x4B, 0x00},
+ {0x4C, 0x0F},
+ {0x4D, 0x20},
+ {0x4E, 0x40},
+ {0x4F, 0x60},
+ {0x50, 0xA0},
+ {0x51, 0xC0},
+ {0x52, 0xE0},
+ {0x53, 0xFF},
+ {0xFE, 0x0D},
+ {0x18, 0x08},
+ {0x42, 0x00},
+ {0x08, 0x41},
+ {0x46, 0x02},
+ {0x72, 0x09},
+ {0xFE, 0x0A},
+ {0x24, 0x17},
+ {0x04, 0x07},
+ {0x1A, 0x0C},
+ {0x0F, 0x44},
+ {0xFE, 0x04},
+ {0x00, 0x0C},
+ {0x05, 0x08},
+ {0x06, 0x08},
+ {0x08, 0x08},
+ {0x09, 0x08},
+ {0x0A, 0xE6},
+ {0x0B, 0x8C},
+ {0x1A, 0x12},
+ {0x1E, 0xE0},
+ {0x29, 0x93},
+ {0x2A, 0x93},
+ {0x2F, 0x02},
+ {0x31, 0x02},
+ {0x33, 0x05},
+ {0x37, 0x2D},
+ {0x38, 0x2D},
+ {0x3A, 0x1E},
+ {0x3B, 0x1E},
+ {0x3D, 0x27},
+ {0x3F, 0x80},
+ {0x40, 0x40},
+ {0x41, 0xE0},
+ {0x4F, 0x2F},
+ {0x50, 0x1E},
+ {0xFE, 0x06},
+ {0x00, 0xCC},
+ {0x05, 0x05},
+ {0x07, 0xA2},
+ {0x08, 0xCC},
+ {0x0D, 0x03},
+ {0x0F, 0xA2},
+ {0x32, 0xCC},
+ {0x37, 0x05},
+ {0x39, 0x83},
+ {0x3A, 0xCC},
+ {0x41, 0x04},
+ {0x43, 0x83},
+ {0x44, 0xCC},
+ {0x49, 0x05},
+ {0x4B, 0xA2},
+ {0x4C, 0xCC},
+ {0x51, 0x03},
+ {0x53, 0xA2},
+ {0x75, 0xCC},
+ {0x7A, 0x03},
+ {0x7C, 0x83},
+ {0x7D, 0xCC},
+ {0x82, 0x02},
+ {0x84, 0x83},
+ {0x85, 0xEC},
+ {0x86, 0x0F},
+ {0x87, 0xFF},
+ {0x88, 0x00},
+ {0x8A, 0x02},
+ {0x8C, 0xA2},
+ {0x8D, 0xEA},
+ {0x8E, 0x01},
+ {0x8F, 0xE8},
+ {0xFE, 0x06},
+ {0x90, 0x0A},
+ {0x92, 0x06},
+ {0x93, 0xA0},
+ {0x94, 0xA8},
+ {0x95, 0xEC},
+ {0x96, 0x0F},
+ {0x97, 0xFF},
+ {0x98, 0x00},
+ {0x9A, 0x02},
+ {0x9C, 0xA2},
+ {0xAC, 0x04},
+ {0xFE, 0x06},
+ {0xB1, 0x12},
+ {0xB2, 0x17},
+ {0xB3, 0x17},
+ {0xB4, 0x17},
+ {0xB5, 0x17},
+ {0xB6, 0x11},
+ {0xB7, 0x08},
+ {0xB8, 0x09},
+ {0xB9, 0x06},
+ {0xBA, 0x07},
+ {0xBB, 0x17},
+ {0xBC, 0x17},
+ {0xBD, 0x17},
+ {0xBE, 0x17},
+ {0xBF, 0x17},
+ {0xC0, 0x17},
+ {0xC1, 0x17},
+ {0xC2, 0x17},
+ {0xC3, 0x17},
+ {0xC4, 0x0F},
+ {0xC5, 0x0E},
+ {0xC6, 0x00},
+ {0xC7, 0x01},
+ {0xC8, 0x10},
+ {0xFE, 0x06},
+ {0x95, 0xEC},
+ {0x8D, 0xEE},
+ {0x44, 0xEC},
+ {0x4C, 0xEC},
+ {0x32, 0xEC},
+ {0x3A, 0xEC},
+ {0x7D, 0xEC},
+ {0x75, 0xEC},
+ {0x00, 0xEC},
+ {0x08, 0xEC},
+ {0x85, 0xEC},
+ {0xA6, 0x21},
+ {0xA7, 0x05},
+ {0xA9, 0x06},
+ {0x82, 0x06},
+ {0x41, 0x06},
+ {0x7A, 0x07},
+ {0x37, 0x07},
+ {0x05, 0x06},
+ {0x49, 0x06},
+ {0x0D, 0x04},
+ {0x51, 0x04},
+};
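+
+/*
+ * The {WRMAUCCTR, n} (0xFE) entries above select manufacturer command set
+ * pages; the register/value pairs following each one land in that page.
+ * rad_panel_enable() later writes {WRMAUCCTR, 0x00} to switch back to the
+ * user command set (CMD1).
+ */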
+
+static const u32 rad_bus_formats[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB565_1X16,
+};
+
+static const u32 rad_bus_flags = DRM_BUS_FLAG_DE_LOW |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+
+struct rad_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct gpio_desc *reset;
+ struct backlight_device *backlight;
+
+ struct regulator_bulk_data *supplies;
+ unsigned int num_supplies;
+
+ bool prepared;
+ bool enabled;
+};
+
+static const struct drm_display_mode default_mode = {
+ .clock = 132000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 20,
+ .hsync_end = 1080 + 20 + 2,
+ .htotal = 1080 + 20 + 2 + 34,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 2,
+ .vtotal = 1920 + 10 + 2 + 4,
+ .vrefresh = 60,
+ .width_mm = 68,
+ .height_mm = 121,
+ .flags = DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_NVSYNC,
+};
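+
+/*
+ * Sanity check on the timings above: htotal (1136) * vtotal (1936) * 60 Hz
+ * comes to roughly 131.96 MHz, consistent with the 132 MHz pixel clock.
+ */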
+
+static inline struct rad_panel *to_rad_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct rad_panel, panel);
+}
+
+static int rad_panel_push_cmd_list(struct mipi_dsi_device *dsi)
+{
+ size_t i;
+ size_t count = ARRAY_SIZE(manufacturer_cmd_set);
+ int ret = 0;
+
+ for (i = 0; i < count; i++) {
+ const struct cmd_set_entry *entry = &manufacturer_cmd_set[i];
+ u8 buffer[2] = { entry->cmd, entry->param };
+
+ ret = mipi_dsi_generic_write(dsi, &buffer, sizeof(buffer));
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int color_format_from_dsi_format(enum mipi_dsi_pixel_format format)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB565:
+ return COL_FMT_16BPP;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return COL_FMT_18BPP;
+ case MIPI_DSI_FMT_RGB888:
+ return COL_FMT_24BPP;
+ default:
+ return COL_FMT_24BPP; /* for backward compatibility */
+ }
+}
+
+static int rad_panel_prepare(struct drm_panel *panel)
+{
+ struct rad_panel *rad = to_rad_panel(panel);
+ int ret;
+
+ if (rad->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(rad->num_supplies, rad->supplies);
+ if (ret)
+ return ret;
+
+ if (rad->reset) {
+ gpiod_set_value_cansleep(rad->reset, 1);
+ usleep_range(3000, 5000);
+ gpiod_set_value_cansleep(rad->reset, 0);
+ usleep_range(18000, 20000);
+ }
+
+ rad->prepared = true;
+
+ return 0;
+}
+
+static int rad_panel_unprepare(struct drm_panel *panel)
+{
+ struct rad_panel *rad = to_rad_panel(panel);
+ int ret;
+
+ if (!rad->prepared)
+ return 0;
+
+ /*
+ * Right after asserting the reset, we need to release it, so that the
+ * touch driver can have an active connection with the touch controller
+ * even after the display is turned off.
+ */
+ if (rad->reset) {
+ gpiod_set_value_cansleep(rad->reset, 1);
+ usleep_range(15000, 17000);
+ gpiod_set_value_cansleep(rad->reset, 0);
+ }
+
+ ret = regulator_bulk_disable(rad->num_supplies, rad->supplies);
+ if (ret)
+ return ret;
+
+ rad->prepared = false;
+
+ return 0;
+}
+
+static int rad_panel_enable(struct drm_panel *panel)
+{
+ struct rad_panel *rad = to_rad_panel(panel);
+ struct mipi_dsi_device *dsi = rad->dsi;
+ struct device *dev = &dsi->dev;
+ int color_format = color_format_from_dsi_format(dsi->format);
+ int ret;
+
+ if (rad->enabled)
+ return 0;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = rad_panel_push_cmd_list(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to send MCS (%d)\n", ret);
+ goto fail;
+ }
+
+ /* Select User Command Set table (CMD1) */
+ ret = mipi_dsi_generic_write(dsi, (u8[]){ WRMAUCCTR, 0x00 }, 2);
+ if (ret < 0)
+ goto fail;
+
+ /* Software reset */
+ ret = mipi_dsi_dcs_soft_reset(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to do Software Reset (%d)\n", ret);
+ goto fail;
+ }
+
+ usleep_range(15000, 17000);
+
+ /* Set DSI mode */
+ ret = mipi_dsi_generic_write(dsi, (u8[]){ 0xC2, 0x0B }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set DSI mode (%d)\n", ret);
+ goto fail;
+ }
+ /* Set tear ON */
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set tear ON (%d)\n", ret);
+ goto fail;
+ }
+ /* Set tear scanline */
+ ret = mipi_dsi_dcs_set_tear_scanline(dsi, 0x380);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set tear scanline (%d)\n", ret);
+ goto fail;
+ }
+ /* Set pixel format */
+ ret = mipi_dsi_dcs_set_pixel_format(dsi, color_format);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set pixel format (%d)\n", ret);
+ goto fail;
+ }
+ DRM_DEV_DEBUG_DRIVER(dev, "Interface color format set to 0x%x\n",
+ color_format);
+ /* Exit sleep mode */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to exit sleep mode (%d)\n", ret);
+ goto fail;
+ }
+
+ usleep_range(5000, 7000);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set display ON (%d)\n", ret);
+ goto fail;
+ }
+
+ backlight_enable(rad->backlight);
+
+ rad->enabled = true;
+
+ return 0;
+
+fail:
+ gpiod_set_value_cansleep(rad->reset, 1);
+
+ return ret;
+}
+
+static int rad_panel_disable(struct drm_panel *panel)
+{
+ struct rad_panel *rad = to_rad_panel(panel);
+ struct mipi_dsi_device *dsi = rad->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ if (!rad->enabled)
+ return 0;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ backlight_disable(rad->backlight);
+
+ usleep_range(10000, 12000);
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set display OFF (%d)\n", ret);
+ return ret;
+ }
+
+ usleep_range(5000, 10000);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to enter sleep mode (%d)\n", ret);
+ return ret;
+ }
+
+ rad->enabled = false;
+
+ return 0;
+}
+
+static int rad_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.bus_flags = rad_bus_flags;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ rad_bus_formats,
+ ARRAY_SIZE(rad_bus_formats));
+ return 1;
+}
+
+static int rad_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
+ u16 brightness;
+ int ret;
+
+ if (!rad->prepared)
+ return 0;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ bl->props.brightness = brightness;
+
+ return brightness & 0xff;
+}
+
+static int rad_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
+ int ret = 0;
+
+ if (!rad->prepared)
+ return 0;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, bl->props.brightness);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct backlight_ops rad_bl_ops = {
+ .update_status = rad_bl_update_status,
+ .get_brightness = rad_bl_get_brightness,
+};
+
+static const struct drm_panel_funcs rad_panel_funcs = {
+ .prepare = rad_panel_prepare,
+ .unprepare = rad_panel_unprepare,
+ .enable = rad_panel_enable,
+ .disable = rad_panel_disable,
+ .get_modes = rad_panel_get_modes,
+};
+
+static const char * const rad_supply_names[] = {
+ "v3p3",
+ "v1p8",
+};
+
+static int rad_init_regulators(struct rad_panel *rad)
+{
+ struct device *dev = &rad->dsi->dev;
+ int i;
+
+ rad->num_supplies = ARRAY_SIZE(rad_supply_names);
+ rad->supplies = devm_kcalloc(dev, rad->num_supplies,
+ sizeof(*rad->supplies), GFP_KERNEL);
+ if (!rad->supplies)
+ return -ENOMEM;
+
+ for (i = 0; i < rad->num_supplies; i++)
+ rad->supplies[i].supply = rad_supply_names[i];
+
+ return devm_regulator_bulk_get(dev, rad->num_supplies, rad->supplies);
+}
+
+static int rad_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct device_node *np = dev->of_node;
+ struct rad_panel *panel;
+ struct backlight_properties bl_props;
+ int ret;
+ u32 video_mode;
+
+ panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, panel);
+
+ panel->dsi = dsi;
+
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ret = of_property_read_u32(np, "video-mode", &video_mode);
+ if (!ret) {
+ switch (video_mode) {
+ case 0:
+ /* burst mode */
+ dsi->mode_flags |= MIPI_DSI_MODE_VIDEO_BURST;
+ break;
+ case 1:
+ /* non-burst mode with sync event */
+ break;
+ case 2:
+ /* non-burst mode with sync pulse */
+ dsi->mode_flags |= MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ break;
+ default:
+ dev_warn(dev, "invalid video mode %u\n", video_mode);
+ break;
+ }
+ }
+
+ ret = of_property_read_u32(np, "dsi-lanes", &dsi->lanes);
+ if (ret) {
+ dev_err(dev, "Failed to get dsi-lanes property (%d)\n", ret);
+ return ret;
+ }
+
+ panel->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(panel->reset))
+ return PTR_ERR(panel->reset);
+
+ memset(&bl_props, 0, sizeof(bl_props));
+ bl_props.type = BACKLIGHT_RAW;
+ bl_props.brightness = 255;
+ bl_props.max_brightness = 255;
+
+ panel->backlight = devm_backlight_device_register(dev, dev_name(dev),
+ dev, dsi, &rad_bl_ops,
+ &bl_props);
+ if (IS_ERR(panel->backlight)) {
+ ret = PTR_ERR(panel->backlight);
+ dev_err(dev, "Failed to register backlight (%d)\n", ret);
+ return ret;
+ }
+
+ ret = rad_init_regulators(panel);
+ if (ret)
+ return ret;
+
+ drm_panel_init(&panel->panel);
+ panel->panel.funcs = &rad_panel_funcs;
+ panel->panel.dev = dev;
+ dev_set_drvdata(dev, panel);
+
+ ret = drm_panel_add(&panel->panel);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ drm_panel_remove(&panel->panel);
+
+ return ret;
+}
+
+static int rad_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ DRM_DEV_ERROR(dev, "Failed to detach from host (%d)\n",
+ ret);
+
+ drm_panel_remove(&rad->panel);
+
+ return 0;
+}
+
+static void rad_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct rad_panel *rad = mipi_dsi_get_drvdata(dsi);
+
+ rad_panel_disable(&rad->panel);
+ rad_panel_unprepare(&rad->panel);
+}
+
+static const struct of_device_id rad_of_match[] = {
+ { .compatible = "raydium,rm67191", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rad_of_match);
+
+static struct mipi_dsi_driver rad_panel_driver = {
+ .driver = {
+ .name = "panel-raydium-rm67191",
+ .of_match_table = rad_of_match,
+ },
+ .probe = rad_panel_probe,
+ .remove = rad_panel_remove,
+ .shutdown = rad_panel_shutdown,
+};
+module_mipi_dsi_driver(rad_panel_driver);
+
+MODULE_AUTHOR("Robert Chiras <robert.chiras@nxp.com>");
+MODULE_DESCRIPTION("DRM Driver for Raydium RM67191 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index 6dcb692c4701..b9109922397f 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -15,6 +15,7 @@
#include <linux/gpio/consumer.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
#include <video/mipi_display.h>
@@ -33,6 +34,7 @@
#define ST7703_CMD_SETEXTC 0xB9
#define ST7703_CMD_SETMIPI 0xBA
#define ST7703_CMD_SETVDC 0xBC
+#define ST7703_CMD_UNKNOWN0 0xBF
#define ST7703_CMD_SETSCR 0xC0
#define ST7703_CMD_SETPOWER 0xC1
#define ST7703_CMD_SETPANEL 0xCC
@@ -46,6 +48,8 @@ struct jh057n {
struct drm_panel panel;
struct gpio_desc *reset_gpio;
struct backlight_device *backlight;
+ struct regulator *vcc;
+ struct regulator *iovcc;
bool prepared;
struct dentry *debugfs;
@@ -94,7 +98,7 @@ static int jh057n_init_sequence(struct jh057n *ctx)
msleep(20);
dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F);
- dsi_generic_write_seq(dsi, 0xBF, 0x02, 0x11, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_UNKNOWN0, 0x02, 0x11, 0x00);
dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1,
0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12,
0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
@@ -123,7 +127,7 @@ static int jh057n_init_sequence(struct jh057n *ctx)
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "Failed to exit sleep mode\n");
+ DRM_DEV_ERROR(dev, "Failed to exit sleep mode: %d\n", ret);
return ret;
}
/* Panel is operational 120 msec after reset */
@@ -139,6 +143,14 @@ static int jh057n_init_sequence(struct jh057n *ctx)
static int jh057n_enable(struct drm_panel *panel)
{
struct jh057n *ctx = panel_to_jh057n(panel);
+ int ret;
+
+ ret = jh057n_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ return ret;
+ }
return backlight_enable(ctx->backlight);
}
@@ -146,19 +158,21 @@ static int jh057n_enable(struct drm_panel *panel)
static int jh057n_disable(struct drm_panel *panel)
{
struct jh057n *ctx = panel_to_jh057n(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
- return backlight_disable(ctx->backlight);
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_set_display_off(dsi);
}
static int jh057n_unprepare(struct drm_panel *panel)
{
struct jh057n *ctx = panel_to_jh057n(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
if (!ctx->prepared)
return 0;
- mipi_dsi_dcs_set_display_off(dsi);
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vcc);
ctx->prepared = false;
return 0;
@@ -173,21 +187,31 @@ static int jh057n_prepare(struct drm_panel *panel)
return 0;
DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vcc supply: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vcc;
+ }
+
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
usleep_range(20, 40);
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
msleep(20);
- ret = jh057n_init_sequence(ctx);
- if (ret < 0) {
- DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
- ret);
- return ret;
- }
-
ctx->prepared = true;
return 0;
+
+disable_vcc:
+ regulator_disable(ctx->vcc);
+ return ret;
}
static const struct drm_display_mode default_mode = {
@@ -300,6 +324,25 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->backlight))
return PTR_ERR(ctx->backlight);
+ ctx->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(ctx->vcc)) {
+ ret = PTR_ERR(ctx->vcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
drm_panel_init(&ctx->panel);
ctx->panel.dev = dev;
ctx->panel.funcs = &jh057n_drm_funcs;
@@ -308,7 +351,9 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
ret = mipi_dsi_attach(dsi);
if (ret < 0) {
- DRM_DEV_ERROR(dev, "mipi_dsi_attach failed. Is host ready?\n");
+ DRM_DEV_ERROR(dev,
+ "mipi_dsi_attach failed (%d). Is host ready?\n",
+ ret);
drm_panel_remove(&ctx->panel);
return ret;
}
@@ -327,12 +372,12 @@ static void jh057n_shutdown(struct mipi_dsi_device *dsi)
struct jh057n *ctx = mipi_dsi_get_drvdata(dsi);
int ret;
- ret = jh057n_unprepare(&ctx->panel);
+ ret = drm_panel_unprepare(&ctx->panel);
if (ret < 0)
DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
ret);
- ret = jh057n_disable(&ctx->panel);
+ ret = drm_panel_disable(&ctx->panel);
if (ret < 0)
DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
ret);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5a93c4edf1e4..bff7578f84dd 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -30,6 +30,7 @@
#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
+#include <video/of_display_timing.h>
#include <video/videomode.h>
#include <drm/drm_crtc.h>
@@ -37,6 +38,22 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+/**
+ * struct panel_desc - Description of a simple, fixed-mode panel.
+ *
+ * @modes: Pointer to array of fixed modes appropriate for this panel. If
+ * only one mode then this can just be the address of the single mode.
+ * NOTE: cannot be used with "timings" and also if this is specified
+ * then you cannot override the mode in the device tree.
+ * @num_modes: Number of elements in modes array.
+ * @timings: Pointer to array of display timings. NOTE: cannot be used with
+ * "modes" and also these will be used to validate a device tree
+ * override if one is present.
+ * @num_timings: Number of elements in timings array.
+ * @bpc: Bits per color.
+ * @size: Structure containing the physical size of this panel.
+ * @delay: Structure containing various delay values for this panel.
+ * @bus_format: See MEDIA_BUS_FMT_... defines.
+ * @bus_flags: See DRM_BUS_FLAG_... defines.
+ */
struct panel_desc {
const struct drm_display_mode *modes;
unsigned int num_modes;
@@ -92,6 +109,8 @@ struct panel_simple {
struct i2c_adapter *ddc;
struct gpio_desc *enable_gpio;
+
+ struct drm_display_mode override_mode;
};
static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
@@ -99,16 +118,13 @@ static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
return container_of(panel, struct panel_simple, base);
}
-static int panel_simple_get_fixed_modes(struct panel_simple *panel)
+static unsigned int panel_simple_get_timings_modes(struct panel_simple *panel)
{
struct drm_connector *connector = panel->base.connector;
struct drm_device *drm = panel->base.drm;
struct drm_display_mode *mode;
unsigned int i, num = 0;
- if (!panel->desc)
- return 0;
-
for (i = 0; i < panel->desc->num_timings; i++) {
const struct display_timing *dt = &panel->desc->timings[i];
struct videomode vm;
@@ -132,6 +148,16 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
num++;
}
+ return num;
+}
+
+static unsigned int panel_simple_get_display_modes(struct panel_simple *panel)
+{
+ struct drm_connector *connector = panel->base.connector;
+ struct drm_device *drm = panel->base.drm;
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
for (i = 0; i < panel->desc->num_modes; i++) {
const struct drm_display_mode *m = &panel->desc->modes[i];
@@ -153,6 +179,44 @@ static int panel_simple_get_fixed_modes(struct panel_simple *panel)
num++;
}
+ return num;
+}
+
+static int panel_simple_get_non_edid_modes(struct panel_simple *panel)
+{
+ struct drm_connector *connector = panel->base.connector;
+ struct drm_device *drm = panel->base.drm;
+ struct drm_display_mode *mode;
+ bool has_override = panel->override_mode.type;
+ unsigned int num = 0;
+
+ if (!panel->desc)
+ return 0;
+
+ if (has_override) {
+ mode = drm_mode_duplicate(drm, &panel->override_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num = 1;
+ } else {
+ dev_err(drm->dev, "failed to add override mode\n");
+ }
+ }
+
+ /* Only add timings if override was not there or failed to validate */
+ if (num == 0 && panel->desc->num_timings)
+ num = panel_simple_get_timings_modes(panel);
+
+ /*
+ * Only add fixed modes if timings/override added no mode.
+ *
+ * We should only ever have either the display timings specified
+ * or a fixed mode. Anything else is rather bogus.
+ */
+ WARN_ON(panel->desc->num_timings && panel->desc->num_modes);
+ if (num == 0)
+ num = panel_simple_get_display_modes(panel);
+
connector->display_info.bpc = panel->desc->bpc;
connector->display_info.width_mm = panel->desc->size.width;
connector->display_info.height_mm = panel->desc->size.height;
@@ -269,7 +333,7 @@ static int panel_simple_get_modes(struct drm_panel *panel)
}
/* add hard-coded panel modes */
- num += panel_simple_get_fixed_modes(p);
+ num += panel_simple_get_non_edid_modes(p);
return num;
}
@@ -300,10 +364,58 @@ static const struct drm_panel_funcs panel_simple_funcs = {
.get_timings = panel_simple_get_timings,
};
+#define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \
+ (to_check->field.typ >= bounds->field.min && \
+ to_check->field.typ <= bounds->field.max)
+static void panel_simple_parse_panel_timing_node(struct device *dev,
+ struct panel_simple *panel,
+ const struct display_timing *ot)
+{
+ const struct panel_desc *desc = panel->desc;
+ struct videomode vm;
+ unsigned int i;
+
+ if (WARN_ON(desc->num_modes)) {
+ dev_err(dev, "Reject override mode: panel has a fixed mode\n");
+ return;
+ }
+ if (WARN_ON(!desc->num_timings)) {
+ dev_err(dev, "Reject override mode: no timings specified\n");
+ return;
+ }
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+
+ if (!PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hactive) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hfront_porch) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hback_porch) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, hsync_len) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vactive) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vfront_porch) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vback_porch) ||
+ !PANEL_SIMPLE_BOUNDS_CHECK(ot, dt, vsync_len))
+ continue;
+
+ if (ot->flags != dt->flags)
+ continue;
+
+ videomode_from_timing(ot, &vm);
+ drm_display_mode_from_videomode(&vm, &panel->override_mode);
+ panel->override_mode.type |= DRM_MODE_TYPE_DRIVER |
+ DRM_MODE_TYPE_PREFERRED;
+ break;
+ }
+
+ if (WARN_ON(!panel->override_mode.type))
+ dev_err(dev, "Reject override mode: No display_timing found\n");
+}
+
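+/*
+ * For example, with timings such as auo_b101ean01_timing below, a
+ * device-tree "panel-timing" whose values all fall within the min/max
+ * windows and whose flags match (e.g. an hfront-porch of 100 against
+ * hfront_porch = { 18, 119, 119 }) becomes the preferred override mode;
+ * an out-of-range value such as hfront-porch = 150 causes fallback to
+ * the typical timing.
+ */
+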
static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
struct device_node *backlight, *ddc;
struct panel_simple *panel;
+ struct display_timing dt;
int err;
panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
@@ -349,6 +461,9 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
}
}
+ if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
+
drm_panel_init(&panel->base);
panel->base.dev = dev;
panel->base.funcs = &panel_simple_funcs;
@@ -496,22 +611,21 @@ static const struct panel_desc auo_b101aw03 = {
},
};
-static const struct drm_display_mode auo_b101ean01_mode = {
- .clock = 72500,
- .hdisplay = 1280,
- .hsync_start = 1280 + 119,
- .hsync_end = 1280 + 119 + 32,
- .htotal = 1280 + 119 + 32 + 21,
- .vdisplay = 800,
- .vsync_start = 800 + 4,
- .vsync_end = 800 + 4 + 20,
- .vtotal = 800 + 4 + 20 + 8,
- .vrefresh = 60,
+static const struct display_timing auo_b101ean01_timing = {
+ .pixelclock = { 65300000, 72500000, 75000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 18, 119, 119 },
+ .hback_porch = { 21, 21, 21 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 8, 8, 8 },
+ .vsync_len = { 18, 20, 20 },
};
static const struct panel_desc auo_b101ean01 = {
- .modes = &auo_b101ean01_mode,
- .num_modes = 1,
+ .timings = &auo_b101ean01_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 217,
@@ -724,9 +838,9 @@ static const struct panel_desc auo_g133han01 = {
static const struct display_timing auo_g185han01_timings = {
.pixelclock = { 120000000, 144000000, 175000000 },
.hactive = { 1920, 1920, 1920 },
- .hfront_porch = { 18, 60, 74 },
- .hback_porch = { 12, 44, 54 },
- .hsync_len = { 10, 24, 32 },
+ .hfront_porch = { 36, 120, 148 },
+ .hback_porch = { 24, 88, 108 },
+ .hsync_len = { 20, 48, 64 },
.vactive = { 1080, 1080, 1080 },
.vfront_porch = { 6, 10, 40 },
.vback_porch = { 2, 5, 20 },
@@ -1335,6 +1449,31 @@ static const struct panel_desc giantplus_gpg482739qs5 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct display_timing giantplus_gpm940b0_timing = {
+ .pixelclock = { 13500000, 27000000, 27500000 },
+ .hactive = { 320, 320, 320 },
+ .hfront_porch = { 14, 686, 718 },
+ .hback_porch = { 50, 70, 255 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 240, 240, 240 },
+ .vfront_porch = { 1, 1, 179 },
+ .vback_porch = { 1, 21, 31 },
+ .vsync_len = { 1, 1, 6 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc giantplus_gpm940b0 = {
+ .timings = &giantplus_gpm940b0_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 60,
+ .height = 45,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_3X8,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+};
+
static const struct display_timing hannstar_hsd070pww1_timing = {
.pixelclock = { 64300000, 71100000, 82000000 },
.hactive = { 1280, 1280, 1280 },
@@ -1578,23 +1717,32 @@ static const struct panel_desc innolux_g121x1_l03 = {
},
};
-static const struct drm_display_mode innolux_n116bge_mode = {
- .clock = 76420,
- .hdisplay = 1366,
- .hsync_start = 1366 + 136,
- .hsync_end = 1366 + 136 + 30,
- .htotal = 1366 + 136 + 30 + 60,
- .vdisplay = 768,
- .vsync_start = 768 + 8,
- .vsync_end = 768 + 8 + 12,
- .vtotal = 768 + 8 + 12 + 12,
- .vrefresh = 60,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+/*
+ * Datasheet specifies that at 60 Hz refresh rate:
+ * - total horizontal time: { 1506, 1592, 1716 }
+ * - total vertical time: { 788, 800, 868 }
+ *
+ * ...but doesn't go into exactly how that should be split into a front
+ * porch, back porch, or sync length. For now we'll leave a single setting
+ * here which allows a bit of tweaking of the pixel clock at the expense of
+ * refresh rate.
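+ *
+ * At the typical values this works out to 1592 * 800 * 60 Hz, i.e. about
+ * 76.42 MHz, which is where the typical pixelclock below comes from.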
+ */
+static const struct display_timing innolux_n116bge_timing = {
+ .pixelclock = { 72600000, 76420000, 80240000 },
+ .hactive = { 1366, 1366, 1366 },
+ .hfront_porch = { 136, 136, 136 },
+ .hback_porch = { 60, 60, 60 },
+ .hsync_len = { 30, 30, 30 },
+ .vactive = { 768, 768, 768 },
+ .vfront_porch = { 8, 8, 8 },
+ .vback_porch = { 12, 12, 12 },
+ .vsync_len = { 12, 12, 12 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
};
static const struct panel_desc innolux_n116bge = {
- .modes = &innolux_n116bge_mode,
- .num_modes = 1,
+ .timings = &innolux_n116bge_timing,
+ .num_timings = 1,
.bpc = 6,
.size = {
.width = 256,
@@ -2157,6 +2305,33 @@ static const struct panel_desc ontat_yx700wv03 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode ortustech_com37h3m_mode = {
+ .clock = 22153,
+ .hdisplay = 480,
+ .hsync_start = 480 + 8,
+ .hsync_end = 480 + 8 + 10,
+ .htotal = 480 + 8 + 10 + 10,
+ .vdisplay = 640,
+ .vsync_start = 640 + 4,
+ .vsync_end = 640 + 4 + 3,
+ .vtotal = 640 + 4 + 3 + 4,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc ortustech_com37h3m = {
+ .modes = &ortustech_com37h3m_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 56, /* 56.16mm */
+ .height = 75, /* 74.88mm */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+};
+
static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
.clock = 25000,
.hdisplay = 480,
@@ -2354,6 +2529,59 @@ static const struct panel_desc samsung_ltn140at29_301 = {
},
};
+static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
+ .clock = 168480,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 3,
+ .vsync_end = 1280 + 3 + 10,
+ .vtotal = 1280 + 3 + 10 + 57,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc sharp_ld_d5116z01b = {
+ .modes = &sharp_ld_d5116z01b_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 260,
+ .height = 120,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+};
+
+static const struct drm_display_mode sharp_lq070y3dg3b_mode = {
+ .clock = 33260,
+ .hdisplay = 800,
+ .hsync_start = 800 + 64,
+ .hsync_end = 800 + 64 + 128,
+ .htotal = 800 + 64 + 128 + 64,
+ .vdisplay = 480,
+ .vsync_start = 480 + 8,
+ .vsync_end = 480 + 8 + 2,
+ .vtotal = 480 + 8 + 2 + 35,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc sharp_lq070y3dg3b = {
+ .modes = &sharp_lq070y3dg3b_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152, /* 152.4mm */
+ .height = 91, /* 91.4mm */
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+};
+
static const struct drm_display_mode sharp_lq035q7db03_mode = {
.clock = 5500,
.hdisplay = 240,
@@ -2454,6 +2682,33 @@ static const struct panel_desc sharp_lq150x1lg11 = {
.bus_format = MEDIA_BUS_FMT_RGB565_1X16,
};
+static const struct display_timing sharp_ls020b1dd01d_timing = {
+ .pixelclock = { 2000000, 4200000, 5000000 },
+ .hactive = { 240, 240, 240 },
+ .hfront_porch = { 66, 66, 66 },
+ .hback_porch = { 1, 1, 1 },
+ .hsync_len = { 1, 1, 1 },
+ .vactive = { 160, 160, 160 },
+ .vfront_porch = { 52, 52, 52 },
+ .vback_porch = { 6, 6, 6 },
+ .vsync_len = { 10, 10, 10 },
+ .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc sharp_ls020b1dd01d = {
+ .timings = &sharp_ls020b1dd01d_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 42,
+ .height = 28,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_PIXDATA_NEGEDGE
+ | DRM_BUS_FLAG_SHARP_SIGNALS,
+};
+
static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
.clock = 33300,
.hdisplay = 800,
@@ -2883,6 +3138,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "giantplus,gpg482739qs5",
.data = &giantplus_gpg482739qs5
}, {
+ .compatible = "giantplus,gpm940b0",
+ .data = &giantplus_gpm940b0,
+ }, {
.compatible = "hannstar,hsd070pww1",
.data = &hannstar_hsd070pww1,
}, {
@@ -2979,6 +3237,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ontat,yx700wv03",
.data = &ontat_yx700wv03,
}, {
+ .compatible = "ortustech,com37h3m05dtc",
+ .data = &ortustech_com37h3m,
+ }, {
+ .compatible = "ortustech,com37h3m99dtc",
+ .data = &ortustech_com37h3m,
+ }, {
.compatible = "ortustech,com43h4m85ulc",
.data = &ortustech_com43h4m85ulc,
}, {
@@ -3003,9 +3267,15 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "samsung,ltn140at29-301",
.data = &samsung_ltn140at29_301,
}, {
+ .compatible = "sharp,ld-d5116z01b",
+ .data = &sharp_ld_d5116z01b,
+ }, {
.compatible = "sharp,lq035q7db03",
.data = &sharp_lq035q7db03,
}, {
+ .compatible = "sharp,lq070y3dg3b",
+ .data = &sharp_lq070y3dg3b,
+ }, {
.compatible = "sharp,lq101k1ly04",
.data = &sharp_lq101k1ly04,
}, {
@@ -3015,6 +3285,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq150x1lg11",
.data = &sharp_lq150x1lg11,
}, {
+ .compatible = "sharp,ls020b1dd01d",
+ .data = &sharp_ls020b1dd01d,
+ }, {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index db798532b0b6..a7c18bceb7fd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -157,7 +157,8 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
dev_pm_opp_put(opp);
pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
- &panfrost_devfreq_profile, "simple_ondemand", NULL);
+ &panfrost_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ NULL);
if (IS_ERR(pfdev->devfreq.devfreq)) {
DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
ret = PTR_ERR(pfdev->devfreq.devfreq);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 83cc01cafde1..ea5948ff3647 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -43,6 +43,7 @@ struct panfrost_features {
u32 js_features[16];
u32 nr_core_groups;
+ u32 thread_tls_alloc;
unsigned long hw_features[64 / BITS_PER_LONG];
unsigned long hw_issues[64 / BITS_PER_LONG];
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 85b4b51b6a0d..b187daa4da85 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -32,10 +32,42 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
if (param->pad != 0)
return -EINVAL;
+#define PANFROST_FEATURE(name, member) \
+ case DRM_PANFROST_PARAM_ ## name: \
+ param->value = pfdev->features.member; \
+ break
+#define PANFROST_FEATURE_ARRAY(name, member, max) \
+ case DRM_PANFROST_PARAM_ ## name ## 0 ... \
+ DRM_PANFROST_PARAM_ ## name ## max: \
+ param->value = pfdev->features.member[param->param - \
+ DRM_PANFROST_PARAM_ ## name ## 0]; \
+ break
+
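+/*
+ * For example, PANFROST_FEATURE(GPU_PROD_ID, id) expands to:
+ *
+ * case DRM_PANFROST_PARAM_GPU_PROD_ID:
+ * param->value = pfdev->features.id;
+ * break;
+ */
+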
switch (param->param) {
- case DRM_PANFROST_PARAM_GPU_PROD_ID:
- param->value = pfdev->features.id;
- break;
+ PANFROST_FEATURE(GPU_PROD_ID, id);
+ PANFROST_FEATURE(GPU_REVISION, revision);
+ PANFROST_FEATURE(SHADER_PRESENT, shader_present);
+ PANFROST_FEATURE(TILER_PRESENT, tiler_present);
+ PANFROST_FEATURE(L2_PRESENT, l2_present);
+ PANFROST_FEATURE(STACK_PRESENT, stack_present);
+ PANFROST_FEATURE(AS_PRESENT, as_present);
+ PANFROST_FEATURE(JS_PRESENT, js_present);
+ PANFROST_FEATURE(L2_FEATURES, l2_features);
+ PANFROST_FEATURE(CORE_FEATURES, core_features);
+ PANFROST_FEATURE(TILER_FEATURES, tiler_features);
+ PANFROST_FEATURE(MEM_FEATURES, mem_features);
+ PANFROST_FEATURE(MMU_FEATURES, mmu_features);
+ PANFROST_FEATURE(THREAD_FEATURES, thread_features);
+ PANFROST_FEATURE(MAX_THREADS, max_threads);
+ PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
+ thread_max_workgroup_sz);
+ PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
+ thread_max_barrier_sz);
+ PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
+ PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
+ PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
+ PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
+ PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
default:
return -EINVAL;
}
@@ -357,8 +389,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
static struct drm_driver panfrost_drm_driver = {
- .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_SYNCOBJ,
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = panfrost_open,
.postclose = panfrost_postclose,
.ioctls = panfrost_drm_driver_ioctls,
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index b46416be5a54..543ab1b81bd5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -92,8 +92,6 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
pobj = to_panfrost_bo(obj);
- obj->resv = attach->dmabuf->resv;
-
panfrost_mmu_map(pobj);
return obj;
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 20ab333fc925..f67ed925c0ef 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -232,6 +232,8 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;
+ pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC);
+
gpu_id = gpu_read(pfdev, GPU_ID);
pfdev->features.revision = gpu_id & 0xffff;
pfdev->features.id = gpu_id >> 16;
diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c
index 8d6a40469f0b..3c8e82016854 100644
--- a/drivers/gpu/drm/pl111/pl111_debugfs.c
+++ b/drivers/gpu/drm/pl111/pl111_debugfs.c
@@ -5,8 +5,10 @@
#include <linux/amba/clcd-regs.h>
#include <linux/seq_file.h>
+
#include <drm/drm_debugfs.h>
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+
#include "pl111_drm.h"
#define REGDEF(reg) { reg, #reg }
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 15d2755fdba4..9a153125e5d2 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -11,14 +11,16 @@
#include <linux/amba/clcd-regs.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_vblank.h>
#include "pl111_drm.h"
@@ -546,25 +548,8 @@ pl111_init_clock_divider(struct drm_device *drm)
int pl111_display_init(struct drm_device *drm)
{
struct pl111_drm_dev_private *priv = drm->dev_private;
- struct device *dev = drm->dev;
- struct device_node *endpoint;
- u32 tft_r0b0g0[3];
int ret;
- endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
- if (!endpoint)
- return -ENODEV;
-
- if (of_property_read_u32_array(endpoint,
- "arm,pl11x,tft-r0g0b0-pads",
- tft_r0b0g0,
- ARRAY_SIZE(tft_r0b0g0)) != 0) {
- dev_err(dev, "arm,pl11x,tft-r0g0b0-pads should be 3 ints\n");
- of_node_put(endpoint);
- return -ENOENT;
- }
- of_node_put(endpoint);
-
ret = pl111_init_clock_divider(drm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index b2c5e9f34051..77d2da9a8a7c 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -13,14 +13,15 @@
#ifndef _PL111_DRM_H_
#define _PL111_DRM_H_
-#include <drm/drm_gem.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <linux/clk-provider.h>
+#include <linux/interrupt.h>
+
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_gem.h>
#include <drm/drm_panel.h>
-#include <drm/drm_bridge.h>
-#include <linux/clk-provider.h>
-#include <linux/interrupt.h>
+#include <drm/drm_simple_kms_helper.h>
#define CLCD_IRQ_NEXTBASE_UPDATE BIT(2)
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 01f8462aa2db..276b53473a84 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -48,18 +48,18 @@
#include <linux/amba/bus.h>
#include <linux/amba/clcd-regs.h>
-#include <linux/version.h>
-#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/version.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -67,6 +67,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "pl111_drm.h"
#include "pl111_versatile.h"
@@ -224,7 +225,7 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver pl111_drm_driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.ioctls = NULL,
.fops = &drm_fops,
.name = "pl111",
@@ -238,9 +239,7 @@ static struct drm_driver pl111_drm_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/pl111/pl111_nomadik.h b/drivers/gpu/drm/pl111/pl111_nomadik.h
index 19d663d46353..47ccf5c839fc 100644
--- a/drivers/gpu/drm/pl111/pl111_nomadik.h
+++ b/drivers/gpu/drm/pl111/pl111_nomadik.h
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
-#include <linux/device.h>
#ifndef PL111_NOMADIK_H
#define PL111_NOMADIK_H
#endif
+struct device;
+
#ifdef CONFIG_ARCH_NOMADIK
void pl111_nomadik_init(struct device *dev);
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 38f4ee05285e..09aeaffb7660 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
+
#include <linux/amba/clcd-regs.h>
+#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
-#include <linux/mfd/syscon.h>
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <drm/drmP.h>
+
#include "pl111_versatile.h"
#include "pl111_vexpress.h"
#include "pl111_drm.h"
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.h b/drivers/gpu/drm/pl111/pl111_versatile.h
index 41aa6d969dc6..143877010042 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.h
+++ b/drivers/gpu/drm/pl111/pl111_versatile.h
@@ -4,6 +4,9 @@
#ifndef PL111_VERSATILE_H
#define PL111_VERSATILE_H
+struct device;
+struct pl111_drm_dev_private;
+
int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv);
#endif
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index 38c938c9adda..350570fe06b5 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -51,6 +51,7 @@ int pl111_vexpress_clcd_init(struct device *dev,
}
if (of_device_is_compatible(child, "arm,hdlcd")) {
has_coretile_hdlcd = true;
+ of_node_put(child);
break;
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 0a2e51af1230..ef09dc6bc635 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -25,6 +25,8 @@
/* QXL cmd/ring handling */
+#include <linux/delay.h>
+
#include <drm/drm_util.h>
#include "qxl_drv.h"
@@ -375,7 +377,7 @@ void qxl_io_destroy_primary(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
qdev->primary_bo->is_primary = false;
- drm_gem_object_put_unlocked(&qdev->primary_bo->gem_base);
+ drm_gem_object_put_unlocked(&qdev->primary_bo->tbo.base);
qdev->primary_bo = NULL;
}
@@ -402,7 +404,7 @@ void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo)
wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
qdev->primary_bo = bo;
qdev->primary_bo->is_primary = true;
- drm_gem_object_get(&qdev->primary_bo->gem_base);
+ drm_gem_object_get(&qdev->primary_bo->tbo.base);
}
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 118422549828..94439212a5c5 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -28,9 +28,9 @@
* Alon Levy <alevy@redhat.com>
*/
-#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
-#include <drm/drmP.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -61,12 +61,12 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
int rel;
rcu_read_lock();
- fobj = rcu_dereference(bo->tbo.resv->fence);
+ fobj = rcu_dereference(bo->tbo.base.resv->fence);
rel = fobj ? fobj->shared_count : 0;
rcu_read_unlock();
seq_printf(m, "size %ld, pc %d, num releases %d\n",
- (unsigned long)bo->gem_base.size,
+ (unsigned long)bo->tbo.base.size,
bo->pin_count, rel);
}
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 8b319ebbb0fb..16d73b22f3f5 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,11 +24,14 @@
*/
#include <linux/crc32.h>
+#include <linux/delay.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -794,7 +797,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
qdev->dumb_shadow_bo->surf.height != surf.height) {
if (qdev->dumb_shadow_bo) {
drm_gem_object_put_unlocked
- (&qdev->dumb_shadow_bo->gem_base);
+ (&qdev->dumb_shadow_bo->tbo.base);
qdev->dumb_shadow_bo = NULL;
}
qxl_bo_create(qdev, surf.height * surf.stride,
@@ -804,10 +807,10 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
if (user_bo->shadow != qdev->dumb_shadow_bo) {
if (user_bo->shadow) {
drm_gem_object_put_unlocked
- (&user_bo->shadow->gem_base);
+ (&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
- drm_gem_object_get(&qdev->dumb_shadow_bo->gem_base);
+ drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base);
user_bo->shadow = qdev->dumb_shadow_bo;
}
}
@@ -838,7 +841,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
qxl_bo_unpin(user_bo);
if (old_state->fb != plane->state->fb && user_bo->shadow) {
- drm_gem_object_put_unlocked(&user_bo->shadow->gem_base);
+ drm_gem_object_put_unlocked(&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 97c3f1a95a32..5bebf1ea1c5d 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -20,6 +20,8 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_fourcc.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index f33e349c4ec5..c1802e01d9f6 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -28,14 +28,18 @@
* Alon Levy <alevy@redhat.com>
*/
-#include <linux/module.h>
+#include "qxl_drv.h"
#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
-#include <drm/drmP.h>
#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
-#include "qxl_drv.h"
+
#include "qxl_object.h"
static const struct pci_device_id pciidlist[] = {
@@ -206,16 +210,14 @@ static int qxl_pm_resume(struct device *dev)
static int qxl_pm_thaw(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return qxl_drm_resume(drm_dev, true);
}
static int qxl_pm_freeze(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
return qxl_drm_freeze(drm_dev);
}
@@ -247,8 +249,7 @@ static struct pci_driver qxl_pci_driver = {
};
static struct drm_driver qxl_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = qxl_mode_dumb_mmap,
@@ -257,8 +258,6 @@ static struct drm_driver qxl_driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = qxl_gem_prime_pin,
.gem_prime_unpin = qxl_gem_prime_unpin,
.gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
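
The qxl_pm_thaw()/qxl_pm_freeze() hunks rely on pci_set_drvdata() being a thin wrapper around dev_set_drvdata() on the embedded struct device, so the detour through to_pci_dev() buys nothing. A sketch of the short form (assumes drvdata was set to the drm_device at probe time):

#include <linux/device.h>

struct drm_device;

static struct drm_device *drm_dev_from(struct device *dev)
{
	/* equivalent to pci_get_drvdata(to_pci_dev(dev)) */
	return dev_get_drvdata(dev);
}
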
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 2896bb6fdbf4..9e034c5fa87d 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,22 +31,21 @@
*/
#include <linux/dma-fence.h>
-#include <linux/workqueue.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
-#include <drm/drmP.h>
+#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-/* just for ttm_validate_buffer */
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/qxl_drm.h>
#include "qxl_dev.h"
@@ -72,12 +71,13 @@ extern int qxl_max_ioctls;
QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
struct qxl_bo {
+ struct ttm_buffer_object tbo;
+
/* Protected by gem.mutex */
struct list_head list;
/* Protected by tbo.reserved */
struct ttm_place placements[3];
struct ttm_placement placement;
- struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
unsigned int pin_count;
void *kptr;
@@ -85,7 +85,6 @@ struct qxl_bo {
int type;
/* Constant after initialization */
- struct drm_gem_object gem_base;
unsigned int is_primary:1; /* is this now a primary surface */
unsigned int is_dumb:1;
struct qxl_bo *shadow;
@@ -94,7 +93,7 @@ struct qxl_bo {
uint32_t surface_id;
struct qxl_release *surf_create;
};
-#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base)
#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
struct qxl_gem {
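
The struct reorder plus the gem_to_qxl_bo() change is the heart of this series for qxl: the standalone gem_base member is gone because struct ttm_buffer_object now embeds the GEM object as tbo.base, and container_of() can climb out through a nested member just as well. A self-contained userspace demo of that pattern (all type names below are stand-ins, not the real driver structs):

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gem_object { unsigned long size; };
struct ttm_object { struct gem_object base; };
struct demo_bo    { struct ttm_object tbo; int pin_count; };

/* mirrors gem_to_qxl_bo(): the member designator crosses two levels */
#define gem_to_demo_bo(gobj) container_of(gobj, struct demo_bo, tbo.base)

int main(void)
{
	struct demo_bo bo = { .pin_count = 1 };
	struct gem_object *gobj = &bo.tbo.base;

	assert(gem_to_demo_bo(gobj) == &bo);
	assert(gem_to_demo_bo(gobj)->pin_count == 1);
	return 0;
}
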
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 89606c819d82..69f37db1027a 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -23,7 +23,6 @@
* Alon Levy
*/
-#include <drm/drmP.h>
#include <drm/drm.h>
#include "qxl_drv.h"
@@ -64,7 +63,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
size, initial_domain, alignment, r);
return r;
}
- *obj = &qbo->gem_base;
+ *obj = &qbo->tbo.base;
mutex_lock(&qdev->gem.mutex);
list_add_tail(&qbo->list, &qdev->gem.objects);
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index d410e2925162..8117a45b3610 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -23,6 +23,9 @@
* Alon Levy
*/
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 3bb31add6350..8435af108632 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -23,6 +23,10 @@
* Alon Levy
*/
+#include <linux/pci.h>
+
+#include <drm/drm_irq.h>
+
#include "qxl_drv.h"
irqreturn_t qxl_irq_handler(int irq, void *arg)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index bee61fa2c9bc..611cbe7aee69 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -23,11 +23,14 @@
* Alon Levy
*/
-#include "qxl_drv.h"
-#include "qxl_object.h"
+#include <linux/io-mapping.h>
+#include <linux/pci.h>
+#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
-#include <linux/io-mapping.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
int qxl_log_level;
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 4928fa602944..548dfe6f3b26 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -33,14 +33,14 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
struct qxl_device *qdev;
bo = to_qxl_bo(tbo);
- qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
+ qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
qxl_surface_evict(qdev, bo, false);
WARN_ON_ONCE(bo->map_count > 0);
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
- drm_gem_object_release(&bo->gem_base);
+ drm_gem_object_release(&bo->tbo.base);
kfree(bo);
}
@@ -95,7 +95,7 @@ int qxl_bo_create(struct qxl_device *qdev,
if (bo == NULL)
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
- r = drm_gem_object_init(&qdev->ddev, &bo->gem_base, size);
+ r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
if (unlikely(r)) {
kfree(bo);
return r;
@@ -214,20 +214,20 @@ void qxl_bo_unref(struct qxl_bo **bo)
if ((*bo) == NULL)
return;
- drm_gem_object_put_unlocked(&(*bo)->gem_base);
+ drm_gem_object_put_unlocked(&(*bo)->tbo.base);
*bo = NULL;
}
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
- drm_gem_object_get(&bo->gem_base);
+ drm_gem_object_get(&bo->tbo.base);
return bo;
}
static int __qxl_bo_pin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
int r;
if (bo->pin_count) {
@@ -247,7 +247,7 @@ static int __qxl_bo_pin(struct qxl_bo *bo)
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
int r, i;
if (!bo->pin_count) {
@@ -310,13 +310,13 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
dev_err(qdev->ddev.dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
- &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
- *((unsigned long *)&bo->gem_base.refcount));
+ &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+ *((unsigned long *)&bo->tbo.base.refcount));
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_put_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->tbo.base);
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 255b914e2a7b..8ae54ba7857c 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -34,7 +34,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
dev_err(ddev->dev, "%p reserve failed\n", bo);
}
@@ -60,7 +60,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
@@ -71,7 +71,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
- struct drm_device *ddev = bo->gem_base.dev;
+ struct drm_device *ddev = bo->tbo.base.dev;
dev_err(ddev->dev, "%p reserve failed for wait\n",
bo);
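
The mmap-offset switch follows from the same embedding: the drm_vma_offset_node that used to live on the TTM BO is now the one inside tbo.base, so GEM and TTM share a single node. From userspace nothing changes; the offset is still what the MAP_DUMB ioctl reports. A hedged libdrm-based sketch of consuming such an offset (error handling trimmed; fd is an open DRM node):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xf86drm.h>

static void *map_dumb_buffer(int fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb map = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	/* map.offset is exactly what *_bo_mmap_offset() computed */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.offset);
}
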
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 49f9a9385393..df55b83e0a55 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -19,9 +19,13 @@
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+
+#include <linux/delay.h>
+
+#include <trace/events/dma_fence.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <trace/events/dma_fence.h>
/*
* drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -234,12 +238,12 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
}
- ret = reservation_object_reserve_shared(bo->tbo.resv, 1);
+ ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1);
if (ret)
return ret;
/* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+ ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
if (ret)
return ret;
return 0;
@@ -454,9 +458,9 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
- reservation_object_add_shared_fence(bo->resv, &release->base);
+ reservation_object_add_shared_fence(bo->base.resv, &release->base);
ttm_bo_add_to_lru(bo);
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
ww_acquire_fini(&release->ticket);
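
Two related changes meet in qxl_release: resv moves under tbo.base, and the call sites show the usual two-step shared-fence protocol, reserve in validate, add at fence time. A condensed sketch of that protocol (helper name illustrative; the caller must already hold the reservation lock when adding):

#include <linux/reservation.h>

static int publish_shared_fence(struct reservation_object *resv,
				struct dma_fence *fence)
{
	int ret;

	/* step 1: make room for the fence; may allocate, can fail */
	ret = reservation_object_reserve_shared(resv, 1);
	if (ret)
		return ret;

	/* step 2: publish; must not fail, resv lock held */
	reservation_object_add_shared_fence(resv, fence);
	return 0;
}
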
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 0234f8556ada..9b24514c75aa 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -23,19 +23,21 @@
* Alon Levy
*/
+#include <linux/delay.h>
+
+#include <drm/drm.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include <drm/qxl_drm.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_placement.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
-#include <linux/delay.h>
-
static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
{
struct qxl_mman *mman;
@@ -153,7 +155,7 @@ static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct qxl_bo *qbo = to_qxl_bo(bo);
- return drm_vma_node_verify_access(&qbo->gem_base.vma_node,
+ return drm_vma_node_verify_access(&qbo->tbo.base.vma_node,
filp->private_data);
}
@@ -295,7 +297,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
if (!qxl_ttm_bo_is_qxl_bo(bo))
return;
qbo = to_qxl_bo(bo);
- qdev = qbo->gem_base.dev->dev_private;
+ qdev = qbo->tbo.base.dev->dev_private;
if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 6589f9e0310e..6ac71755c22d 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -29,10 +29,11 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
+
#include <linux/compat.h>
-#include <drm/drmP.h>
#include <drm/r128_drm.h>
+
#include "r128_drv.h"
typedef struct drm_r128_init32 {
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index 9730f4918944..d84e9c96e20a 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -30,8 +30,11 @@
* Eric Anholt <anholt@FreeBSD.org>
*/
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include <drm/r128_drm.h>
+
#include "r128_drv.h"
u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 32808e50be12..3f7701321d21 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -505,7 +505,6 @@ struct radeon_bo {
struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
- struct drm_gem_object gem_base;
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
@@ -513,7 +512,7 @@ struct radeon_bo {
struct radeon_mn *mn;
struct list_head mn_list;
};
-#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
int radeon_gem_debugfs_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 7ce5064a59f6..1ea50ce16312 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -122,7 +122,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (rdev->asic->copy.dma) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_DMA, n,
- dobj->tbo.resv);
+ dobj->tbo.base.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
@@ -133,7 +133,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (rdev->asic->copy.blit) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_BLIT, n,
- dobj->tbo.resv);
+ dobj->tbo.base.resv);
if (time < 0)
goto out_cleanup;
if (time > 0)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index cef0e697a2ea..7e5254a34e84 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -257,7 +257,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
list_for_each_entry(reloc, &p->validated, tv.head) {
struct reservation_object *resv;
- resv = reloc->robj->tbo.resv;
+ resv = reloc->robj->tbo.base.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
reloc->tv.num_shared);
if (r)
@@ -443,7 +443,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
if (bo == NULL)
continue;
- drm_gem_object_put_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->tbo.base);
}
}
kfree(parser->track);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bd52f15e6330..7bf73230ac0b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -275,7 +275,7 @@ static void radeon_unpin_work_func(struct work_struct *__work)
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
+ drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
kfree(work);
}
@@ -533,7 +533,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
- work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
+ work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
@@ -607,7 +607,7 @@ pflip_cleanup:
radeon_bo_unreserve(new_rbo);
cleanup:
- drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
+ drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
dma_fence_put(work->fence);
kfree(work);
return r;
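
Worth noting in the page-flip hunk: reservation_object_get_excl() returns a borrowed pointer that is only stable while the reservation lock is held, hence the dma_fence_get() before radeon_bo_unreserve() drops that lock. A sketch of the snapshot idiom (dma_fence_get() is NULL-safe):

#include <linux/dma-fence.h>
#include <linux/reservation.h>

/* call with the resv lock held; returns an owned reference */
static struct dma_fence *snapshot_excl_fence(struct reservation_object *resv)
{
	return dma_fence_get(reservation_object_get_excl(resv));
}
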
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 66ff84ec665b..5838162f687f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -130,8 +130,7 @@ int radeon_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file_priv);
void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
+struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
int flags);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
unsigned int flags, int *vpos, int *hpos,
@@ -153,7 +152,6 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sg);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);
-struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
@@ -542,7 +540,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
+ DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER,
.load = radeon_driver_load_kms,
.open = radeon_driver_open_kms,
.postclose = radeon_driver_postclose_kms,
@@ -568,10 +566,8 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = radeon_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_pin = radeon_gem_prime_pin,
.gem_prime_unpin = radeon_gem_prime_unpin,
- .gem_prime_res_obj = radeon_gem_prime_res_obj,
.gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
.gem_prime_vmap = radeon_gem_prime_vmap,
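
The dropped lines in kms_driver are not a behaviour change: with this series the core falls back to the drm_gem_prime_*() defaults when the import/export hooks are left NULL, and DRIVER_PRIME no longer gates anything, which is why both can simply disappear. A sketch of the resulting minimal PRIME wiring (demo_driver is hypothetical):

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>

static struct drm_driver demo_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	/* .gem_prime_import/.gem_prime_export: NULL means core defaults */
};
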
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d8bc5d2dfd61..03873f21a734 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -83,7 +83,7 @@ retry:
}
return r;
}
- *obj = &robj->gem_base;
+ *obj = &robj->tbo.base;
robj->pid = task_pid_nr(current);
mutex_lock(&rdev->gem.mutex);
@@ -114,7 +114,7 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
- r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+ r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
if (!r)
r = -EBUSY;
@@ -449,7 +449,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+ r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true);
if (r == 0)
r = -EBUSY;
else
@@ -478,7 +478,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
}
robj = gem_to_radeon_bo(gobj);
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
+ ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
if (ret == 0)
r = -EBUSY;
else if (ret < 0)
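
The busy/wait ioctls above lean on the lockless _rcu variants, which need no reservation lock: the two bool arguments select whether shared fences count and whether the wait is interruptible, and the wait returns 0 on timeout, a negative errno on error, or remaining jiffies on success. A sketch folding that convention into one helper (name illustrative):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/reservation.h>

static int bo_wait_idle(struct reservation_object *resv)
{
	long r;

	r = reservation_object_wait_timeout_rcu(resv, true /* shared too */,
						true /* interruptible */,
						30 * HZ);
	if (r == 0)
		return -EBUSY;

	return r < 0 ? r : 0;
}
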
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 8c3871ed23a9..0d64ace0e6c1 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -163,7 +163,7 @@ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
continue;
}
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+ r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 21f73fc86f38..9db8ba29ef68 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -85,9 +85,9 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
WARN_ON_ONCE(!list_empty(&bo->va));
- if (bo->gem_base.import_attach)
- drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
- drm_gem_object_release(&bo->gem_base);
+ if (bo->tbo.base.import_attach)
+ drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+ drm_gem_object_release(&bo->tbo.base);
kfree(bo);
}
@@ -209,7 +209,7 @@ int radeon_bo_create(struct radeon_device *rdev,
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size);
+ drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
bo->rdev = rdev;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
@@ -442,13 +442,13 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
dev_err(rdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
dev_err(rdev->dev, "%p %p %lu %lu force free\n",
- &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
- *((unsigned long *)&bo->gem_base.refcount));
+ &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+ *((unsigned long *)&bo->tbo.base.refcount));
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_put_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->tbo.base);
}
}
@@ -610,7 +610,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
int steal;
int i;
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ reservation_object_assert_held(bo->tbo.base.resv);
if (!bo->tiling_flags)
return 0;
@@ -736,7 +736,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
uint32_t *tiling_flags,
uint32_t *pitch)
{
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ reservation_object_assert_held(bo->tbo.base.resv);
if (tiling_flags)
*tiling_flags = bo->tiling_flags;
@@ -748,7 +748,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
bool force_drop)
{
if (!force_drop)
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ reservation_object_assert_held(bo->tbo.base.resv);
if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
return 0;
@@ -870,7 +870,7 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
bool shared)
{
- struct reservation_object *resv = bo->tbo.resv;
+ struct reservation_object *resv = bo->tbo.base.resv;
if (shared)
reservation_object_add_shared_fence(resv, &fence->base);
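
The lockdep_assert_held(&...resv->lock.base) to reservation_object_assert_held() swaps in this file are pure encapsulation: the same assertion, but callers stop depending on the ww_mutex living at a particular spot inside struct reservation_object. A sketch combining the accessor with the add-fence calls, loosely mirroring radeon_bo_fence() (names illustrative):

#include <linux/dma-fence.h>
#include <linux/reservation.h>

static void demo_bo_fence(struct reservation_object *resv,
			  struct dma_fence *fence, bool shared)
{
	reservation_object_assert_held(resv);

	if (shared)	/* a slot must have been reserved beforehand */
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}
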
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 9ffd8215d38a..e5554bf9140e 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -116,7 +116,7 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
*/
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index d3a5bea9a2c5..52b0d0cd8cbe 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -68,10 +68,10 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
struct radeon_bo *bo;
int ret;
- ww_mutex_lock(&resv->lock, NULL);
+ reservation_object_lock(resv, NULL);
ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
- ww_mutex_unlock(&resv->lock);
+ reservation_object_unlock(resv);
if (ret)
return ERR_PTR(ret);
@@ -80,7 +80,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
mutex_unlock(&rdev->gem.mutex);
bo->prime_shared_count = 1;
- return &bo->gem_base;
+ return &bo->tbo.base;
}
int radeon_gem_prime_pin(struct drm_gem_object *obj)
@@ -117,19 +117,11 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
}
-struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct radeon_bo *bo = gem_to_radeon_bo(obj);
-
- return bo->tbo.resv;
-}
-
-struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
+struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct radeon_bo *bo = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
return ERR_PTR(-EPERM);
- return drm_gem_prime_export(dev, gobj, flags);
+ return drm_gem_prime_export(gobj, flags);
}
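
Signature cleanup: the export hook loses its drm_device argument (recoverable as gobj->dev), so the userptr veto stays and only the tail call changes. A sketch of the new hook shape for an arbitrary driver (all demo_ names are hypothetical):

#include <linux/err.h>

#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

static bool demo_bo_is_unexportable(struct drm_gem_object *gobj)
{
	return false;	/* stand-in for e.g. a userptr check */
}

static struct dma_buf *demo_gem_prime_export(struct drm_gem_object *gobj,
					     int flags)
{
	if (demo_bo_is_unexportable(gobj))
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(gobj, flags);
}
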
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 0f6ba81a1669..a5e1d2139e80 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -120,11 +120,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
else
fence = radeon_copy_blit(rdev, gtt_addr, vram_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
r = PTR_ERR(fence);
@@ -171,11 +171,11 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
if (ring == R600_RING_TYPE_DMA_INDEX)
fence = radeon_copy_dma(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
else
fence = radeon_copy_blit(rdev, vram_addr, gtt_addr,
size / RADEON_GPU_PAGE_SIZE,
- vram_obj->tbo.resv);
+ vram_obj->tbo.base.resv);
if (IS_ERR(fence)) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
r = PTR_ERR(fence);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fb3696bc616d..35ac75a11d38 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -184,7 +184,7 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
if (radeon_ttm_tt_has_userptr(bo->ttm))
return -EPERM;
- return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+ return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
filp->private_data);
}
@@ -244,7 +244,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
- fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
+ fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
if (IS_ERR(fence))
return PTR_ERR(fence);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index ff4f794d1c86..311e69c2ed7f 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -477,7 +477,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- f = reservation_object_get_excl(bo->tbo.resv);
+ f = reservation_object_get_excl(bo->tbo.base.resv);
if (f) {
r = radeon_fence_wait((struct radeon_fence *)f, false);
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 8512b02e9583..e48a05533126 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -702,7 +702,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
- radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
+ radeon_sync_resv(rdev, &ib.sync, pd->tbo.base.resv, true);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL, false);
if (r) {
@@ -830,8 +830,8 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
uint64_t pte;
int r;
- radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
- r = reservation_object_reserve_shared(pt->tbo.resv, 1);
+ radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
+ r = reservation_object_reserve_shared(pt->tbo.base.resv, 1);
if (r)
return r;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 6df37c2a9678..9c93eb4fad8b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -441,14 +441,11 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
static struct drm_driver rcar_du_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
- | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 524684ba7f6a..17a9e7eb2130 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -4,8 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
- rockchip_drm_gem.o rockchip_drm_psr.o \
- rockchip_drm_vop.o rockchip_vop_reg.o
+ rockchip_drm_gem.o rockchip_drm_vop.o rockchip_vop_reg.o
rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 95e5c517a15f..7d7cb57410fc 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -16,19 +16,18 @@
#include <linux/reset.h>
#include <linux/clk.h>
-#include <drm/drmP.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_probe_helper.h>
-
#include <video/of_videomode.h>
#include <video/videomode.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/bridge/analogix_dp.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
-#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
#define RK3288_GRF_SOC_CON6 0x25c
@@ -73,29 +72,6 @@ struct rockchip_dp_device {
struct analogix_dp_plat_data plat_data;
};
-static int analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
-{
- struct rockchip_dp_device *dp = to_dp(encoder);
- int ret;
-
- if (!analogix_dp_psr_enabled(dp->adp))
- return 0;
-
- DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
-
- ret = rockchip_drm_wait_vact_end(dp->encoder.crtc,
- PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
- if (ret) {
- DRM_DEV_ERROR(dp->dev, "line flag interrupt did not arrive\n");
- return -ETIMEDOUT;
- }
-
- if (enabled)
- return analogix_dp_enable_psr(dp->adp);
- else
- return analogix_dp_disable_psr(dp->adp);
-}
-
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
{
reset_control_assert(dp->rst);
@@ -126,21 +102,9 @@ static int rockchip_dp_poweron_start(struct analogix_dp_plat_data *plat_data)
return ret;
}
-static int rockchip_dp_poweron_end(struct analogix_dp_plat_data *plat_data)
-{
- struct rockchip_dp_device *dp = to_dp(plat_data);
-
- return rockchip_drm_psr_inhibit_put(&dp->encoder);
-}
-
static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
{
struct rockchip_dp_device *dp = to_dp(plat_data);
- int ret;
-
- ret = rockchip_drm_psr_inhibit_get(&dp->encoder);
- if (ret != 0)
- return ret;
clk_disable_unprepare(dp->pclk);
@@ -180,12 +144,42 @@ static void rockchip_dp_drm_encoder_mode_set(struct drm_encoder *encoder,
/* do nothing */
}
-static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
+static
+struct drm_crtc *rockchip_dp_drm_get_new_crtc(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, encoder);
+ if (!connector)
+ return NULL;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state)
+ return NULL;
+
+ return conn_state->crtc;
+}
+
+static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct rockchip_dp_device *dp = to_dp(encoder);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
int ret;
u32 val;
+ crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
+ if (!crtc)
+ return;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ /* Coming back from self refresh, nothing to do */
+ if (old_crtc_state && old_crtc_state->self_refresh_active)
+ return;
+
ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
if (ret < 0)
return;
@@ -210,9 +204,27 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
clk_disable_unprepare(dp->grfclk);
}
-static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
+static void rockchip_dp_drm_encoder_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
- /* do nothing */
+ struct rockchip_dp_device *dp = to_dp(encoder);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+
+ crtc = rockchip_dp_drm_get_new_crtc(encoder, state);
+ /* No crtc means we're doing a full shutdown */
+ if (!crtc)
+ return;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ /* If we're not entering self-refresh, no need to wait for vact */
+ if (!new_crtc_state || !new_crtc_state->self_refresh_active)
+ return;
+
+ ret = rockchip_drm_wait_vact_end(crtc, PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
+ if (ret)
+ DRM_DEV_ERROR(dp->dev, "line flag irq timed out\n");
}
static int
@@ -241,8 +253,8 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
.mode_fixup = rockchip_dp_drm_encoder_mode_fixup,
.mode_set = rockchip_dp_drm_encoder_mode_set,
- .enable = rockchip_dp_drm_encoder_enable,
- .disable = rockchip_dp_drm_encoder_nop,
+ .atomic_enable = rockchip_dp_drm_encoder_enable,
+ .atomic_disable = rockchip_dp_drm_encoder_disable,
.atomic_check = rockchip_dp_drm_encoder_atomic_check,
};
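
The encoder now keys its behaviour off atomic state rather than PSR callbacks: on enable it skips re-initialisation when the old CRTC state says the pipe was only in self refresh, and on disable it waits for vact only when the new state says self refresh is being entered. A sketch of that second check (helper name illustrative):

#include <drm/drm_atomic.h>

static bool entering_self_refresh(struct drm_atomic_state *state,
				  struct drm_crtc *crtc)
{
	struct drm_crtc_state *new_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	return new_state && new_state->self_refresh_active;
}
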
@@ -334,23 +346,16 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.dev_type = dp->data->chip_type;
dp->plat_data.power_on_start = rockchip_dp_poweron_start;
- dp->plat_data.power_on_end = rockchip_dp_poweron_end;
dp->plat_data.power_off = rockchip_dp_powerdown;
dp->plat_data.get_modes = rockchip_dp_get_modes;
- ret = rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set);
- if (ret < 0)
- goto err_cleanup_encoder;
-
dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
if (IS_ERR(dp->adp)) {
ret = PTR_ERR(dp->adp);
- goto err_unreg_psr;
+ goto err_cleanup_encoder;
}
return 0;
-err_unreg_psr:
- rockchip_drm_psr_unregister(&dp->encoder);
err_cleanup_encoder:
dp->encoder.funcs->destroy(&dp->encoder);
return ret;
@@ -362,7 +367,6 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
struct rockchip_dp_device *dp = dev_get_drvdata(dev);
analogix_dp_unbind(dp->adp);
- rockchip_drm_psr_unregister(&dp->encoder);
dp->encoder.funcs->destroy(&dp->encoder);
dp->adp = ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 8c32c32be85c..d505ea7d5384 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -4,24 +4,23 @@
* Author: Chris Zhong <zyw@rock-chips.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/extcon.h>
#include <linux/firmware.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
#include <sound/hdmi-codec.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
#include "rockchip_drm_vop.h"
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index f18a01e6cbc2..b85ea89eb60b 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -7,10 +7,10 @@
#ifndef _CDN_DP_CORE_H
#define _CDN_DP_CORE_H
-#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+
#include "rockchip_drm_drv.h"
#define MAX_PHY 2
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index ef8486e5e2cd..bc073ec5c183 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -6,10 +6,6 @@
* Nickey Yang <nickey.yang@rock-chips.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/bridge/dw_mipi_dsi.h>
-#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
@@ -18,8 +14,13 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+
#include <video/mipi_display.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index cdc304d4cd02..906891b03a38 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -10,11 +10,10 @@
#include <linux/phy/phy.h>
#include <linux/regmap.h>
-#include <drm/drm_of.h>
-#include <drm/drmP.h>
+#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include <drm/bridge/dw_hdmi.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index f8ca98d294d0..ed344a795b4d 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -15,10 +15,9 @@
#include <linux/mutex.h>
#include <linux/of_device.h>
-#include <drm/drm_of.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 53d2c5bd61dc..30c177eb3022 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -6,11 +6,6 @@
* based on exynos_drm_drv.c
*/
-#include <drm/drmP.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
#include <linux/dma-mapping.h>
#include <linux/dma-iommu.h>
#include <linux/pm_runtime.h>
@@ -21,6 +16,13 @@
#include <linux/console.h>
#include <linux/iommu.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_fbdev.h"
@@ -212,16 +214,13 @@ static const struct file_operations rockchip_drm_driver_fops = {
};
static struct drm_driver rockchip_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
.gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
.gem_prime_vmap = rockchip_gem_prime_vmap,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 64ca87cf6d50..ca01234c037c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -5,18 +5,18 @@
*/
#include <linux/kernel.h>
+
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_gem.h"
-#include "rockchip_drm_psr.h"
static const struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
.destroy = drm_gem_fb_destroy,
@@ -105,31 +105,8 @@ err_gem_object_unreference:
return ERR_PTR(ret);
}
-static void
-rockchip_atomic_helper_commit_tail_rpm(struct drm_atomic_state *old_state)
-{
- struct drm_device *dev = old_state->dev;
-
- rockchip_drm_psr_inhibit_get_state(old_state);
-
- drm_atomic_helper_commit_modeset_disables(dev, old_state);
-
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
-
- drm_atomic_helper_commit_planes(dev, old_state,
- DRM_PLANE_COMMIT_ACTIVE_ONLY);
-
- rockchip_drm_psr_inhibit_put_state(old_state);
-
- drm_atomic_helper_commit_hw_done(old_state);
-
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
-
- drm_atomic_helper_cleanup_planes(dev, old_state);
-}
-
static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
- .atomic_commit_tail = rockchip_atomic_helper_commit_tail_rpm,
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
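
With the PSR inhibit bracketing gone, the custom commit tail collapses into the stock helper. For comparison with the deleted function, drm_atomic_helper_commit_tail_rpm() performs the same sequence minus the two PSR calls, roughly:

#include <drm/drm_atomic_helper.h>

static void commit_tail_rpm_sketch(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);
	drm_atomic_helper_commit_hw_done(old_state);
	drm_atomic_helper_wait_for_vblanks(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}
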
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index bb8ac18298f6..02be6c5ff857 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -5,8 +5,8 @@
*/
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index ba9e77acbe16..291e89b4045f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -4,14 +4,14 @@
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
+#include <linux/dma-buf.h>
+#include <linux/iommu.h>
+
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
-#include <linux/dma-buf.h>
-#include <linux/iommu.h>
-
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
deleted file mode 100644
index b604747fe453..000000000000
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ /dev/null
@@ -1,282 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
- * Author: Yakir Yang <ykk@rock-chips.com>
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_probe_helper.h>
-
-#include "rockchip_drm_drv.h"
-#include "rockchip_drm_psr.h"
-
-#define PSR_FLUSH_TIMEOUT_MS 100
-
-struct psr_drv {
- struct list_head list;
- struct drm_encoder *encoder;
-
- struct mutex lock;
- int inhibit_count;
- bool enabled;
-
- struct delayed_work flush_work;
-
- int (*set)(struct drm_encoder *encoder, bool enable);
-};
-
-static struct psr_drv *find_psr_by_encoder(struct drm_encoder *encoder)
-{
- struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
- struct psr_drv *psr;
-
- mutex_lock(&drm_drv->psr_list_lock);
- list_for_each_entry(psr, &drm_drv->psr_list, list) {
- if (psr->encoder == encoder)
- goto out;
- }
- psr = ERR_PTR(-ENODEV);
-
-out:
- mutex_unlock(&drm_drv->psr_list_lock);
- return psr;
-}
-
-static int psr_set_state_locked(struct psr_drv *psr, bool enable)
-{
- int ret;
-
- if (psr->inhibit_count > 0)
- return -EINVAL;
-
- if (enable == psr->enabled)
- return 0;
-
- ret = psr->set(psr->encoder, enable);
- if (ret)
- return ret;
-
- psr->enabled = enable;
- return 0;
-}
-
-static void psr_flush_handler(struct work_struct *work)
-{
- struct psr_drv *psr = container_of(to_delayed_work(work),
- struct psr_drv, flush_work);
-
- mutex_lock(&psr->lock);
- psr_set_state_locked(psr, true);
- mutex_unlock(&psr->lock);
-}
-
-/**
- * rockchip_drm_psr_inhibit_put - release PSR inhibit on given encoder
- * @encoder: encoder to obtain the PSR encoder
- *
- * Decrements PSR inhibit count on given encoder. Should be called only
- * for a PSR inhibit count increment done before. If PSR inhibit counter
- * reaches zero, PSR flush work is scheduled to make the hardware enter
- * PSR mode in PSR_FLUSH_TIMEOUT_MS.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder)
-{
- struct psr_drv *psr = find_psr_by_encoder(encoder);
-
- if (IS_ERR(psr))
- return PTR_ERR(psr);
-
- mutex_lock(&psr->lock);
- --psr->inhibit_count;
- WARN_ON(psr->inhibit_count < 0);
- if (!psr->inhibit_count)
- mod_delayed_work(system_wq, &psr->flush_work,
- PSR_FLUSH_TIMEOUT_MS);
- mutex_unlock(&psr->lock);
-
- return 0;
-}
-EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put);
-
-void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_encoder *encoder;
- u32 encoder_mask = 0;
- int i;
-
- for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
- encoder_mask |= crtc_state->encoder_mask;
- encoder_mask |= crtc->state->encoder_mask;
- }
-
- drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
- rockchip_drm_psr_inhibit_get(encoder);
-}
-EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get_state);
-
-void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state)
-{
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_encoder *encoder;
- u32 encoder_mask = 0;
- int i;
-
- for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
- encoder_mask |= crtc_state->encoder_mask;
- encoder_mask |= crtc->state->encoder_mask;
- }
-
- drm_for_each_encoder_mask(encoder, state->dev, encoder_mask)
- rockchip_drm_psr_inhibit_put(encoder);
-}
-EXPORT_SYMBOL(rockchip_drm_psr_inhibit_put_state);
-
-/**
- * rockchip_drm_psr_inhibit_get - acquire PSR inhibit on given encoder
- * @encoder: encoder to obtain the PSR encoder
- *
- * Increments PSR inhibit count on given encoder. This function guarantees
- * that after it returns PSR is turned off on given encoder and no PSR-related
- * hardware state change occurs at least until a matching call to
- * rockchip_drm_psr_inhibit_put() is done.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder)
-{
- struct psr_drv *psr = find_psr_by_encoder(encoder);
-
- if (IS_ERR(psr))
- return PTR_ERR(psr);
-
- mutex_lock(&psr->lock);
- psr_set_state_locked(psr, false);
- ++psr->inhibit_count;
- mutex_unlock(&psr->lock);
- cancel_delayed_work_sync(&psr->flush_work);
-
- return 0;
-}
-EXPORT_SYMBOL(rockchip_drm_psr_inhibit_get);
-
-static void rockchip_drm_do_flush(struct psr_drv *psr)
-{
- cancel_delayed_work_sync(&psr->flush_work);
-
- mutex_lock(&psr->lock);
- if (!psr_set_state_locked(psr, false))
- mod_delayed_work(system_wq, &psr->flush_work,
- PSR_FLUSH_TIMEOUT_MS);
- mutex_unlock(&psr->lock);
-}
-
-/**
- * rockchip_drm_psr_flush_all - force to flush all registered PSR encoders
- * @dev: drm device
- *
- * Disable the PSR function for all registered encoders, and then enable the
- * PSR function back after PSR_FLUSH_TIMEOUT. If encoder PSR state have been
- * changed during flush time, then keep the state no change after flush
- * timeout.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-void rockchip_drm_psr_flush_all(struct drm_device *dev)
-{
- struct rockchip_drm_private *drm_drv = dev->dev_private;
- struct psr_drv *psr;
-
- mutex_lock(&drm_drv->psr_list_lock);
- list_for_each_entry(psr, &drm_drv->psr_list, list)
- rockchip_drm_do_flush(psr);
- mutex_unlock(&drm_drv->psr_list_lock);
-}
-EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
-
-/**
- * rockchip_drm_psr_register - register encoder to psr driver
- * @encoder: encoder that obtain the PSR function
- * @psr_set: call back to set PSR state
- *
- * The function returns with PSR inhibit counter initialized with one
- * and the caller (typically encoder driver) needs to call
- * rockchip_drm_psr_inhibit_put() when it becomes ready to accept PSR
- * enable request.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int rockchip_drm_psr_register(struct drm_encoder *encoder,
- int (*psr_set)(struct drm_encoder *, bool enable))
-{
- struct rockchip_drm_private *drm_drv;
- struct psr_drv *psr;
-
- if (!encoder || !psr_set)
- return -EINVAL;
-
- drm_drv = encoder->dev->dev_private;
-
- psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
- if (!psr)
- return -ENOMEM;
-
- INIT_DELAYED_WORK(&psr->flush_work, psr_flush_handler);
- mutex_init(&psr->lock);
-
- psr->inhibit_count = 1;
- psr->enabled = false;
- psr->encoder = encoder;
- psr->set = psr_set;
-
- mutex_lock(&drm_drv->psr_list_lock);
- list_add_tail(&psr->list, &drm_drv->psr_list);
- mutex_unlock(&drm_drv->psr_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(rockchip_drm_psr_register);
-
-/**
- * rockchip_drm_psr_unregister - unregister encoder to psr driver
- * @encoder: encoder that obtain the PSR function
- * @psr_set: call back to set PSR state
- *
- * It is expected that the PSR inhibit counter is 1 when this function is
- * called, which corresponds to a state when related encoder has been
- * disconnected from any CRTCs and its driver called
- * rockchip_drm_psr_inhibit_get() to stop the PSR logic.
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-void rockchip_drm_psr_unregister(struct drm_encoder *encoder)
-{
- struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
- struct psr_drv *psr, *n;
-
- mutex_lock(&drm_drv->psr_list_lock);
- list_for_each_entry_safe(psr, n, &drm_drv->psr_list, list) {
- if (psr->encoder == encoder) {
- /*
- * Any other value would mean that the encoder
- * is still in use.
- */
- WARN_ON(psr->inhibit_count != 1);
-
- list_del(&psr->list);
- kfree(psr);
- }
- }
- mutex_unlock(&drm_drv->psr_list_lock);
-}
-EXPORT_SYMBOL(rockchip_drm_psr_unregister);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h b/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
deleted file mode 100644
index 28a9c399114e..000000000000
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
- * Author: Yakir Yang <ykk@rock-chips.com>
- */
-
-#ifndef __ROCKCHIP_DRM_PSR___
-#define __ROCKCHIP_DRM_PSR___
-
-void rockchip_drm_psr_flush_all(struct drm_device *dev);
-
-int rockchip_drm_psr_inhibit_put(struct drm_encoder *encoder);
-int rockchip_drm_psr_inhibit_get(struct drm_encoder *encoder);
-
-void rockchip_drm_psr_inhibit_get_state(struct drm_atomic_state *state);
-void rockchip_drm_psr_inhibit_put_state(struct drm_atomic_state *state);
-
-int rockchip_drm_psr_register(struct drm_encoder *encoder,
- int (*psr_set)(struct drm_encoder *, bool enable));
-void rockchip_drm_psr_unregister(struct drm_encoder *encoder);
-
-#endif /* __ROCKCHIP_DRM_PSR__ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 09a790c2f3a1..2f821c58007c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -4,40 +4,43 @@
* Author:Mark Yao <mark.yao@rock-chips.com>
*/
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
#include <drm/drm.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_self_refresh_helper.h>
+#include <drm/drm_vblank.h>
+
#ifdef CONFIG_DRM_ANALOGIX_DP
#include <drm/bridge/analogix_dp.h>
#endif
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/iopoll.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/component.h>
-#include <linux/overflow.h>
-
-#include <linux/reset.h>
-#include <linux/delay.h>
-
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
-#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
+#define VOP_SELF_REFRESH_ENTRY_DELAY_MS 100
+
#define VOP_WIN_SET(vop, win, name, v) \
vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
#define VOP_SCL_SET(vop, win, name, v) \
@@ -79,7 +82,7 @@
vop_get_intr_type(vop, &vop->data->intr->name, type)
#define VOP_WIN_GET(vop, win, name) \
- vop_read_reg(vop, win->offset, win->phy->name)
+ vop_read_reg(vop, win->base, &win->phy->name)
#define VOP_WIN_HAS_REG(win, name) \
(!!(win->phy->name.mask))
@@ -124,6 +127,7 @@ struct vop {
bool is_enabled;
struct completion dsp_hold_completion;
+ unsigned int win_enabled;
/* protected by dev->event_lock */
struct drm_pending_vblank_event *event;
@@ -528,8 +532,10 @@ static void vop_core_clks_disable(struct vop *vop)
clk_disable(vop->hclk);
}
-static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+static void vop_win_disable(struct vop *vop, const struct vop_win *vop_win)
{
+ const struct vop_win_data *win = vop_win->data;
+
if (win->phy->scl && win->phy->scl->ext) {
VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
@@ -538,9 +544,10 @@ static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
}
VOP_WIN_SET(vop, win, enable, 0);
+ vop->win_enabled &= ~BIT(VOP_WIN_TO_INDEX(vop_win));
}
-static int vop_enable(struct drm_crtc *crtc)
+static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
{
struct vop *vop = to_vop(crtc);
int ret, i;
@@ -580,12 +587,17 @@ static int vop_enable(struct drm_crtc *crtc)
* We need to make sure that all windows are disabled before we
* enable the crtc. Otherwise we might try to scan from a destroyed
* buffer later.
+ *
+ * In the case of enable-after-PSR, we don't need to worry about this
+ * case since the buffer is guaranteed to be valid and disabling the
+ * window will result in screen glitches on PSR exit.
*/
- for (i = 0; i < vop->data->win_size; i++) {
- struct vop_win *vop_win = &vop->win[i];
- const struct vop_win_data *win = vop_win->data;
+ if (!old_state || !old_state->self_refresh_active) {
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
- vop_win_disable(vop, win);
+ vop_win_disable(vop, vop_win);
+ }
}
spin_unlock(&vop->reg_lock);
@@ -615,6 +627,25 @@ err_put_pm_runtime:
return ret;
}
+static void rockchip_drm_set_win_enabled(struct drm_crtc *crtc, bool enabled)
+{
+ struct vop *vop = to_vop(crtc);
+ int i;
+
+ spin_lock(&vop->reg_lock);
+
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
+
+ VOP_WIN_SET(vop, win, enable,
+ enabled && (vop->win_enabled & BIT(i)));
+ }
+ vop_cfg_done(vop);
+
+ spin_unlock(&vop->reg_lock);
+}
+
static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -622,9 +653,16 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
WARN_ON(vop->event);
+ if (crtc->state->self_refresh_active)
+ rockchip_drm_set_win_enabled(crtc, false);
+
mutex_lock(&vop->vop_lock);
+
drm_crtc_vblank_off(crtc);
+ if (crtc->state->self_refresh_active)
+ goto out;
+
/*
* Vop standby will take effect at end of current frame,
* if dsp hold valid irq happen, it means standby complete.
@@ -655,6 +693,8 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
clk_disable(vop->dclk);
vop_core_clks_disable(vop);
pm_runtime_put(vop->dev);
+
+out:
mutex_unlock(&vop->vop_lock);
if (crtc->state->event && !crtc->state->active) {
@@ -726,7 +766,6 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct vop_win *vop_win = to_vop_win(plane);
- const struct vop_win_data *win = vop_win->data;
struct vop *vop = to_vop(old_state->crtc);
if (!old_state->crtc)
@@ -734,7 +773,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
- vop_win_disable(vop, win);
+ vop_win_disable(vop, vop_win);
spin_unlock(&vop->reg_lock);
}
@@ -873,6 +912,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
}
VOP_WIN_SET(vop, win, enable, 1);
+ vop->win_enabled |= BIT(win_index);
spin_unlock(&vop->reg_lock);
}
@@ -924,12 +964,10 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
swap(plane->state->fb, new_state->fb);
if (vop->is_enabled) {
- rockchip_drm_psr_inhibit_get_state(new_state->state);
vop_plane_atomic_update(plane, plane->state);
spin_lock(&vop->reg_lock);
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
- rockchip_drm_psr_inhibit_put_state(new_state->state);
/*
* A scanout can still be occurring, so we can't drop the
@@ -1033,11 +1071,17 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
int dither_bpc = s->output_bpc ? s->output_bpc : 10;
int ret;
+ if (old_state && old_state->self_refresh_active) {
+ drm_crtc_vblank_on(crtc);
+ rockchip_drm_set_win_enabled(crtc, true);
+ return;
+ }
+
mutex_lock(&vop->vop_lock);
WARN_ON(vop->event);
- ret = vop_enable(crtc);
+ ret = vop_enable(crtc, old_state);
if (ret) {
mutex_unlock(&vop->vop_lock);
DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
@@ -1519,6 +1563,13 @@ static int vop_create_crtc(struct vop *vop)
init_completion(&vop->line_flag_completion);
crtc->port = port;
+ ret = drm_self_refresh_helper_init(crtc,
+ VOP_SELF_REFRESH_ENTRY_DELAY_MS);
+ if (ret)
+ DRM_DEV_DEBUG_KMS(vop->dev,
+ "Failed to init %s with SR helpers %d, ignoring\n",
+ crtc->name, ret);
+
return 0;
err_cleanup_crtc:
@@ -1536,6 +1587,8 @@ static void vop_destroy_crtc(struct vop *vop)
struct drm_device *drm_dev = vop->drm_dev;
struct drm_plane *plane, *tmp;
+ drm_self_refresh_helper_cleanup(crtc);
+
of_node_put(crtc->port);
/*
@@ -1560,7 +1613,6 @@ static void vop_destroy_crtc(struct vop *vop)
static int vop_initial(struct vop *vop)
{
- const struct vop_data *vop_data = vop->data;
struct reset_control *ahb_rst;
int i, ret;
@@ -1627,12 +1679,13 @@ static int vop_initial(struct vop *vop)
VOP_REG_SET(vop, misc, global_regdone_en, 1);
VOP_REG_SET(vop, common, dsp_blank, 0);
- for (i = 0; i < vop_data->win_size; i++) {
- const struct vop_win_data *win = &vop_data->win[i];
+ for (i = 0; i < vop->data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win = vop_win->data;
int channel = i * 2 + 1;
VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
- vop_win_disable(vop, win);
+ vop_win_disable(vop, vop_win);
VOP_WIN_SET(vop, win, gate, 1);
}
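
The rockchip_drm_vop.c hunks above drop the driver-private PSR plumbing in favour of the generic self-refresh helper and introduce a win_enabled bitmask, so window state survives a self-refresh cycle: all windows are gated off on entry, and only the ones that were actually scanning out come back on exit. A minimal standalone sketch of that bitmask pattern follows (plain C with illustrative names, not the VOP register API):

#include <stdbool.h>
#include <stdio.h>

#define NWIN 4

struct crtc_state {
	unsigned int win_enabled;	/* BIT(i) set while window i scans out */
};

static void win_set(struct crtc_state *c, unsigned int i, bool on)
{
	if (on)
		c->win_enabled |= 1u << i;
	else
		c->win_enabled &= ~(1u << i);
	/* the real driver writes the window enable register here */
}

/* Self-refresh entry (enabled=false) and exit (enabled=true): gate the
 * hardware without losing which windows the atomic state had enabled. */
static void set_all(const struct crtc_state *c, bool enabled)
{
	for (unsigned int i = 0; i < NWIN; i++)
		printf("win%u -> %d\n", i,
		       enabled && !!(c->win_enabled & (1u << i)));
}

int main(void)
{
	struct crtc_state c = { 0 };

	win_set(&c, 0, true);
	win_set(&c, 2, true);
	set_all(&c, false);	/* self-refresh entry: all windows off */
	set_all(&c, true);	/* exit: only windows 0 and 2 come back */
	return 0;
}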
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 830858a809e5..64aefa856896 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -6,21 +6,21 @@
* Sandy Huang <hjc@rock-chips.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-
-#include <linux/component.h>
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/mfd/syscon.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/devinfo.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/drm_atomic_helper.h>
+
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index ce4d82d293e4..89e0bb0fe0ab 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -5,16 +5,15 @@
* Sandy Huang <hjc@rock-chips.com>
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
-#include <drm/drm_panel.h>
#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
-#include <linux/component.h>
-#include <linux/of_graph.h>
-
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 7b9c74750f6d..d1494be14471 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -4,10 +4,15 @@
* Author: Mark Yao <mark.yao@rock-chips.com>
*/
-#include <drm/drmP.h>
-
-#include <linux/kernel.h>
#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
#include "rockchip_drm_vop.h"
#include "rockchip_vop_reg.h"
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index 1626f3967130..d79086498aff 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -28,8 +28,6 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include <drm/drmP.h>
-
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gpu_scheduler
#define TRACE_INCLUDE_FILE gpu_scheduler_trace
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 35ddbec1375a..d5a6a946f486 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -22,6 +22,9 @@
*/
#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include "gpu_scheduler_trace.h"
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index d8d2dff9ea2f..54977408f574 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -22,9 +22,11 @@
*/
#include <linux/kthread.h>
-#include <linux/wait.h>
+#include <linux/module.h>
#include <linux/sched.h>
-#include <drm/drmP.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
#include <drm/gpu_scheduler.h>
static struct kmem_cache *sched_fence_slab;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index c1058eece16b..9a0ee74d82dc 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -48,7 +48,8 @@
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
index a04d02dacce2..74d5561a862b 100644
--- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -3,7 +3,12 @@
* Test cases for the drm_framebuffer functions
*/
-#include <drm/drmP.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_fourcc.h>
+
#include "../drm_crtc_internal.h"
#include "test-drm_modeset_common.h"
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index b6988a6d698e..75a752d59ef1 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -10,13 +10,14 @@
#include <linux/backlight.h>
#include <linux/clk.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "shmob_drm_backlight.h"
#include "shmob_drm_crtc.h"
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
index 9ca6920641d8..21718843f46d 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h
@@ -10,12 +10,14 @@
#ifndef __SHMOB_DRM_CRTC_H__
#define __SHMOB_DRM_CRTC_H__
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
struct backlight_device;
+struct drm_pending_vblank_event;
struct shmob_drm_device;
+struct shmob_drm_format_info;
struct shmob_drm_crtc {
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index cb821adfc321..b8c0930959c7 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -15,10 +15,12 @@
#include <linux/pm.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "shmob_drm_drv.h"
#include "shmob_drm_kms.h"
@@ -127,15 +129,12 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops);
static struct drm_driver shmob_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET
- | DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET,
.irq_handler = shmob_drm_irq,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
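
Several drivers in this series (shmobile here; sti, stm and tilcdc below) stop spelling out .gem_prime_import/.gem_prime_export: the DRM core falls back to its generic drm_gem_prime_import()/drm_gem_prime_export() when those hooks are left NULL, so the explicit assignments were dead weight, and DRIVER_PRIME disappears from driver_features along with them. A tiny sketch of that NULL-hook fallback pattern, with stand-in types rather than the DRM structs:

#include <stdio.h>

struct ops {
	void (*export_)(void);	/* NULL means "use the core default" */
};

static void default_export(void) { puts("core default"); }

static void do_export(const struct ops *o)
{
	(o->export_ ? o->export_ : default_export)();
}

int main(void)
{
	struct ops o = { 0 };	/* driver no longer sets the hook */

	do_export(&o);		/* falls back to the core default */
	return 0;
}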
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index 2e08bc203bf9..c51197b6fd85 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -7,7 +7,6 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index 1d1ee5e51351..cbc464f006b4 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -7,10 +7,10 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include "shmob_drm_drv.h"
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.h b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
index bae67cc8c628..e72b21a4288f 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.h
@@ -10,6 +10,7 @@
#ifndef __SHMOB_DRM_PLANE_H__
#define __SHMOB_DRM_PLANE_H__
+struct drm_plane;
struct shmob_drm_device;
int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_regs.h b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
index 9eb0b3d01df8..058533685c4c 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_regs.h
+++ b/drivers/gpu/drm/shmobile/shmob_drm_regs.h
@@ -11,6 +11,9 @@
#define __SHMOB_DRM_REGS_H__
#include <linux/io.h>
+#include <linux/jiffies.h>
+
+#include "shmob_drm_drv.h"
/* Register definitions */
#define LDDCKPAT1R 0x400
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index bb6ae6dd66c9..a39fc36f815b 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -23,7 +23,6 @@
#include "sti_crtc.h"
#include "sti_drv.h"
-#include "sti_drv.h"
#include "sti_plane.h"
#define DRIVER_NAME "sti"
@@ -141,8 +140,7 @@ static void sti_mode_config_init(struct drm_device *dev)
DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops);
static struct drm_driver sti_driver = {
- .driver_features = DRIVER_MODESET |
- DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
@@ -153,8 +151,6 @@ static struct drm_driver sti_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 9e6d5d8b7030..e55870190bf5 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -221,8 +221,7 @@ static void sti_dvo_disable(struct drm_bridge *bridge)
writel(0x00000000, dvo->regs + DVO_DOF_CFG);
- if (dvo->panel)
- dvo->panel->funcs->disable(dvo->panel);
+ drm_panel_disable(dvo->panel);
/* Disable/unprepare dvo clock */
clk_disable_unprepare(dvo->clk_pix);
@@ -262,8 +261,7 @@ static void sti_dvo_pre_enable(struct drm_bridge *bridge)
if (clk_prepare_enable(dvo->clk))
DRM_ERROR("Failed to prepare/enable dvo clk\n");
- if (dvo->panel)
- dvo->panel->funcs->enable(dvo->panel);
+ drm_panel_enable(dvo->panel);
/* Set LUT */
writel(config->lowbyte, dvo->regs + DVO_LUT_PROG_LOW);
@@ -340,7 +338,7 @@ static int sti_dvo_connector_get_modes(struct drm_connector *connector)
struct sti_dvo *dvo = dvo_connector->dvo;
if (dvo->panel)
- return dvo->panel->funcs->get_modes(dvo->panel);
+ return drm_panel_get_modes(dvo->panel);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index f03d617edc4c..9862c322f0c4 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -849,10 +849,13 @@ static int hdmi_audio_configure(struct sti_hdmi *hdmi)
switch (info->channels) {
case 8:
audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
+ /* fall through */
case 6:
audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
+ /* fall through */
case 4:
audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
+ /* fall through */
case 2:
audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
break;
@@ -1284,8 +1287,10 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
drm_connector->polled = DRM_CONNECTOR_POLL_HPD;
- drm_connector_init(drm_dev, drm_connector,
- &sti_hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_init_with_ddc(drm_dev, drm_connector,
+ &sti_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc_adapt);
drm_connector_helper_add(drm_connector,
&sti_hdmi_connector_helper_funcs);
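
The new /* fall through */ annotations in hdmi_audio_configure() above document a deliberate cascade: each higher channel count also accumulates the valid bits for all lower channel pairs. A hedged standalone sketch of the pattern (flag values are illustrative, not the HDMI_AUD_CFG_* register bits):

#include <assert.h>
#include <stdint.h>

#define CH78 (1u << 3)
#define CH56 (1u << 2)
#define CH34 (1u << 1)
#define CH12 (1u << 0)

static uint32_t audio_flags(int channels)
{
	uint32_t cfg = 0;

	switch (channels) {
	case 8:
		cfg |= CH78;
		/* fall through */
	case 6:
		cfg |= CH56;
		/* fall through */
	case 4:
		cfg |= CH34;
		/* fall through */
	case 2:
		cfg |= CH12;
		break;
	default:
		return 0;	/* unsupported channel count */
	}
	return cfg;
}

int main(void)
{
	assert(audio_flags(6) == (CH56 | CH34 | CH12));
	assert(audio_flags(2) == CH12);
	return 0;
}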
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index e1b3c8cb7287..aba79c172512 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -669,10 +669,9 @@ sti_tvout_create_dvo_encoder(struct drm_device *dev,
encoder->tvout = tvout;
- drm_encoder = (struct drm_encoder *)encoder;
+ drm_encoder = &encoder->encoder;
drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
- drm_encoder->possible_clones = 1 << 0;
drm_encoder_init(dev, drm_encoder,
&sti_tvout_encoder_funcs, DRM_MODE_ENCODER_LVDS,
@@ -722,10 +721,9 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
encoder->tvout = tvout;
- drm_encoder = (struct drm_encoder *) encoder;
+ drm_encoder = &encoder->encoder;
drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
- drm_encoder->possible_clones = 1 << 0;
drm_encoder_init(dev, drm_encoder,
&sti_tvout_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
@@ -771,10 +769,9 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
encoder->tvout = tvout;
- drm_encoder = (struct drm_encoder *) encoder;
+ drm_encoder = &encoder->encoder;
drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
- drm_encoder->possible_clones = 1 << 1;
drm_encoder_init(dev, drm_encoder,
&sti_tvout_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL);
@@ -790,6 +787,13 @@ static void sti_tvout_create_encoders(struct drm_device *dev,
tvout->hdmi = sti_tvout_create_hdmi_encoder(dev, tvout);
tvout->hda = sti_tvout_create_hda_encoder(dev, tvout);
tvout->dvo = sti_tvout_create_dvo_encoder(dev, tvout);
+
+ tvout->hdmi->possible_clones = drm_encoder_mask(tvout->hdmi) |
+ drm_encoder_mask(tvout->hda) | drm_encoder_mask(tvout->dvo);
+ tvout->hda->possible_clones = drm_encoder_mask(tvout->hdmi) |
+ drm_encoder_mask(tvout->hda) | drm_encoder_mask(tvout->dvo);
+ tvout->dvo->possible_clones = drm_encoder_mask(tvout->hdmi) |
+ drm_encoder_mask(tvout->hda) | drm_encoder_mask(tvout->dvo);
}
static void sti_tvout_destroy_encoders(struct sti_tvout *tvout)
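
The sti_tvout hunk above replaces the hard-coded 1 << n clone masks with drm_encoder_mask() and, importantly, includes each encoder's own bit in its possible_clones, which the DRM core expects. A small sketch of the mask arithmetic, with plain indices standing in for drm_encoder_index():

#include <assert.h>

/* mirrors drm_encoder_mask(): one bit per encoder index */
static unsigned int encoder_mask(unsigned int index)
{
	return 1u << index;
}

int main(void)
{
	unsigned int hdmi = 0, hda = 1, dvo = 2;
	unsigned int clones = encoder_mask(hdmi) | encoder_mask(hda) |
			      encoder_mask(dvo);

	/* every encoder's clone mask must include the encoder itself */
	assert(clones & encoder_mask(hdmi));
	assert(clones == 0x7);
	return 0;
}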
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 9dee4e430de5..5a9f9aca8bc2 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -54,8 +54,7 @@ static int stm_gem_cma_dumb_create(struct drm_file *file,
DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
static struct drm_driver drv_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
.date = "20170330",
@@ -68,8 +67,6 @@ static struct drm_driver drv_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 0ab32fee6c1b..a03a642c147c 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -8,13 +8,17 @@
#include <linux/clk.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <drm/drmP.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/bridge/dw_mipi_dsi.h>
+
#include <video/mipi_display.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
+
#define HWVER_130 0x31333000 /* IP version 1.30 */
#define HWVER_131 0x31333100 /* IP version 1.31 */
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 2fe6c4a8d915..3ab4fbf8eb0d 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -26,6 +26,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -922,6 +923,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
};
static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = ltdc_plane_atomic_check,
.atomic_update = ltdc_plane_atomic_update,
.atomic_disable = ltdc_plane_atomic_disable,
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 78d8c3afe825..4e29f4fe4a05 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -6,21 +6,23 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <linux/component.h>
-#include <linux/list.h>
-#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/reset.h>
-
#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 9d8504f813a4..3a153648b369 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -6,12 +6,6 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_probe_helper.h>
-
#include <linux/clk-provider.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
@@ -21,6 +15,13 @@
#include <video/videomode.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 1a1b52e6f73e..a5757b11b730 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -8,16 +8,19 @@
#include <linux/component.h>
#include <linux/kfifo.h>
+#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
@@ -38,7 +41,7 @@ static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv,
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
/* Generic Operations */
.fops = &sun4i_drv_fops,
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 35c040716680..1568f68f9a9e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drmP.h>
#include "sun4i_drv.h"
#include "sun4i_framebuffer.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index 346c8071bd38..ec2a032e07b9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -3,9 +3,6 @@
* Copyright (C) 2017 Free Electrons
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
@@ -16,6 +13,13 @@
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane.h>
+
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 9c3f99339b82..eb8071a4d6d0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -5,23 +5,24 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -639,9 +640,10 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
drm_connector_helper_add(&hdmi->connector,
&sun4i_hdmi_connector_helper_funcs);
- ret = drm_connector_init(drm, &hdmi->connector,
- &sun4i_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
+ ret = drm_connector_init_with_ddc(drm, &hdmi->connector,
+ &sun4i_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA,
+ hdmi->ddc_i2c);
if (ret) {
dev_err(dev,
"Couldn't initialise the HDMI connector\n");
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index e72dd4de90ce..c04f4ba0d69d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -7,9 +7,8 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
#include "sun4i_backend.h"
#include "sun4i_frontend.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 3a3ba99fed22..7fbf425acb55 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -6,10 +6,10 @@
#include <linux/clk.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index a901ec689b62..aac56983f208 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -8,10 +8,10 @@
#include <linux/clk.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 64c43ee6bd92..690aeb822704 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -6,7 +6,15 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
@@ -14,18 +22,12 @@
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include <uapi/drm/drm_mode.h>
-#include <linux/component.h>
-#include <linux/ioport.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-
#include "sun4i_crtc.h"
#include "sun4i_dotclock.h"
#include "sun4i_drv.h"
@@ -478,7 +480,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
struct drm_connector *connector = sun4i_tcon_get_connector(encoder);
- struct drm_display_info display_info = connector->display_info;
+ const struct drm_display_info *info = &connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
@@ -539,7 +541,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
- if (display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
/*
@@ -557,10 +559,10 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+ if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
clk_set_phase(tcon->dclk, 240);
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
clk_set_phase(tcon->dclk, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index f998153c141f..39c15282e448 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -8,14 +8,16 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a1fc8b520985..472f73985deb 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -9,19 +9,20 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/crc-ccitt.h>
+#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/phy/phy-mipi-dphy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/slab.h>
-#include <linux/phy/phy.h>
-#include <linux/phy/phy-mipi-dphy.h>
-
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index b8c059f1a118..781955dd4995 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -3,7 +3,7 @@
* Copyright (C) Jernej Skrabec <jernej.skrabec@siol.net>
*/
-#include <drm/drmP.h>
+#include <drm/drm_print.h>
#include "sun8i_csc.h"
#include "sun8i_mixer.h"
@@ -18,16 +18,59 @@ static const u32 ccsc_base[2][2] = {
 * The first three values in each line are multiplication factors and the
 * last value is a constant, which is added at the end.
*/
-static const u32 yuv2rgb[] = {
- 0x000004A8, 0x00000000, 0x00000662, 0xFFFC845A,
- 0x000004A8, 0xFFFFFE6F, 0xFFFFFCBF, 0x00021DF4,
- 0x000004A8, 0x00000813, 0x00000000, 0xFFFBAC4A,
+
+static const u32 yuv2rgb[2][2][12] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x000004A8, 0x00000000, 0x00000662, 0xFFFC8451,
+ 0x000004A8, 0xFFFFFE6F, 0xFFFFFCC0, 0x00021E4D,
+ 0x000004A8, 0x00000811, 0x00000000, 0xFFFBACA9,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x000004A8, 0x00000000, 0x0000072B, 0xFFFC1F99,
+ 0x000004A8, 0xFFFFFF26, 0xFFFFFDDF, 0x00013383,
+ 0x000004A8, 0x00000873, 0x00000000, 0xFFFB7BEF,
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x00000400, 0x00000000, 0x0000059B, 0xFFFD322E,
+ 0x00000400, 0xFFFFFEA0, 0xFFFFFD25, 0x00021DD5,
+ 0x00000400, 0x00000716, 0x00000000, 0xFFFC74BD,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x00000400, 0x00000000, 0x0000064C, 0xFFFCD9B4,
+ 0x00000400, 0xFFFFFF41, 0xFFFFFE21, 0x00014F96,
+ 0x00000400, 0x0000076C, 0x00000000, 0xFFFC49EF,
+ }
+ },
};
-static const u32 yvu2rgb[] = {
- 0x000004A8, 0x00000662, 0x00000000, 0xFFFC845A,
- 0x000004A8, 0xFFFFFCBF, 0xFFFFFE6F, 0x00021DF4,
- 0x000004A8, 0x00000000, 0x00000813, 0xFFFBAC4A,
+static const u32 yvu2rgb[2][2][12] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x000004A8, 0x00000662, 0x00000000, 0xFFFC8451,
+ 0x000004A8, 0xFFFFFCC0, 0xFFFFFE6F, 0x00021E4D,
+ 0x000004A8, 0x00000000, 0x00000811, 0xFFFBACA9,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x000004A8, 0x0000072B, 0x00000000, 0xFFFC1F99,
+ 0x000004A8, 0xFFFFFDDF, 0xFFFFFF26, 0x00013383,
+ 0x000004A8, 0x00000000, 0x00000873, 0xFFFB7BEF,
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x00000400, 0x0000059B, 0x00000000, 0xFFFD322E,
+ 0x00000400, 0xFFFFFD25, 0xFFFFFEA0, 0x00021DD5,
+ 0x00000400, 0x00000000, 0x00000716, 0xFFFC74BD,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x00000400, 0x0000064C, 0x00000000, 0xFFFCD9B4,
+ 0x00000400, 0xFFFFFE21, 0xFFFFFF41, 0x00014F96,
+ 0x00000400, 0x00000000, 0x0000076C, 0xFFFC49EF,
+ }
+ },
};
/*
@@ -53,57 +96,98 @@ static const u32 yvu2rgb[] = {
* c20 c21 c22 [d2 const2]
*/
-static const u32 yuv2rgb_de3[] = {
- 0x0002542a, 0x00000000, 0x0003312a, 0xffc00000,
- 0x0002542a, 0xffff376b, 0xfffe5fc3, 0xfe000000,
- 0x0002542a, 0x000408d3, 0x00000000, 0xfe000000,
+static const u32 yuv2rgb_de3[2][2][12] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x0002542A, 0x00000000, 0x0003312A, 0xFFC00000,
+ 0x0002542A, 0xFFFF376B, 0xFFFE5FC3, 0xFE000000,
+ 0x0002542A, 0x000408D2, 0x00000000, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x0002542A, 0x00000000, 0x000395E2, 0xFFC00000,
+ 0x0002542A, 0xFFFF92D2, 0xFFFEEF27, 0xFE000000,
+ 0x0002542A, 0x0004398C, 0x00000000, 0xFE000000,
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x00020000, 0x00000000, 0x0002CDD2, 0x00000000,
+ 0x00020000, 0xFFFF4FCE, 0xFFFE925D, 0xFE000000,
+ 0x00020000, 0x00038B43, 0x00000000, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x00020000, 0x00000000, 0x0003264C, 0x00000000,
+ 0x00020000, 0xFFFFA018, 0xFFFF1053, 0xFE000000,
+ 0x00020000, 0x0003B611, 0x00000000, 0xFE000000,
+ }
+ },
};
-static const u32 yvu2rgb_de3[] = {
- 0x0002542a, 0x0003312a, 0x00000000, 0xffc00000,
- 0x0002542a, 0xfffe5fc3, 0xffff376b, 0xfe000000,
- 0x0002542a, 0x00000000, 0x000408d3, 0xfe000000,
+static const u32 yvu2rgb_de3[2][2][12] = {
+ [DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x0002542A, 0x0003312A, 0x00000000, 0xFFC00000,
+ 0x0002542A, 0xFFFE5FC3, 0xFFFF376B, 0xFE000000,
+ 0x0002542A, 0x00000000, 0x000408D2, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x0002542A, 0x000395E2, 0x00000000, 0xFFC00000,
+ 0x0002542A, 0xFFFEEF27, 0xFFFF92D2, 0xFE000000,
+ 0x0002542A, 0x00000000, 0x0004398C, 0xFE000000,
+ }
+ },
+ [DRM_COLOR_YCBCR_FULL_RANGE] = {
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x00020000, 0x0002CDD2, 0x00000000, 0x00000000,
+ 0x00020000, 0xFFFE925D, 0xFFFF4FCE, 0xFE000000,
+ 0x00020000, 0x00000000, 0x00038B43, 0xFE000000,
+ },
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x00020000, 0x0003264C, 0x00000000, 0x00000000,
+ 0x00020000, 0xFFFF1053, 0xFFFFA018, 0xFE000000,
+ 0x00020000, 0x00000000, 0x0003B611, 0xFE000000,
+ }
+ },
};
static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
- enum sun8i_csc_mode mode)
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range)
{
const u32 *table;
- int i, data;
+ u32 base_reg;
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb;
+ table = yuv2rgb[range][encoding];
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb;
+ table = yvu2rgb[range][encoding];
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
- for (i = 0; i < 12; i++) {
- data = table[i];
- /* For some reason, 0x200 must be added to constant parts */
- if (((i + 1) & 3) == 0)
- data += 0x200;
- regmap_write(map, SUN8I_CSC_COEFF(base, i), data);
- }
+ base_reg = SUN8I_CSC_COEFF(base, 0);
+ regmap_bulk_write(map, base_reg, table, 12);
}
static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
- enum sun8i_csc_mode mode)
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range)
{
const u32 *table;
u32 base_reg;
switch (mode) {
case SUN8I_CSC_MODE_YUV2RGB:
- table = yuv2rgb_de3;
+ table = yuv2rgb_de3[range][encoding];
break;
case SUN8I_CSC_MODE_YVU2RGB:
- table = yvu2rgb_de3;
+ table = yvu2rgb_de3[range][encoding];
break;
default:
DRM_WARN("Wrong CSC mode specified.\n");
@@ -142,19 +226,22 @@ static void sun8i_de3_ccsc_enable(struct regmap *map, int layer, bool enable)
}
void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
- enum sun8i_csc_mode mode)
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range)
{
u32 base;
if (mixer->cfg->is_de3) {
- sun8i_de3_ccsc_set_coefficients(mixer->engine.regs,
- layer, mode);
+ sun8i_de3_ccsc_set_coefficients(mixer->engine.regs, layer,
+ mode, encoding, range);
return;
}
base = ccsc_base[mixer->cfg->ccsc][layer];
- sun8i_csc_set_coefficients(mixer->engine.regs, base, mode);
+ sun8i_csc_set_coefficients(mixer->engine.regs, base,
+ mode, encoding, range);
}
void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
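
The sun8i_csc.c rework above turns each flat 12-entry coefficient list into a [range][encoding] table, folds the old "+ 0x200" constant bias into the table values themselves, and writes all 12 words with a single regmap_bulk_write() instead of a per-register loop. A hedged sketch of the lookup shape, with the values elided and the register write stubbed out:

#include <stdio.h>

enum range    { RANGE_LIMITED, RANGE_FULL };
enum encoding { ENC_BT601, ENC_BT709 };

/* same shape as the new yuv2rgb tables: [range][encoding][12 words] */
static const unsigned int coeff[2][2][12] = { { { 0 } } };

static void set_coefficients(enum range r, enum encoding e)
{
	const unsigned int *table = coeff[r][e];

	/* stands in for one regmap_bulk_write(map, base, table, 12) */
	for (int i = 0; i < 12; i++)
		printf("reg[%d] = 0x%08X\n", i, table[i]);
}

int main(void)
{
	set_coefficients(RANGE_LIMITED, ENC_BT709);
	return 0;
}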
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
index dce4c444bcd6..f42441b1b14d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
@@ -6,6 +6,8 @@
#ifndef _SUN8I_CSC_H_
#define _SUN8I_CSC_H_
+#include <drm/drm_color_mgmt.h>
+
struct sun8i_mixer;
/* VI channel CSC units offsets */
@@ -26,7 +28,9 @@ enum sun8i_csc_mode {
};
void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
- enum sun8i_csc_mode mode);
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range);
void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable);
#endif
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 39d8509d96a0..8ca5af0c912f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -8,9 +8,8 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <drm/drm_of.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include "sun8i_dw_hdmi.h"
#include "sun8i_tcon_top.h"
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index c2eedf58bf4b..8b803eb903b8 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -7,7 +7,13 @@
* Copyright (C) 2015 NextThing Co
*/
-#include <drm/drmP.h>
+#include <linux/component.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/reset.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
@@ -15,12 +21,6 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <linux/component.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/reset.h>
-
#include "sun4i_drv.h"
#include "sun8i_mixer.h"
#include "sun8i_ui_layer.h"
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 3267d0f9b9b2..75d8e60c149d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -1,18 +1,18 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
-#include <drm/drmP.h>
-
-#include <dt-bindings/clock/sun8i-tcon-top.h>
#include <linux/bitfield.h>
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <dt-bindings/clock/sun8i-tcon-top.h>
+
#include "sun8i_tcon_top.h"
struct sun8i_tcon_top_quirks {
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index dd2a1c851939..c87fd842918e 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -13,11 +13,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#include "sun8i_ui_layer.h"
#include "sun8i_mixer.h"
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index bd0e6a52d1d8..42d445d23773 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -11,7 +11,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#include "sun8i_vi_layer.h"
#include "sun8i_mixer.h"
@@ -232,7 +231,9 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
- sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc);
+ sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc,
+ state->color_encoding,
+ state->color_range);
sun8i_csc_enable_ccsc(mixer, channel, true);
} else {
sun8i_csc_enable_ccsc(mixer, channel, false);
@@ -441,6 +442,7 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
struct sun8i_mixer *mixer,
int index)
{
+ u32 supported_encodings, supported_ranges;
struct sun8i_vi_layer *layer;
unsigned int plane_cnt;
int ret;
@@ -469,6 +471,22 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
+ supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709);
+
+ supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE);
+
+ ret = drm_plane_create_color_properties(&layer->plane,
+ supported_encodings,
+ supported_ranges,
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't add encoding and range properties!\n");
+ return ERR_PTR(ret);
+ }
+
drm_plane_helper_add(&layer->plane, &sun8i_vi_layer_helper_funcs);
layer->mixer = mixer;
layer->channel = index;
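
The sun8i_vi_layer hunk above advertises the encodings and ranges a plane accepts as OR'ed BIT() masks and picks defaults from within those sets via drm_plane_create_color_properties(). A standalone sketch of the invariant, using illustrative enums rather than the DRM ones:

#include <assert.h>

enum { ENC_BT601, ENC_BT709 };
enum { RANGE_LIMITED, RANGE_FULL };

#define BIT_(n) (1u << (n))

int main(void)
{
	unsigned int encodings = BIT_(ENC_BT601) | BIT_(ENC_BT709);
	unsigned int ranges    = BIT_(RANGE_LIMITED) | BIT_(RANGE_FULL);

	/* the chosen defaults must be members of the advertised sets */
	assert(encodings & BIT_(ENC_BT709));
	assert(ranges & BIT_(RANGE_LIMITED));
	return 0;
}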
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index 3a1476818c65..c243af156ee7 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -32,11 +32,14 @@
#include <linux/module.h>
-#include <drm/drmP.h>
-#include "tdfx_drv.h"
-
-#include <drm/drm_pciids.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_pciids.h>
+
+#include "tdfx_drv.h"
static struct pci_device_id pciidlist[] = {
tdfx_PCI_IDS
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index ddb802bce0a3..870904bfad78 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -888,33 +888,33 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
- DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
#endif
};
@@ -1004,7 +1004,7 @@ static int tegra_debugfs_init(struct drm_minor *minor)
#endif
static struct drm_driver tegra_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+ .driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index df53a46285a3..0a3d925d5284 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -626,20 +626,19 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.vunmap = tegra_gem_prime_vunmap,
};
-struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
- struct drm_gem_object *gem,
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
exp_info.exp_name = KBUILD_MODNAME;
- exp_info.owner = drm->driver->fops->owner;
+ exp_info.owner = gem->dev->driver->fops->owner;
exp_info.ops = &tegra_gem_prime_dmabuf_ops;
exp_info.size = gem->size;
exp_info.flags = flags;
exp_info.priv = gem;
- return drm_gem_dmabuf_export(drm, &exp_info);
+ return drm_gem_dmabuf_export(gem->dev, &exp_info);
}
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
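
The tegra/gem.c hunk above drops the explicit drm_device argument from the prime-export path: the device is now recovered from the object's own back-pointer (gem->dev), removing a parameter that could only ever be redundant or inconsistent. A sketch of the idea with stand-in types:

#include <assert.h>
#include <string.h>

struct device_ { const char *name; };
struct gem_obj { struct device_ *dev; };

/* was: export(struct device_ *dev, struct gem_obj *gem, ...) */
static const char *export_owner(const struct gem_obj *gem)
{
	return gem->dev->name;	/* derive the device from the object */
}

int main(void)
{
	struct device_ dev = { "tegra" };
	struct gem_obj gem = { &dev };

	assert(strcmp(export_owner(&gem), "tegra") == 0);
	return 0;
}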
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 413eae83ad81..f1f758b25886 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -70,8 +70,7 @@ extern const struct vm_operations_struct tegra_bo_vm_ops;
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
-struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
- struct drm_gem_object *gem,
+struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
int flags);
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
struct dma_buf *buf);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 650d162e374b..e9dd5e5cb4e7 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -4,16 +4,20 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_flip_work.h>
-#include <drm/drm_plane_helper.h>
-#include <linux/workqueue.h>
-#include <linux/completion.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_graph.h>
-#include <linux/math64.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -646,9 +650,6 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct drm_display_mode *mode = &state->mode;
- int ret;
-
/* If we are not active we don't care */
if (!state->active)
return 0;
@@ -660,12 +661,6 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
- ret = tilcdc_crtc_mode_valid(crtc, mode);
- if (ret) {
- dev_dbg(crtc->dev->dev, "Mode \"%s\" not valid", mode->name);
- return -EINVAL;
- }
-
return 0;
}
@@ -717,13 +712,6 @@ static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
.disable_vblank = tilcdc_crtc_disable_vblank,
};
-static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
- .mode_fixup = tilcdc_crtc_mode_fixup,
- .atomic_check = tilcdc_crtc_atomic_check,
- .atomic_enable = tilcdc_crtc_atomic_enable,
- .atomic_disable = tilcdc_crtc_atomic_disable,
-};
-
int tilcdc_crtc_max_width(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -738,7 +726,9 @@ int tilcdc_crtc_max_width(struct drm_crtc *crtc)
return max_width;
}
-int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static enum drm_mode_status
+tilcdc_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
struct tilcdc_drm_private *priv = crtc->dev->dev_private;
unsigned int bandwidth;
@@ -826,6 +816,14 @@ int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
return MODE_OK;
}
+static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
+ .mode_valid = tilcdc_crtc_mode_valid,
+ .mode_fixup = tilcdc_crtc_mode_fixup,
+ .atomic_check = tilcdc_crtc_atomic_check,
+ .atomic_enable = tilcdc_crtc_atomic_enable,
+ .atomic_disable = tilcdc_crtc_atomic_disable,
+};
+
void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
const struct tilcdc_panel_info *info)
{
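
The tilcdc hunks above retire the hand-rolled connector wrapper (see the tilcdc_external.c diff further down) and instead expose mode filtering through the CRTC helper's .mode_valid hook, so the DRM core applies it once for every probed mode. A hedged sketch of the callback shape, with stand-in types; the 126000 kHz limit mirrors the driver's TILCDC_DEFAULT_MAX_PIXELCLOCK:

#include <assert.h>

enum mode_status { MODE_OK_ = 0, MODE_CLOCK_HIGH_ };

struct display_mode { int clock_khz; };

struct crtc_helper_funcs {
	enum mode_status (*mode_valid)(const struct display_mode *m);
};

static enum mode_status crtc_mode_valid(const struct display_mode *m)
{
	return m->clock_khz <= 126000 ? MODE_OK_ : MODE_CLOCK_HIGH_;
}

static const struct crtc_helper_funcs helpers = {
	.mode_valid = crtc_mode_valid,	/* core probes modes through this */
};

int main(void)
{
	struct display_mode ok = { 74250 }, bad = { 148500 };

	assert(helpers.mode_valid(&ok) == MODE_OK_);
	assert(helpers.mode_valid(&bad) == MODE_CLOCK_HIGH_);
	return 0;
}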
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 7339bab3a0a1..2a9e67597375 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -7,19 +7,30 @@
/* LCDC DRM driver, based on da8xx-fb */
#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/suspend.h>
-#include <drm/drm_atomic.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_mm.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "tilcdc_drv.h"
+#include "tilcdc_external.h"
+#include "tilcdc_panel.h"
#include "tilcdc_regs.h"
#include "tilcdc_tfp410.h"
-#include "tilcdc_panel.h"
-#include "tilcdc_external.h"
static LIST_HEAD(module_list);
@@ -188,7 +199,6 @@ static void tilcdc_fini(struct drm_device *dev)
drm_kms_helper_poll_fini(dev);
drm_irq_uninstall(dev);
drm_mode_config_cleanup(dev);
- tilcdc_remove_external_device(dev);
if (priv->clk)
clk_put(priv->clk);
@@ -501,8 +511,7 @@ static int tilcdc_debugfs_init(struct drm_minor *minor)
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver tilcdc_driver = {
- .driver_features = (DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC),
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = tilcdc_irq,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_print_info = drm_gem_cma_print_info,
@@ -511,8 +520,6 @@ static struct drm_driver tilcdc_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 99432296c0ff..18815e75ca4f 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -7,21 +7,24 @@
#ifndef __TILCDC_DRV_H__
#define __TILCDC_DRV_H__
-#include <linux/clk.h>
#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/list.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
+#include <linux/irqreturn.h>
+
+#include <drm/drm_print.h>
+
+struct clk;
+struct workqueue_struct;
+
+struct drm_connector;
+struct drm_connector_helper_funcs;
+struct drm_crtc;
+struct drm_device;
+struct drm_display_mode;
+struct drm_encoder;
+struct drm_framebuffer;
+struct drm_minor;
+struct drm_pending_vblank_event;
+struct drm_plane;
/* Defaulting to pixel clock defined on AM335x */
#define TILCDC_DEFAULT_MAX_PIXELCLOCK 126000
@@ -74,7 +77,6 @@ struct tilcdc_drm_private {
struct drm_encoder *external_encoder;
struct drm_connector *external_connector;
- const struct drm_connector_helper_funcs *connector_funcs;
bool is_registered;
bool is_componentized;
@@ -156,7 +158,6 @@ void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
const struct tilcdc_panel_info *info);
void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
-int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
int tilcdc_crtc_max_width(struct drm_crtc *crtc);
void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 7050eb4cf152..43d756b7810e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -6,6 +6,7 @@
#include <linux/component.h>
#include <linux/of_graph.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_of.h>
@@ -37,64 +38,6 @@ static const struct tilcdc_panel_info panel_info_default = {
.raster_order = 0,
};
-static int tilcdc_external_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct tilcdc_drm_private *priv = connector->dev->dev_private;
- int ret;
-
- ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
- if (ret != MODE_OK)
- return ret;
-
- BUG_ON(priv->external_connector != connector);
- BUG_ON(!priv->connector_funcs);
-
- /* If the connector has its own mode_valid call it. */
- if (!IS_ERR(priv->connector_funcs) &&
- priv->connector_funcs->mode_valid)
- return priv->connector_funcs->mode_valid(connector, mode);
-
- return MODE_OK;
-}
-
-static int tilcdc_add_external_connector(struct drm_device *dev,
- struct drm_connector *connector)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
- struct drm_connector_helper_funcs *connector_funcs;
-
- /* There should never be more than one connector */
- if (WARN_ON(priv->external_connector))
- return -EINVAL;
-
- priv->external_connector = connector;
- connector_funcs = devm_kzalloc(dev->dev, sizeof(*connector_funcs),
- GFP_KERNEL);
- if (!connector_funcs)
- return -ENOMEM;
-
- /* connector->helper_private contains always struct
- * connector_helper_funcs pointer. For tilcdc crtc to have a
- * say if a specific mode is Ok, we need to install our own
- * helper functions. In our helper functions we copy
- * everything else but use our own mode_valid() (above).
- */
- if (connector->helper_private) {
- priv->connector_funcs = connector->helper_private;
- *connector_funcs = *priv->connector_funcs;
- } else {
- priv->connector_funcs = ERR_PTR(-ENOENT);
- }
- connector_funcs->mode_valid = tilcdc_external_mode_valid;
- drm_connector_helper_add(connector, connector_funcs);
-
- dev_dbg(dev->dev, "External connector '%s' connected\n",
- connector->name);
-
- return 0;
-}
-
static
struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
struct drm_encoder *encoder)
@@ -115,7 +58,6 @@ struct drm_connector *tilcdc_encoder_find_connector(struct drm_device *ddev,
int tilcdc_add_component_encoder(struct drm_device *ddev)
{
struct tilcdc_drm_private *priv = ddev->dev_private;
- struct drm_connector *connector;
struct drm_encoder *encoder;
list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
@@ -127,28 +69,17 @@ int tilcdc_add_component_encoder(struct drm_device *ddev)
return -ENODEV;
}
- connector = tilcdc_encoder_find_connector(ddev, encoder);
+ priv->external_connector =
+ tilcdc_encoder_find_connector(ddev, encoder);
- if (!connector)
+ if (!priv->external_connector)
return -ENODEV;
/* Only tda998x is supported at the moment. */
tilcdc_crtc_set_simulate_vesa_sync(priv->crtc, true);
tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_tda998x);
- return tilcdc_add_external_connector(ddev, connector);
-}
-
-void tilcdc_remove_external_device(struct drm_device *dev)
-{
- struct tilcdc_drm_private *priv = dev->dev_private;
-
- /* Restore the original helper functions, if any. */
- if (IS_ERR(priv->connector_funcs))
- drm_connector_helper_add(priv->external_connector, NULL);
- else if (priv->connector_funcs)
- drm_connector_helper_add(priv->external_connector,
- priv->connector_funcs);
+ return 0;
}
static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
@@ -159,7 +90,6 @@ static
int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
{
struct tilcdc_drm_private *priv = ddev->dev_private;
- struct drm_connector *connector;
int ret;
priv->external_encoder->possible_crtcs = BIT(0);
@@ -172,13 +102,12 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
tilcdc_crtc_set_panel_info(priv->crtc, &panel_info_default);
- connector = tilcdc_encoder_find_connector(ddev, priv->external_encoder);
- if (!connector)
+ priv->external_connector =
+ tilcdc_encoder_find_connector(ddev, priv->external_encoder);
+ if (!priv->external_connector)
return -ENODEV;
- ret = tilcdc_add_external_connector(ddev, connector);
-
- return ret;
+ return 0;
}
int tilcdc_attach_external_device(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.h b/drivers/gpu/drm/tilcdc/tilcdc_external.h
index 7024b4877fdf..fb4476694cd8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.h
@@ -8,7 +8,6 @@
#define __TILCDC_EXTERNAL_H__
int tilcdc_add_component_encoder(struct drm_device *dev);
-void tilcdc_remove_external_device(struct drm_device *dev);
int tilcdc_get_external_components(struct device *dev,
struct component_match **match);
int tilcdc_attach_external_device(struct drm_device *ddev);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 22b100d2e174..5584e656b857 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -4,14 +4,17 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
-#include <drm/drm_atomic_helper.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include "tilcdc_drv.h"
@@ -160,14 +163,6 @@ static int panel_connector_get_modes(struct drm_connector *connector)
return i;
}
-static int panel_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct tilcdc_drm_private *priv = connector->dev->dev_private;
- /* our only constraints are what the crtc can generate: */
- return tilcdc_crtc_mode_valid(priv->crtc, mode);
-}
-
static struct drm_encoder *panel_connector_best_encoder(
struct drm_connector *connector)
{
@@ -185,7 +180,6 @@ static const struct drm_connector_funcs panel_connector_funcs = {
static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
.get_modes = panel_connector_get_modes,
- .mode_valid = panel_connector_mode_valid,
.best_encoder = panel_connector_best_encoder,
};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 8c2776acdf99..3abb9641f212 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -4,12 +4,10 @@
* Author: Jyri Sarha <jsarha@ti.com>
*/
-#include <drm/drmP.h>
-
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
-#include <uapi/drm/drm_fourcc.h>
+#include <drm/drm_fourcc.h>
#include "tilcdc_drv.h"
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 62d014c20988..525dc1c0f1c1 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -4,12 +4,14 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
-#include <linux/i2c.h>
#include <linux/gpio.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_gpio.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include "tilcdc_drv.h"
@@ -173,14 +175,6 @@ static int tfp410_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int tfp410_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct tilcdc_drm_private *priv = connector->dev->dev_private;
- /* our only constraints are what the crtc can generate: */
- return tilcdc_crtc_mode_valid(priv->crtc, mode);
-}
-
static struct drm_encoder *tfp410_connector_best_encoder(
struct drm_connector *connector)
{
@@ -199,7 +193,6 @@ static const struct drm_connector_funcs tfp410_connector_funcs = {
static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
.get_modes = tfp410_connector_get_modes,
- .mode_valid = tfp410_connector_mode_valid,
.best_encoder = tfp410_connector_best_encoder,
};
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 87819c82bcce..504763423d46 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -1,21 +1,21 @@
# SPDX-License-Identifier: GPL-2.0-only
-menuconfig DRM_TINYDRM
- tristate "Support for simple displays"
- depends on DRM
+
+config DRM_GM12U320
+ tristate "GM12U320 driver for USB projectors"
+ depends on DRM && USB
select DRM_KMS_HELPER
- select DRM_KMS_CMA_HELPER
+ select DRM_GEM_SHMEM_HELPER
help
- Choose this option if you have a tinydrm supported display.
- If M is selected the module will be called tinydrm.
-
-config TINYDRM_MIPI_DBI
- tristate
+ This is a KMS driver for projectors which use the GM12U320 chipset
+ for video transfer over USB2/3, such as the Acer C120 mini projector.
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
- depends on DRM_TINYDRM && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
help
DRM driver for the following HX8357D panels:
* YX350HV15-T 3.5" 340x350 TFT (Adafruit 3.5")
@@ -24,8 +24,10 @@ config TINYDRM_HX8357D
config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels"
- depends on DRM_TINYDRM && SPI
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
help
DRM driver for the following Ilitek ILI9225 panels:
* No-name 2.2" color screen module
@@ -34,9 +36,11 @@ config TINYDRM_ILI9225
config TINYDRM_ILI9341
tristate "DRM support for ILI9341 display panels"
- depends on DRM_TINYDRM && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
help
DRM driver for the following Ilitek ILI9341 panels:
* YX240QV29-T 2.4" 240x320 TFT (Adafruit 2.4")
@@ -45,16 +49,20 @@ config TINYDRM_ILI9341
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
- depends on DRM_TINYDRM && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
help
DRM driver for the Multi-Inno MI0283QT display panel
If M is selected the module will be called mi0283qt.
config TINYDRM_REPAPER
tristate "DRM support for Pervasive Displays RePaper panels (V231)"
- depends on DRM_TINYDRM && SPI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
depends on THERMAL || !THERMAL
help
DRM driver for the following Pervasive Displays panels:
@@ -67,8 +75,10 @@ config TINYDRM_REPAPER
config TINYDRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
- depends on DRM_TINYDRM && SPI
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
help
DRM driver for the following Sitronix ST7586 panels:
* LEGO MINDSTORMS EV3
@@ -77,9 +87,11 @@ config TINYDRM_ST7586
config TINYDRM_ST7735R
tristate "DRM support for Sitronix ST7735R display panels"
- depends on DRM_TINYDRM && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- select TINYDRM_MIPI_DBI
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
help
DRM driver for Sitronix ST7735R with one of the following LCDs:
* JD-T18003-T01 1.8" 128x160 TFT
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tiny/Makefile
index 48ec8ed9dc16..896cf31132d3 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -1,10 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_TINYDRM) += core/
-# Controllers
-obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
-
-# Displays
+obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
new file mode 100644
index 000000000000..b6f47b8cf240
--- /dev/null
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+static bool eco_mode;
+module_param(eco_mode, bool, 0644);
+MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
+
+#define DRIVER_NAME "gm12u320"
+#define DRIVER_DESC "Grain Media GM12U320 USB projector display"
+#define DRIVER_DATE "2019"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 1
+
+/*
+ * The DLP has an actual width of 854 pixels, but that is not a multiple
+ * of 8, breaking things left and right, so we export a width of 848.
+ */
+#define GM12U320_USER_WIDTH 848
+#define GM12U320_REAL_WIDTH 854
+#define GM12U320_HEIGHT 480
+
+#define GM12U320_BLOCK_COUNT 20
+
+#define MISC_RCV_EPT 1
+#define DATA_RCV_EPT 2
+#define DATA_SND_EPT 3
+#define MISC_SND_EPT 4
+
+#define DATA_BLOCK_HEADER_SIZE 84
+#define DATA_BLOCK_CONTENT_SIZE 64512
+#define DATA_BLOCK_FOOTER_SIZE 20
+#define DATA_BLOCK_SIZE (DATA_BLOCK_HEADER_SIZE + \
+ DATA_BLOCK_CONTENT_SIZE + \
+ DATA_BLOCK_FOOTER_SIZE)
+#define DATA_LAST_BLOCK_CONTENT_SIZE 4032
+#define DATA_LAST_BLOCK_SIZE (DATA_BLOCK_HEADER_SIZE + \
+ DATA_LAST_BLOCK_CONTENT_SIZE + \
+ DATA_BLOCK_FOOTER_SIZE)
+
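Editorial aside, not part of the patch: the block sizes above tile exactly one
24bpp frame at the panel's real width, which a compile-time check can confirm.
A sketch using the macros just defined:

/*
 * Sketch only: 19 full blocks plus the short last block carry exactly
 * one 854x480 frame at 3 bytes per pixel (1229760 bytes).
 */
_Static_assert((GM12U320_BLOCK_COUNT - 1) * DATA_BLOCK_CONTENT_SIZE +
	       DATA_LAST_BLOCK_CONTENT_SIZE ==
	       GM12U320_REAL_WIDTH * GM12U320_HEIGHT * 3,
	       "data blocks must cover one full frame");
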
+#define CMD_SIZE 31
+#define READ_STATUS_SIZE 13
+#define MISC_VALUE_SIZE 4
+
+#define CMD_TIMEOUT msecs_to_jiffies(200)
+#define DATA_TIMEOUT msecs_to_jiffies(1000)
+#define IDLE_TIMEOUT msecs_to_jiffies(2000)
+#define FIRST_FRAME_TIMEOUT msecs_to_jiffies(2000)
+
+#define MISC_REQ_GET_SET_ECO_A 0xff
+#define MISC_REQ_GET_SET_ECO_B 0x35
+/* Windows driver does this once every second, with arg d = 1, other args 0 */
+#define MISC_REQ_UNKNOWN1_A 0xff
+#define MISC_REQ_UNKNOWN1_B 0x38
+/* Windows driver does this on init, with args a = 0, b = 0, c = 0xa0, d = 4 */
+#define MISC_REQ_UNKNOWN2_A 0xa5
+#define MISC_REQ_UNKNOWN2_B 0x00
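A hedged illustration (not in the patch): the once-a-second request described in
the comment above would be issued through gm12u320_misc_request(), defined
further down in this file, roughly like this:

/*
 * Hypothetical keep-alive mirroring the Windows driver's 1 Hz request:
 * arg d = 1, all other args 0.
 */
ret = gm12u320_misc_request(gm12u320, MISC_REQ_UNKNOWN1_A,
			    MISC_REQ_UNKNOWN1_B, 0x00, 0x00, 0x00, 0x01);
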
+
+struct gm12u320_device {
+ struct drm_device dev;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector conn;
+ struct usb_device *udev;
+ unsigned char *cmd_buf;
+ unsigned char *data_buf[GM12U320_BLOCK_COUNT];
+ bool pipe_enabled;
+ struct {
+ bool run;
+ struct workqueue_struct *workq;
+ struct work_struct work;
+ wait_queue_head_t waitq;
+ struct mutex lock;
+ struct drm_framebuffer *fb;
+ struct drm_rect rect;
+ } fb_update;
+};
+
+static const char cmd_data[CMD_SIZE] = {
+ 0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
+ 0x68, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x10, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x80, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const char cmd_draw[CMD_SIZE] = {
+ 0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0xfe,
+ 0x00, 0x00, 0x00, 0xc0, 0xd1, 0x05, 0x00, 0x40,
+ 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const char cmd_misc[CMD_SIZE] = {
+ 0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x80, 0x01, 0x10, 0xfd,
+ 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static const char data_block_header[DATA_BLOCK_HEADER_SIZE] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfb, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x04, 0x15, 0x00, 0x00, 0xfc, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0xdb
+};
+
+static const char data_last_block_header[DATA_BLOCK_HEADER_SIZE] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfb, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x2a, 0x00, 0x20, 0x00, 0xc0, 0x0f, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0xd7
+};
+
+static const char data_block_footer[DATA_BLOCK_FOOTER_SIZE] = {
+ 0xfb, 0x14, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x4f
+};
+
+static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
+{
+ int i, block_size;
+ const char *hdr;
+
+ gm12u320->cmd_buf = kmalloc(CMD_SIZE, GFP_KERNEL);
+ if (!gm12u320->cmd_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < GM12U320_BLOCK_COUNT; i++) {
+ if (i == GM12U320_BLOCK_COUNT - 1) {
+ block_size = DATA_LAST_BLOCK_SIZE;
+ hdr = data_last_block_header;
+ } else {
+ block_size = DATA_BLOCK_SIZE;
+ hdr = data_block_header;
+ }
+
+ gm12u320->data_buf[i] = kzalloc(block_size, GFP_KERNEL);
+ if (!gm12u320->data_buf[i])
+ return -ENOMEM;
+
+ memcpy(gm12u320->data_buf[i], hdr, DATA_BLOCK_HEADER_SIZE);
+ memcpy(gm12u320->data_buf[i] +
+ (block_size - DATA_BLOCK_FOOTER_SIZE),
+ data_block_footer, DATA_BLOCK_FOOTER_SIZE);
+ }
+
+ gm12u320->fb_update.workq = create_singlethread_workqueue(DRIVER_NAME);
+ if (!gm12u320->fb_update.workq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gm12u320_usb_free(struct gm12u320_device *gm12u320)
+{
+ int i;
+
+ if (gm12u320->fb_update.workq)
+ destroy_workqueue(gm12u320->fb_update.workq);
+
+ for (i = 0; i < GM12U320_BLOCK_COUNT; i++)
+ kfree(gm12u320->data_buf[i]);
+
+ kfree(gm12u320->cmd_buf);
+}
+
+static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
+ u8 req_a, u8 req_b,
+ u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d)
+{
+ int ret, len;
+
+ memcpy(gm12u320->cmd_buf, &cmd_misc, CMD_SIZE);
+ gm12u320->cmd_buf[20] = req_a;
+ gm12u320->cmd_buf[21] = req_b;
+ gm12u320->cmd_buf[22] = arg_a;
+ gm12u320->cmd_buf[23] = arg_b;
+ gm12u320->cmd_buf[24] = arg_c;
+ gm12u320->cmd_buf[25] = arg_d;
+
+ /* Send request */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, MISC_SND_EPT),
+ gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ if (ret || len != CMD_SIZE) {
+ dev_err(&gm12u320->udev->dev, "Misc. req. error %d\n", ret);
+ return -EIO;
+ }
+
+ /* Read value */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, MISC_RCV_EPT),
+ gm12u320->cmd_buf, MISC_VALUE_SIZE, &len,
+ DATA_TIMEOUT);
+ if (ret || len != MISC_VALUE_SIZE) {
+ dev_err(&gm12u320->udev->dev, "Misc. value error %d\n", ret);
+ return -EIO;
+ }
+ /* cmd_buf[0] now contains the read value, which we don't use */
+
+ /* Read status */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, MISC_RCV_EPT),
+ gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+ CMD_TIMEOUT);
+ if (ret || len != READ_STATUS_SIZE) {
+ dev_err(&gm12u320->udev->dev, "Misc. status error %d\n", ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void gm12u320_32bpp_to_24bpp_packed(u8 *dst, u8 *src, int len)
+{
+ while (len--) {
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ src++;
+ }
+}
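For clarity (editorial, not in the patch): XRGB8888 stores each pixel
little-endian as B, G, R, X in memory, so the helper above keeps three bytes
per pixel and skips the padding byte. A minimal check, assuming the helper is
in scope:

/* one XRGB8888 pixel in memory order B, G, R, X */
u8 src[4] = { 0x11, 0x22, 0x33, 0x00 };
u8 dst[3];

gm12u320_32bpp_to_24bpp_packed(dst, src, 1);
/* dst is now { 0x11, 0x22, 0x33 }: the padding byte was dropped */
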
+
+static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
+{
+ int block, dst_offset, len, remain, ret, x1, x2, y1, y2;
+ struct drm_framebuffer *fb;
+ void *vaddr;
+ u8 *src;
+
+ mutex_lock(&gm12u320->fb_update.lock);
+
+ if (!gm12u320->fb_update.fb)
+ goto unlock;
+
+ fb = gm12u320->fb_update.fb;
+ x1 = gm12u320->fb_update.rect.x1;
+ x2 = gm12u320->fb_update.rect.x2;
+ y1 = gm12u320->fb_update.rect.y1;
+ y2 = gm12u320->fb_update.rect.y2;
+
+ vaddr = drm_gem_shmem_vmap(fb->obj[0]);
+ if (IS_ERR(vaddr)) {
+ DRM_ERROR("failed to vmap fb: %ld\n", PTR_ERR(vaddr));
+ goto put_fb;
+ }
+
+ if (fb->obj[0]->import_attach) {
+ ret = dma_buf_begin_cpu_access(
+ fb->obj[0]->import_attach->dmabuf, DMA_FROM_DEVICE);
+ if (ret) {
+ DRM_ERROR("dma_buf_begin_cpu_access err: %d\n", ret);
+ goto vunmap;
+ }
+ }
+
+ src = vaddr + y1 * fb->pitches[0] + x1 * 4;
+
+ x1 += (GM12U320_REAL_WIDTH - GM12U320_USER_WIDTH) / 2;
+ x2 += (GM12U320_REAL_WIDTH - GM12U320_USER_WIDTH) / 2;
+
+ for (; y1 < y2; y1++) {
+ remain = 0;
+ len = (x2 - x1) * 3;
+ dst_offset = (y1 * GM12U320_REAL_WIDTH + x1) * 3;
+ block = dst_offset / DATA_BLOCK_CONTENT_SIZE;
+ dst_offset %= DATA_BLOCK_CONTENT_SIZE;
+
+ if ((dst_offset + len) > DATA_BLOCK_CONTENT_SIZE) {
+ remain = dst_offset + len - DATA_BLOCK_CONTENT_SIZE;
+ len = DATA_BLOCK_CONTENT_SIZE - dst_offset;
+ }
+
+ dst_offset += DATA_BLOCK_HEADER_SIZE;
+ len /= 3;
+
+ gm12u320_32bpp_to_24bpp_packed(
+ gm12u320->data_buf[block] + dst_offset,
+ src, len);
+
+ if (remain) {
+ block++;
+ dst_offset = DATA_BLOCK_HEADER_SIZE;
+ gm12u320_32bpp_to_24bpp_packed(
+ gm12u320->data_buf[block] + dst_offset,
+ src + len * 4, remain / 3);
+ }
+ src += fb->pitches[0];
+ }
+
+ if (fb->obj[0]->import_attach) {
+ ret = dma_buf_end_cpu_access(fb->obj[0]->import_attach->dmabuf,
+ DMA_FROM_DEVICE);
+ if (ret)
+ DRM_ERROR("dma_buf_end_cpu_access err: %d\n", ret);
+ }
+vunmap:
+ drm_gem_shmem_vunmap(fb->obj[0], vaddr);
+put_fb:
+ drm_framebuffer_put(fb);
+ gm12u320->fb_update.fb = NULL;
+unlock:
+ mutex_unlock(&gm12u320->fb_update.lock);
+}
+
+static int gm12u320_fb_update_ready(struct gm12u320_device *gm12u320)
+{
+ int ret;
+
+ mutex_lock(&gm12u320->fb_update.lock);
+ ret = !gm12u320->fb_update.run || gm12u320->fb_update.fb != NULL;
+ mutex_unlock(&gm12u320->fb_update.lock);
+
+ return ret;
+}
+
+static void gm12u320_fb_update_work(struct work_struct *work)
+{
+ struct gm12u320_device *gm12u320 =
+ container_of(work, struct gm12u320_device, fb_update.work);
+ int draw_status_timeout = FIRST_FRAME_TIMEOUT;
+ int block, block_size, len;
+ int frame = 0;
+ int ret = 0;
+
+ while (gm12u320->fb_update.run) {
+ gm12u320_copy_fb_to_blocks(gm12u320);
+
+ for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
+ if (block == GM12U320_BLOCK_COUNT - 1)
+ block_size = DATA_LAST_BLOCK_SIZE;
+ else
+ block_size = DATA_BLOCK_SIZE;
+
+ /* Send data command to device */
+ memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
+ gm12u320->cmd_buf[8] = block_size & 0xff;
+ gm12u320->cmd_buf[9] = block_size >> 8;
+ gm12u320->cmd_buf[20] = 0xfc - block * 4;
+ gm12u320->cmd_buf[21] = block | (frame << 7);
+
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->cmd_buf, CMD_SIZE, &len,
+ CMD_TIMEOUT);
+ if (ret || len != CMD_SIZE)
+ goto err;
+
+ /* Send data block to device */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->data_buf[block], block_size,
+ &len, DATA_TIMEOUT);
+ if (ret || len != block_size)
+ goto err;
+
+ /* Read status */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
+ gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+ CMD_TIMEOUT);
+ if (ret || len != READ_STATUS_SIZE)
+ goto err;
+ }
+
+ /* Send draw command to device */
+ memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ if (ret || len != CMD_SIZE)
+ goto err;
+
+ /* Read status */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
+ gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+ draw_status_timeout);
+ if (ret || len != READ_STATUS_SIZE)
+ goto err;
+
+ draw_status_timeout = CMD_TIMEOUT;
+ frame = !frame;
+
+ /*
+	 * We must draw a frame every 2s, otherwise the projector
+ * switches back to showing its logo.
+ */
+ wait_event_timeout(gm12u320->fb_update.waitq,
+ gm12u320_fb_update_ready(gm12u320),
+ IDLE_TIMEOUT);
+ }
+ return;
+err:
+ /* Do not log errors caused by module unload or device unplug */
+ if (ret != -ECONNRESET && ret != -ESHUTDOWN)
+ dev_err(&gm12u320->udev->dev, "Frame update error: %d\n", ret);
+}
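An editorial note on the command bytes above: cmd_buf[21] packs the block index
into the low bits and the alternating frame flag into bit 7, for example:

/* sketch: block 5 is tagged 0x05 in frame 0 and 0x85 in frame 1 */
u8 tag0 = 5 | (0 << 7);	/* 0x05 */
u8 tag1 = 5 | (1 << 7);	/* 0x85 */
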
+
+static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
+ struct drm_rect *dirty)
+{
+ struct gm12u320_device *gm12u320 = fb->dev->dev_private;
+ struct drm_framebuffer *old_fb = NULL;
+ bool wakeup = false;
+
+ mutex_lock(&gm12u320->fb_update.lock);
+
+ if (gm12u320->fb_update.fb != fb) {
+ old_fb = gm12u320->fb_update.fb;
+ drm_framebuffer_get(fb);
+ gm12u320->fb_update.fb = fb;
+ gm12u320->fb_update.rect = *dirty;
+ wakeup = true;
+ } else {
+ struct drm_rect *rect = &gm12u320->fb_update.rect;
+
+ rect->x1 = min(rect->x1, dirty->x1);
+ rect->y1 = min(rect->y1, dirty->y1);
+ rect->x2 = max(rect->x2, dirty->x2);
+ rect->y2 = max(rect->y2, dirty->y2);
+ }
+
+ mutex_unlock(&gm12u320->fb_update.lock);
+
+ if (wakeup)
+ wake_up(&gm12u320->fb_update.waitq);
+
+ if (old_fb)
+ drm_framebuffer_put(old_fb);
+}
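Aside (not in the patch): when further damage arrives for the framebuffer that
is already pending, the min/max updates above compute the bounding box of both
rectangles. The same merge, written as a standalone helper for illustration:

/* sketch: bounding-box merge of two damage rectangles */
static void damage_merge(struct drm_rect *acc, const struct drm_rect *d)
{
	acc->x1 = min(acc->x1, d->x1);
	acc->y1 = min(acc->y1, d->y1);
	acc->x2 = max(acc->x2, d->x2);
	acc->y2 = max(acc->y2, d->y2);
}
/* e.g. {10,10,20,20} merged with {15,5,30,18} yields {10,5,30,20} */
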
+
+static void gm12u320_start_fb_update(struct gm12u320_device *gm12u320)
+{
+ mutex_lock(&gm12u320->fb_update.lock);
+ gm12u320->fb_update.run = true;
+ mutex_unlock(&gm12u320->fb_update.lock);
+
+ queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work);
+}
+
+static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320)
+{
+ mutex_lock(&gm12u320->fb_update.lock);
+ gm12u320->fb_update.run = false;
+ mutex_unlock(&gm12u320->fb_update.lock);
+
+ wake_up(&gm12u320->fb_update.waitq);
+ cancel_work_sync(&gm12u320->fb_update.work);
+
+ mutex_lock(&gm12u320->fb_update.lock);
+ if (gm12u320->fb_update.fb) {
+ drm_framebuffer_put(gm12u320->fb_update.fb);
+ gm12u320->fb_update.fb = NULL;
+ }
+ mutex_unlock(&gm12u320->fb_update.lock);
+}
+
+static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
+{
+ return gm12u320_misc_request(gm12u320, MISC_REQ_GET_SET_ECO_A,
+ MISC_REQ_GET_SET_ECO_B, 0x01 /* set */,
+ eco_mode ? 0x01 : 0x00, 0x00, 0x01);
+}
+
+/* ------------------------------------------------------------------ */
+/* gm12u320 connector */
+
+/*
+ * We use fake EDID info so that userspace knows that it is dealing with
+ * an Acer projector, rather than listing this as an "unknown" monitor.
+ * Note this assumes this driver is only ever used with the Acer C120; if we
+ * add support for other devices, the vendor and model should be parameterized.
+ */
+static struct edid gm12u320_edid = {
+ .header = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 },
+ .mfg_id = { 0x04, 0x72 }, /* "ACR" */
+ .prod_code = { 0x20, 0xc1 }, /* C120h */
+ .serial = 0xaa55aa55,
+ .mfg_week = 1,
+ .mfg_year = 16,
+ .version = 1, /* EDID 1.3 */
+ .revision = 3, /* EDID 1.3 */
+ .input = 0x08, /* Analog input */
+ .features = 0x0a, /* Pref timing in DTD 1 */
+ .standard_timings = { { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
+ { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 } },
+ .detailed_timings = { {
+ .pixel_clock = 3383,
+ /* hactive = 848, hblank = 256 */
+ .data.pixel_data.hactive_lo = 0x50,
+ .data.pixel_data.hblank_lo = 0x00,
+ .data.pixel_data.hactive_hblank_hi = 0x31,
+ /* vactive = 480, vblank = 28 */
+ .data.pixel_data.vactive_lo = 0xe0,
+ .data.pixel_data.vblank_lo = 0x1c,
+ .data.pixel_data.vactive_vblank_hi = 0x10,
+ /* hsync offset 40 pw 128, vsync offset 1 pw 4 */
+ .data.pixel_data.hsync_offset_lo = 0x28,
+ .data.pixel_data.hsync_pulse_width_lo = 0x80,
+ .data.pixel_data.vsync_offset_pulse_width_lo = 0x14,
+ .data.pixel_data.hsync_vsync_offset_pulse_width_hi = 0x00,
+ /* Digital separate syncs, hsync+, vsync+ */
+ .data.pixel_data.misc = 0x1e,
+ }, {
+ .pixel_clock = 0,
+ .data.other_data.type = 0xfd, /* Monitor ranges */
+ .data.other_data.data.range.min_vfreq = 59,
+ .data.other_data.data.range.max_vfreq = 61,
+ .data.other_data.data.range.min_hfreq_khz = 29,
+ .data.other_data.data.range.max_hfreq_khz = 32,
+ .data.other_data.data.range.pixel_clock_mhz = 4, /* 40 MHz */
+ .data.other_data.data.range.flags = 0,
+ .data.other_data.data.range.formula.cvt = {
+ 0xa0, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 },
+ }, {
+ .pixel_clock = 0,
+ .data.other_data.type = 0xfc, /* Model string */
+ .data.other_data.data.str.str = {
+ 'P', 'r', 'o', 'j', 'e', 'c', 't', 'o', 'r', '\n',
+ ' ', ' ', ' ' },
+ }, {
+ .pixel_clock = 0,
+ .data.other_data.type = 0xfe, /* Unspecified text / padding */
+ .data.other_data.data.str.str = {
+ '\n', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',
+ ' ', ' ', ' ' },
+ } },
+ .checksum = 0x13,
+};
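Aside on the fixed .checksum (editorial, not part of the patch): EDID requires
all 128 bytes of the base block to sum to 0 modulo 256, so the final byte is
chosen to cancel the sum of the first 127. A sketch of recomputing it, assuming
the packed 128-byte struct edid layout:

static u8 edid_block_checksum(const struct edid *edid)
{
	const u8 *raw = (const u8 *)edid;
	u8 sum = 0;
	int i;

	/* sum everything except the checksum byte itself */
	for (i = 0; i < EDID_LENGTH - 1; i++)
		sum += raw[i];
	return -sum;	/* should yield the 0x13 used above */
}
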
+
+static int gm12u320_conn_get_modes(struct drm_connector *connector)
+{
+ drm_connector_update_edid_property(connector, &gm12u320_edid);
+ return drm_add_edid_modes(connector, &gm12u320_edid);
+}
+
+static const struct drm_connector_helper_funcs gm12u320_conn_helper_funcs = {
+ .get_modes = gm12u320_conn_get_modes,
+};
+
+static const struct drm_connector_funcs gm12u320_conn_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int gm12u320_conn_init(struct gm12u320_device *gm12u320)
+{
+ drm_connector_helper_add(&gm12u320->conn, &gm12u320_conn_helper_funcs);
+ return drm_connector_init(&gm12u320->dev, &gm12u320->conn,
+ &gm12u320_conn_funcs, DRM_MODE_CONNECTOR_VGA);
+}
+
+/* ------------------------------------------------------------------ */
+/* gm12u320 (simple) display pipe */
+
+static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
+ struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT };
+
+ gm12u320_fb_mark_dirty(plane_state->fb, &rect);
+ gm12u320_start_fb_update(gm12u320);
+ gm12u320->pipe_enabled = true;
+}
+
+static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
+
+ gm12u320_stop_fb_update(gm12u320);
+ gm12u320->pipe_enabled = false;
+}
+
+static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = pipe->plane.state;
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_rect rect;
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+ gm12u320_fb_mark_dirty(pipe->plane.state->fb, &rect);
+
+ if (crtc->state->event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
+ .enable = gm12u320_pipe_enable,
+ .disable = gm12u320_pipe_disable,
+ .update = gm12u320_pipe_update,
+};
+
+static const uint32_t gm12u320_pipe_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t gm12u320_pipe_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static void gm12u320_driver_release(struct drm_device *dev)
+{
+ struct gm12u320_device *gm12u320 = dev->dev_private;
+
+ gm12u320_usb_free(gm12u320);
+ drm_mode_config_cleanup(dev);
+ drm_dev_fini(dev);
+ kfree(gm12u320);
+}
+
+DEFINE_DRM_GEM_SHMEM_FOPS(gm12u320_fops);
+
+static struct drm_driver gm12u320_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .release = gm12u320_driver_release,
+ .fops = &gm12u320_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int gm12u320_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct gm12u320_device *gm12u320;
+ struct drm_device *dev;
+ int ret;
+
+ /*
+	 * The gm12u320 presents itself to the system as 2 USB mass-storage
+	 * interfaces; we only care about (and need) the first one.
+ */
+ if (interface->cur_altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+
+ gm12u320 = kzalloc(sizeof(*gm12u320), GFP_KERNEL);
+ if (gm12u320 == NULL)
+ return -ENOMEM;
+
+ gm12u320->udev = interface_to_usbdev(interface);
+ INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
+ mutex_init(&gm12u320->fb_update.lock);
+ init_waitqueue_head(&gm12u320->fb_update.waitq);
+
+ dev = &gm12u320->dev;
+ ret = drm_dev_init(dev, &gm12u320_drm_driver, &interface->dev);
+ if (ret) {
+ kfree(gm12u320);
+ return ret;
+ }
+ dev->dev_private = gm12u320;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.min_width = GM12U320_USER_WIDTH;
+ dev->mode_config.max_width = GM12U320_USER_WIDTH;
+ dev->mode_config.min_height = GM12U320_HEIGHT;
+ dev->mode_config.max_height = GM12U320_HEIGHT;
+ dev->mode_config.funcs = &gm12u320_mode_config_funcs;
+
+ ret = gm12u320_usb_alloc(gm12u320);
+ if (ret)
+ goto err_put;
+
+ ret = gm12u320_set_ecomode(gm12u320);
+ if (ret)
+ goto err_put;
+
+ ret = gm12u320_conn_init(gm12u320);
+ if (ret)
+ goto err_put;
+
+ ret = drm_simple_display_pipe_init(&gm12u320->dev,
+ &gm12u320->pipe,
+ &gm12u320_pipe_funcs,
+ gm12u320_pipe_formats,
+ ARRAY_SIZE(gm12u320_pipe_formats),
+ gm12u320_pipe_modifiers,
+ &gm12u320->conn);
+ if (ret)
+ goto err_put;
+
+ drm_mode_config_reset(dev);
+
+ usb_set_intfdata(interface, dev);
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_put;
+
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+
+ return 0;
+
+err_put:
+ drm_dev_put(dev);
+ return ret;
+}
+
+static void gm12u320_usb_disconnect(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+ struct gm12u320_device *gm12u320 = dev->dev_private;
+
+ gm12u320_stop_fb_update(gm12u320);
+ drm_dev_unplug(dev);
+ drm_dev_put(dev);
+}
+
+#ifdef CONFIG_PM
+static int gm12u320_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+ struct gm12u320_device *gm12u320 = dev->dev_private;
+
+ if (gm12u320->pipe_enabled)
+ gm12u320_stop_fb_update(gm12u320);
+
+ return 0;
+}
+
+static int gm12u320_resume(struct usb_interface *interface)
+{
+ struct drm_device *dev = usb_get_intfdata(interface);
+ struct gm12u320_device *gm12u320 = dev->dev_private;
+
+ gm12u320_set_ecomode(gm12u320);
+ if (gm12u320->pipe_enabled)
+ gm12u320_start_fb_update(gm12u320);
+
+ return 0;
+}
+#endif
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x1de1, 0xc102) },
+ {},
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver gm12u320_usb_driver = {
+ .name = "gm12u320",
+ .probe = gm12u320_usb_probe,
+ .disconnect = gm12u320_usb_disconnect,
+ .id_table = id_table,
+#ifdef CONFIG_PM
+ .suspend = gm12u320_suspend,
+ .resume = gm12u320_resume,
+ .reset_resume = gm12u320_resume,
+#endif
+};
+
+module_usb_driver(gm12u320_usb_driver);
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 5773d0fb6ca1..9af8ff84974f 100644
--- a/drivers/gpu/drm/tinydrm/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -21,9 +21,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
#define HX8357D_SETOSC 0xb0
@@ -48,7 +47,8 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
@@ -57,29 +57,29 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_conditional_reset(mipi);
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
/* setextc */
- mipi_dbi_command(mipi, HX8357D_SETEXTC, 0xFF, 0x83, 0x57);
+ mipi_dbi_command(dbi, HX8357D_SETEXTC, 0xFF, 0x83, 0x57);
msleep(150);
/* setRGB which also enables SDO */
- mipi_dbi_command(mipi, HX8357D_SETRGB, 0x00, 0x00, 0x06, 0x06);
+ mipi_dbi_command(dbi, HX8357D_SETRGB, 0x00, 0x00, 0x06, 0x06);
/* -1.52V */
- mipi_dbi_command(mipi, HX8357D_SETCOM, 0x25);
+ mipi_dbi_command(dbi, HX8357D_SETCOM, 0x25);
/* Normal mode 70Hz, Idle mode 55 Hz */
- mipi_dbi_command(mipi, HX8357D_SETOSC, 0x68);
+ mipi_dbi_command(dbi, HX8357D_SETOSC, 0x68);
/* Set Panel - BGR, Gate direction swapped */
- mipi_dbi_command(mipi, HX8357D_SETPANEL, 0x05);
+ mipi_dbi_command(dbi, HX8357D_SETPANEL, 0x05);
- mipi_dbi_command(mipi, HX8357D_SETPOWER,
+ mipi_dbi_command(dbi, HX8357D_SETPOWER,
0x00, /* Not deep standby */
0x15, /* BT */
0x1C, /* VSPR */
@@ -87,7 +87,7 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
0x83, /* AP */
0xAA); /* FS */
- mipi_dbi_command(mipi, HX8357D_SETSTBA,
+ mipi_dbi_command(dbi, HX8357D_SETSTBA,
0x50, /* OPON normal */
0x50, /* OPON idle */
0x01, /* STBA */
@@ -95,7 +95,7 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
0x1E, /* STBA */
0x08); /* GEN */
- mipi_dbi_command(mipi, HX8357D_SETCYC,
+ mipi_dbi_command(dbi, HX8357D_SETCYC,
0x02, /* NW 0x02 */
0x40, /* RTN */
0x00, /* DIV */
@@ -104,7 +104,7 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
0x0D, /* GDON */
0x78); /* GDOFF */
- mipi_dbi_command(mipi, HX8357D_SETGAMMA,
+ mipi_dbi_command(dbi, HX8357D_SETGAMMA,
0x02,
0x0A,
0x11,
@@ -141,25 +141,25 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
0x01);
/* 16 bit */
- mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
/* TE off */
- mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_ON, 0x00);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_TEAR_ON, 0x00);
/* tear line */
- mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
/* Exit Sleep */
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(150);
/* display on */
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
usleep_range(5000, 7000);
out_enable:
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
addr_mode = HX8357D_MADCTL_MX | HX8357D_MADCTL_MY;
break;
@@ -173,8 +173,8 @@ out_enable:
addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MX;
break;
}
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
@@ -193,7 +193,7 @@ static const struct drm_display_mode yx350hv15_mode = {
DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
static struct drm_driver hx8357d_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -220,20 +220,20 @@ MODULE_DEVICE_TABLE(spi, hx8357d_id);
static int hx8357d_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
@@ -245,17 +245,17 @@ static int hx8357d_probe(struct spi_device *spi)
return PTR_ERR(dc);
}
- mipi->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(mipi->backlight))
- return PTR_ERR(mipi->backlight);
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, dc);
+ ret = mipi_dbi_spi_init(spi, &dbidev->dbi, dc);
if (ret)
return ret;
- ret = mipi_dbi_init(mipi, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index ea69019f2f33..c66acc566c2b 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -24,10 +24,9 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#define ILI9225_DRIVER_READ_CODE 0x00
#define ILI9225_DRIVER_OUTPUT_CONTROL 0x01
@@ -69,27 +68,28 @@
#define ILI9225_GAMMA_CONTROL_9 0x58
#define ILI9225_GAMMA_CONTROL_10 0x59
-static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data)
+static inline int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data)
{
u8 par[2] = { data >> 8, data & 0xff };
- return mipi_dbi_command_buf(mipi, cmd, par, 2);
+ return mipi_dbi_command_buf(dbi, cmd, par, 2);
}
static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
- bool swap = mipi->swap_bytes;
+ struct mipi_dbi *dbi = &dbidev->dbi;
+ bool swap = dbi->swap_bytes;
u16 x_start, y_start;
u16 x1, x2, y1, y2;
int idx, ret = 0;
bool full;
void *tr;
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
if (!drm_dev_enter(fb->dev, &idx))
@@ -99,17 +99,17 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- if (!mipi->dc || !full || swap ||
+ if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
- tr = mipi->tx_buf;
- ret = mipi_dbi_buf_copy(mipi->tx_buf, fb, rect, swap);
+ tr = dbidev->tx_buf;
+ ret = mipi_dbi_buf_copy(dbidev->tx_buf, fb, rect, swap);
if (ret)
goto err_msg;
} else {
tr = cma_obj->vaddr;
}
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
x1 = rect->x1;
x2 = rect->x2 - 1;
@@ -144,15 +144,15 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
break;
}
- ili9225_command(mipi, ILI9225_HORIZ_WINDOW_ADDR_1, x2);
- ili9225_command(mipi, ILI9225_HORIZ_WINDOW_ADDR_2, x1);
- ili9225_command(mipi, ILI9225_VERT_WINDOW_ADDR_1, y2);
- ili9225_command(mipi, ILI9225_VERT_WINDOW_ADDR_2, y1);
+ ili9225_command(dbi, ILI9225_HORIZ_WINDOW_ADDR_1, x2);
+ ili9225_command(dbi, ILI9225_HORIZ_WINDOW_ADDR_2, x1);
+ ili9225_command(dbi, ILI9225_VERT_WINDOW_ADDR_1, y2);
+ ili9225_command(dbi, ILI9225_VERT_WINDOW_ADDR_2, y1);
- ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_1, x_start);
- ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_2, y_start);
+ ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_1, x_start);
+ ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_2, y_start);
- ret = mipi_dbi_command_buf(mipi, ILI9225_WRITE_DATA_TO_GRAM, tr,
+ ret = mipi_dbi_command_buf(dbi, ILI9225_WRITE_DATA_TO_GRAM, tr,
width * height * 2);
err_msg:
if (ret)
@@ -183,9 +183,10 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
struct device *dev = pipe->crtc.dev->dev;
+ struct mipi_dbi *dbi = &dbidev->dbi;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
@@ -200,7 +201,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- mipi_dbi_hw_reset(mipi);
+ mipi_dbi_hw_reset(dbi);
/*
* There don't seem to be two example init sequences that match, so
@@ -208,31 +209,31 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
* https://github.com/Nkawu/TFT_22_ILI9225/blob/master/src/TFT_22_ILI9225.cpp
*/
- ret = ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0000);
+ ret = ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0000);
if (ret) {
DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
goto out_exit;
}
- ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0000);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x0000);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_4, 0x0000);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_5, 0x0000);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0000);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_3, 0x0000);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_4, 0x0000);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_5, 0x0000);
msleep(40);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0018);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x6121);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_4, 0x006f);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_5, 0x495f);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0800);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0018);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_3, 0x6121);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_4, 0x006f);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_5, 0x495f);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0800);
msleep(10);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x103b);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x103b);
msleep(50);
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
am_id = 0x30;
break;
@@ -246,43 +247,43 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
am_id = 0x28;
break;
}
- ili9225_command(mipi, ILI9225_DRIVER_OUTPUT_CONTROL, 0x011c);
- ili9225_command(mipi, ILI9225_LCD_AC_DRIVING_CONTROL, 0x0100);
- ili9225_command(mipi, ILI9225_ENTRY_MODE, 0x1000 | am_id);
- ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
- ili9225_command(mipi, ILI9225_BLANK_PERIOD_CONTROL_1, 0x0808);
- ili9225_command(mipi, ILI9225_FRAME_CYCLE_CONTROL, 0x1100);
- ili9225_command(mipi, ILI9225_INTERFACE_CONTROL, 0x0000);
- ili9225_command(mipi, ILI9225_OSCILLATION_CONTROL, 0x0d01);
- ili9225_command(mipi, ILI9225_VCI_RECYCLING, 0x0020);
- ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_1, 0x0000);
- ili9225_command(mipi, ILI9225_RAM_ADDRESS_SET_2, 0x0000);
-
- ili9225_command(mipi, ILI9225_GATE_SCAN_CONTROL, 0x0000);
- ili9225_command(mipi, ILI9225_VERTICAL_SCROLL_1, 0x00db);
- ili9225_command(mipi, ILI9225_VERTICAL_SCROLL_2, 0x0000);
- ili9225_command(mipi, ILI9225_VERTICAL_SCROLL_3, 0x0000);
- ili9225_command(mipi, ILI9225_PARTIAL_DRIVING_POS_1, 0x00db);
- ili9225_command(mipi, ILI9225_PARTIAL_DRIVING_POS_2, 0x0000);
-
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_1, 0x0000);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_2, 0x0808);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_3, 0x080a);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_4, 0x000a);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_5, 0x0a08);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_6, 0x0808);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_7, 0x0000);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_8, 0x0a00);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_9, 0x0710);
- ili9225_command(mipi, ILI9225_GAMMA_CONTROL_10, 0x0710);
-
- ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x0012);
+ ili9225_command(dbi, ILI9225_DRIVER_OUTPUT_CONTROL, 0x011c);
+ ili9225_command(dbi, ILI9225_LCD_AC_DRIVING_CONTROL, 0x0100);
+ ili9225_command(dbi, ILI9225_ENTRY_MODE, 0x1000 | am_id);
+ ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
+ ili9225_command(dbi, ILI9225_BLANK_PERIOD_CONTROL_1, 0x0808);
+ ili9225_command(dbi, ILI9225_FRAME_CYCLE_CONTROL, 0x1100);
+ ili9225_command(dbi, ILI9225_INTERFACE_CONTROL, 0x0000);
+ ili9225_command(dbi, ILI9225_OSCILLATION_CONTROL, 0x0d01);
+ ili9225_command(dbi, ILI9225_VCI_RECYCLING, 0x0020);
+ ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_1, 0x0000);
+ ili9225_command(dbi, ILI9225_RAM_ADDRESS_SET_2, 0x0000);
+
+ ili9225_command(dbi, ILI9225_GATE_SCAN_CONTROL, 0x0000);
+ ili9225_command(dbi, ILI9225_VERTICAL_SCROLL_1, 0x00db);
+ ili9225_command(dbi, ILI9225_VERTICAL_SCROLL_2, 0x0000);
+ ili9225_command(dbi, ILI9225_VERTICAL_SCROLL_3, 0x0000);
+ ili9225_command(dbi, ILI9225_PARTIAL_DRIVING_POS_1, 0x00db);
+ ili9225_command(dbi, ILI9225_PARTIAL_DRIVING_POS_2, 0x0000);
+
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_1, 0x0000);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_2, 0x0808);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_3, 0x080a);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_4, 0x000a);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_5, 0x0a08);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_6, 0x0808);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_7, 0x0000);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_8, 0x0a00);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_9, 0x0710);
+ ili9225_command(dbi, ILI9225_GAMMA_CONTROL_10, 0x0710);
+
+ ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0012);
msleep(50);
- ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
+ ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
- mipi->enabled = true;
+ dbidev->enabled = true;
ili9225_fb_dirty(fb, &rect);
out_exit:
drm_dev_exit(idx);
@@ -290,7 +291,8 @@ out_exit:
static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
DRM_DEBUG_KMS("\n");
@@ -301,39 +303,39 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
* unplug.
*/
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
- ili9225_command(mipi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
+ ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x0000);
msleep(50);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0007);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_2, 0x0007);
msleep(50);
- ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0a02);
+ ili9225_command(dbi, ILI9225_POWER_CONTROL_1, 0x0a02);
- mipi->enabled = false;
+ dbidev->enabled = false;
}
-static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+static int ili9225_dbi_command(struct mipi_dbi *dbi, u8 *cmd, u8 *par,
size_t num)
{
- struct spi_device *spi = mipi->spi;
+ struct spi_device *spi = dbi->spi;
unsigned int bpw = 8;
u32 speed_hz;
int ret;
- gpiod_set_value_cansleep(mipi->dc, 0);
+ gpiod_set_value_cansleep(dbi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, cmd, 1);
if (ret || !num)
return ret;
- if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
+ if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !dbi->swap_bytes)
bpw = 16;
- gpiod_set_value_cansleep(mipi->dc, 1);
+ gpiod_set_value_cansleep(dbi->dc, 1);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
- return tinydrm_spi_transfer(spi, speed_hz, NULL, bpw, par, num);
+ return mipi_dbi_spi_transfer(spi, speed_hz, bpw, par, num);
}
static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
@@ -350,8 +352,7 @@ static const struct drm_display_mode ili9225_mode = {
DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
static struct drm_driver ili9225_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -377,29 +378,31 @@ MODULE_DEVICE_TABLE(spi, ili9225_id);
static int ili9225_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *rs;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
rs = devm_gpiod_get(dev, "rs", GPIOD_OUT_LOW);
@@ -410,14 +413,14 @@ static int ili9225_probe(struct spi_device *spi)
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, rs);
+ ret = mipi_dbi_spi_init(spi, dbi, rs);
if (ret)
return ret;
/* override the command function set in mipi_dbi_spi_init() */
- mipi->command = ili9225_dbi_command;
+ dbi->command = ili9225_dbi_command;
- ret = mipi_dbi_init(mipi, &ili9225_pipe_funcs, &ili9225_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &ili9225_pipe_funcs, &ili9225_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 4ade9e4b924f..33b51dc7faa8 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -20,9 +20,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
#define ILI9341_FRMCTR1 0xb1
@@ -54,7 +53,8 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
@@ -63,57 +63,57 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_conditional_reset(mipi);
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
- mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
- mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
- mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
- mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
- mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
- mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+ mipi_dbi_command(dbi, ILI9341_PWCTRLB, 0x00, 0xc1, 0x30);
+ mipi_dbi_command(dbi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+ mipi_dbi_command(dbi, ILI9341_DTCTRLA, 0x85, 0x00, 0x78);
+ mipi_dbi_command(dbi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+ mipi_dbi_command(dbi, ILI9341_PUMPCTRL, 0x20);
+ mipi_dbi_command(dbi, ILI9341_DTCTRLB, 0x00, 0x00);
/* Power Control */
- mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x23);
- mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x10);
+ mipi_dbi_command(dbi, ILI9341_PWCTRL1, 0x23);
+ mipi_dbi_command(dbi, ILI9341_PWCTRL2, 0x10);
/* VCOM */
- mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x3e, 0x28);
- mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0x86);
+ mipi_dbi_command(dbi, ILI9341_VMCTRL1, 0x3e, 0x28);
+ mipi_dbi_command(dbi, ILI9341_VMCTRL2, 0x86);
/* Memory Access Control */
- mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
/* Frame Rate */
- mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+ mipi_dbi_command(dbi, ILI9341_FRMCTR1, 0x00, 0x1b);
/* Gamma */
- mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x00);
- mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
- mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+ mipi_dbi_command(dbi, ILI9341_EN3GAM, 0x00);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+ mipi_dbi_command(dbi, ILI9341_PGAMCTRL,
0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1,
0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00);
- mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+ mipi_dbi_command(dbi, ILI9341_NGAMCTRL,
0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1,
0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f);
/* DDRAM */
- mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+ mipi_dbi_command(dbi, ILI9341_ETMOD, 0x07);
/* Display */
- mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, ILI9341_DISCTRL, 0x08, 0x82, 0x27, 0x00);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(100);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
out_enable:
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
addr_mode = ILI9341_MADCTL_MX;
break;
@@ -129,8 +129,8 @@ out_enable:
break;
}
addr_mode |= ILI9341_MADCTL_BGR;
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
@@ -149,7 +149,7 @@ static const struct drm_display_mode yx240qv29_mode = {
DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -176,29 +176,31 @@ MODULE_DEVICE_TABLE(spi, ili9341_id);
static int ili9341_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
@@ -207,17 +209,17 @@ static int ili9341_probe(struct spi_device *spi)
return PTR_ERR(dc);
}
- mipi->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(mipi->backlight))
- return PTR_ERR(mipi->backlight);
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, dc);
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
- ret = mipi_dbi_init(mipi, &ili9341_pipe_funcs, &yx240qv29_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &ili9341_pipe_funcs, &yx240qv29_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index fdefa53455d4..e2cfd9a17143 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -18,9 +18,8 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
#define ILI9341_FRMCTR1 0xb1
@@ -52,7 +51,8 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
u8 addr_mode;
int ret, idx;
@@ -61,53 +61,53 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_conditional_reset(mipi);
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
if (ret < 0)
goto out_exit;
if (ret == 1)
goto out_enable;
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
- mipi_dbi_command(mipi, ILI9341_PWCTRLB, 0x00, 0x83, 0x30);
- mipi_dbi_command(mipi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
- mipi_dbi_command(mipi, ILI9341_DTCTRLA, 0x85, 0x01, 0x79);
- mipi_dbi_command(mipi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
- mipi_dbi_command(mipi, ILI9341_PUMPCTRL, 0x20);
- mipi_dbi_command(mipi, ILI9341_DTCTRLB, 0x00, 0x00);
+ mipi_dbi_command(dbi, ILI9341_PWCTRLB, 0x00, 0x83, 0x30);
+ mipi_dbi_command(dbi, ILI9341_PWRSEQ, 0x64, 0x03, 0x12, 0x81);
+ mipi_dbi_command(dbi, ILI9341_DTCTRLA, 0x85, 0x01, 0x79);
+ mipi_dbi_command(dbi, ILI9341_PWCTRLA, 0x39, 0x2c, 0x00, 0x34, 0x02);
+ mipi_dbi_command(dbi, ILI9341_PUMPCTRL, 0x20);
+ mipi_dbi_command(dbi, ILI9341_DTCTRLB, 0x00, 0x00);
/* Power Control */
- mipi_dbi_command(mipi, ILI9341_PWCTRL1, 0x26);
- mipi_dbi_command(mipi, ILI9341_PWCTRL2, 0x11);
+ mipi_dbi_command(dbi, ILI9341_PWCTRL1, 0x26);
+ mipi_dbi_command(dbi, ILI9341_PWCTRL2, 0x11);
/* VCOM */
- mipi_dbi_command(mipi, ILI9341_VMCTRL1, 0x35, 0x3e);
- mipi_dbi_command(mipi, ILI9341_VMCTRL2, 0xbe);
+ mipi_dbi_command(dbi, ILI9341_VMCTRL1, 0x35, 0x3e);
+ mipi_dbi_command(dbi, ILI9341_VMCTRL2, 0xbe);
/* Memory Access Control */
- mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT);
/* Frame Rate */
- mipi_dbi_command(mipi, ILI9341_FRMCTR1, 0x00, 0x1b);
+ mipi_dbi_command(dbi, ILI9341_FRMCTR1, 0x00, 0x1b);
/* Gamma */
- mipi_dbi_command(mipi, ILI9341_EN3GAM, 0x08);
- mipi_dbi_command(mipi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
- mipi_dbi_command(mipi, ILI9341_PGAMCTRL,
+ mipi_dbi_command(dbi, ILI9341_EN3GAM, 0x08);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
+ mipi_dbi_command(dbi, ILI9341_PGAMCTRL,
0x1f, 0x1a, 0x18, 0x0a, 0x0f, 0x06, 0x45, 0x87,
0x32, 0x0a, 0x07, 0x02, 0x07, 0x05, 0x00);
- mipi_dbi_command(mipi, ILI9341_NGAMCTRL,
+ mipi_dbi_command(dbi, ILI9341_NGAMCTRL,
0x00, 0x25, 0x27, 0x05, 0x10, 0x09, 0x3a, 0x78,
0x4d, 0x05, 0x18, 0x0d, 0x38, 0x3a, 0x1f);
/* DDRAM */
- mipi_dbi_command(mipi, ILI9341_ETMOD, 0x07);
+ mipi_dbi_command(dbi, ILI9341_ETMOD, 0x07);
/* Display */
- mipi_dbi_command(mipi, ILI9341_DISCTRL, 0x0a, 0x82, 0x27, 0x00);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, ILI9341_DISCTRL, 0x0a, 0x82, 0x27, 0x00);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(100);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
out_enable:
@@ -117,7 +117,7 @@ out_enable:
* As a result, we need to always apply the rotation value
* regardless of the display "on/off" state.
*/
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY |
ILI9341_MADCTL_MX;
@@ -133,8 +133,8 @@ out_enable:
break;
}
addr_mode |= ILI9341_MADCTL_BGR;
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
@@ -153,8 +153,7 @@ static const struct drm_display_mode mi0283qt_mode = {
DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
static struct drm_driver mi0283qt_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -181,29 +180,31 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id);
static int mi0283qt_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
@@ -212,21 +213,21 @@ static int mi0283qt_probe(struct spi_device *spi)
return PTR_ERR(dc);
}
- mipi->regulator = devm_regulator_get(dev, "power");
- if (IS_ERR(mipi->regulator))
- return PTR_ERR(mipi->regulator);
+ dbidev->regulator = devm_regulator_get(dev, "power");
+ if (IS_ERR(dbidev->regulator))
+ return PTR_ERR(dbidev->regulator);
- mipi->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(mipi->backlight))
- return PTR_ERR(mipi->backlight);
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, dc);
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
- ret = mipi_dbi_init(mipi, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 97a874b40394..76d179200775 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -23,6 +23,7 @@
#include <linux/thermal.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
@@ -30,10 +31,11 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#define REPAPER_RID_G2_COG_ID 0x12
@@ -60,6 +62,8 @@ enum repaper_epd_border_byte {
struct repaper_epd {
struct drm_device drm;
struct drm_simple_display_pipe pipe;
+ const struct drm_display_mode *mode;
+ struct drm_connector connector;
struct spi_device *spi;
struct gpio_desc *panel_on;
@@ -873,6 +877,39 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
+static int repaper_connector_get_modes(struct drm_connector *connector)
+{
+ struct repaper_epd *epd = drm_to_epd(connector->dev);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, epd->mode);
+ if (!mode) {
+ DRM_ERROR("Failed to duplicate mode\n");
+ return 0;
+ }
+
+ drm_mode_set_name(mode);
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_connector_helper_funcs repaper_connector_hfuncs = {
+ .get_modes = repaper_connector_get_modes,
+};
+
+static const struct drm_connector_funcs repaper_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
@@ -925,8 +962,7 @@ static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
static struct drm_driver repaper_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
.release = repaper_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -1096,6 +1132,7 @@ static int repaper_probe(struct spi_device *spi)
return -ENODEV;
}
+ epd->mode = mode;
epd->width = mode->hdisplay;
epd->height = mode->vdisplay;
epd->factored_stage_time = epd->stage_time;
@@ -1110,10 +1147,20 @@ static int repaper_probe(struct spi_device *spi)
if (!epd->current_frame)
return -ENOMEM;
- ret = tinydrm_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL,
- repaper_formats,
- ARRAY_SIZE(repaper_formats), mode, 0);
+ drm->mode_config.min_width = mode->hdisplay;
+ drm->mode_config.max_width = mode->hdisplay;
+ drm->mode_config.min_height = mode->vdisplay;
+ drm->mode_config.max_height = mode->vdisplay;
+
+ drm_connector_helper_add(&epd->connector, &repaper_connector_hfuncs);
+ ret = drm_connector_init(drm, &epd->connector, &repaper_connector_funcs,
+ DRM_MODE_CONNECTOR_SPI);
+ if (ret)
+ return ret;
+
+ ret = drm_simple_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
+ repaper_formats, ARRAY_SIZE(repaper_formats),
+ NULL, &epd->connector);
if (ret)
return ret;
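repaper_connector_get_modes() above upcasts with drm_to_epd(), which is not shown in these hunks; given the drm_device already embedded in struct repaper_epd, it is presumably the usual container_of() helper:

/* Presumed definition, matching the embedded drm_device in repaper_epd. */
static inline struct repaper_epd *drm_to_epd(struct drm_device *drm)
{
	return container_of(drm, struct repaper_epd, drm);
}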
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 9ac626265152..3cc21a1b30c8 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -21,10 +21,9 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
/* controller-specific commands */
#define ST7586_DISP_MODE_GRAY 0x38
@@ -115,10 +114,11 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
int start, end, idx, ret = 0;
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
if (!drm_dev_enter(fb->dev, &idx))
@@ -130,7 +130,7 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = st7586_buf_copy(mipi->tx_buf, fb, rect);
+ ret = st7586_buf_copy(dbidev->tx_buf, fb, rect);
if (ret)
goto err_msg;
@@ -138,15 +138,15 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
start = rect->x1 / 3;
end = rect->x2 / 3;
- mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
(start >> 8) & 0xFF, start & 0xFF,
(end >> 8) & 0xFF, (end - 1) & 0xFF);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
(rect->y1 >> 8) & 0xFF, rect->y1 & 0xFF,
(rect->y2 >> 8) & 0xFF, (rect->y2 - 1) & 0xFF);
- ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
- (u8 *)mipi->tx_buf,
+ ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
+ (u8 *)dbidev->tx_buf,
(end - start) * (rect->y2 - rect->y1));
err_msg:
if (ret)
@@ -177,8 +177,9 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
+ struct mipi_dbi *dbi = &dbidev->dbi;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
@@ -193,35 +194,35 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_reset(mipi);
+ ret = mipi_dbi_poweron_reset(dbidev);
if (ret)
goto out_exit;
- mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
- mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
+ mipi_dbi_command(dbi, ST7586_AUTO_READ_CTRL, 0x9f);
+ mipi_dbi_command(dbi, ST7586_OTP_RW_CTRL, 0x00);
msleep(10);
- mipi_dbi_command(mipi, ST7586_OTP_READ);
+ mipi_dbi_command(dbi, ST7586_OTP_READ);
msleep(20);
- mipi_dbi_command(mipi, ST7586_OTP_CTRL_OUT);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi_dbi_command(dbi, ST7586_OTP_CTRL_OUT);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
msleep(50);
- mipi_dbi_command(mipi, ST7586_SET_VOP_OFFSET, 0x00);
- mipi_dbi_command(mipi, ST7586_SET_VOP, 0xe3, 0x00);
- mipi_dbi_command(mipi, ST7586_SET_BIAS_SYSTEM, 0x02);
- mipi_dbi_command(mipi, ST7586_SET_BOOST_LEVEL, 0x04);
- mipi_dbi_command(mipi, ST7586_ENABLE_ANALOG, 0x1d);
- mipi_dbi_command(mipi, ST7586_SET_NLINE_INV, 0x00);
- mipi_dbi_command(mipi, ST7586_DISP_MODE_GRAY);
- mipi_dbi_command(mipi, ST7586_ENABLE_DDRAM, 0x02);
+ mipi_dbi_command(dbi, ST7586_SET_VOP_OFFSET, 0x00);
+ mipi_dbi_command(dbi, ST7586_SET_VOP, 0xe3, 0x00);
+ mipi_dbi_command(dbi, ST7586_SET_BIAS_SYSTEM, 0x02);
+ mipi_dbi_command(dbi, ST7586_SET_BOOST_LEVEL, 0x04);
+ mipi_dbi_command(dbi, ST7586_ENABLE_ANALOG, 0x1d);
+ mipi_dbi_command(dbi, ST7586_SET_NLINE_INV, 0x00);
+ mipi_dbi_command(dbi, ST7586_DISP_MODE_GRAY);
+ mipi_dbi_command(dbi, ST7586_ENABLE_DDRAM, 0x02);
- switch (mipi->rotation) {
+ switch (dbidev->rotation) {
default:
addr_mode = 0x00;
break;
@@ -235,26 +236,26 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
addr_mode = ST7586_DISP_CTRL_MX;
break;
}
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_command(mipi, ST7586_SET_DISP_DUTY, 0x7f);
- mipi_dbi_command(mipi, ST7586_SET_PART_DISP, 0xa0);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE);
+ mipi_dbi_command(dbi, ST7586_SET_DISP_DUTY, 0x7f);
+ mipi_dbi_command(dbi, ST7586_SET_PART_DISP, 0xa0);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PARTIAL_AREA, 0x00, 0x00, 0x00, 0x77);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_INVERT_MODE);
msleep(100);
- mipi->enabled = true;
+ dbidev->enabled = true;
st7586_fb_dirty(fb, &rect);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
out_exit:
drm_dev_exit(idx);
}
static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
/*
* This callback is not protected by drm_dev_enter/exit since we want to
@@ -265,11 +266,11 @@ static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
DRM_DEBUG_KMS("\n");
- if (!mipi->enabled)
+ if (!dbidev->enabled)
return;
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_OFF);
- mipi->enabled = false;
+ mipi_dbi_command(&dbidev->dbi, MIPI_DCS_SET_DISPLAY_OFF);
+ dbidev->enabled = false;
}
static const u32 st7586_formats[] = {
@@ -283,12 +284,6 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
-static const struct drm_mode_config_funcs st7586_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
static const struct drm_display_mode st7586_mode = {
DRM_SIMPLE_MODE(178, 128, 37, 27),
};
@@ -296,8 +291,7 @@ static const struct drm_display_mode st7586_mode = {
DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
static struct drm_driver st7586_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -324,39 +318,34 @@ MODULE_DEVICE_TABLE(spi, st7586_id);
static int st7586_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *a0;
u32 rotation = 0;
size_t bufsize;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &st7586_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- drm->mode_config.preferred_depth = 32;
- drm->mode_config.funcs = &st7586_mode_config_funcs;
-
- mutex_init(&mipi->cmdlock);
bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
- mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
- if (!mipi->tx_buf)
- return -ENOMEM;
- mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
@@ -366,14 +355,19 @@ static int st7586_probe(struct spi_device *spi)
}
device_property_read_u32(dev, "rotation", &rotation);
- mipi->rotation = rotation;
- ret = mipi_dbi_spi_init(spi, mipi, a0);
+ ret = mipi_dbi_spi_init(spi, dbi, a0);
if (ret)
return ret;
/* Cannot read from this controller via SPI */
- mipi->read_commands = NULL;
+ dbi->read_commands = NULL;
+
+ ret = mipi_dbi_dev_init_with_formats(dbidev, &st7586_pipe_funcs,
+ st7586_formats, ARRAY_SIZE(st7586_formats),
+ &st7586_mode, rotation, bufsize);
+ if (ret)
+ return ret;
/*
* we are using 8-bit data, so we are not actually swapping anything,
@@ -382,16 +376,7 @@ static int st7586_probe(struct spi_device *spi)
* bytes on little-endian systems and causes out of order data to be
* sent to the display).
*/
- mipi->swap_bytes = true;
-
- ret = tinydrm_display_pipe_init(drm, &mipi->pipe, &st7586_pipe_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL,
- st7586_formats, ARRAY_SIZE(st7586_formats),
- &st7586_mode, rotation);
- if (ret)
- return ret;
-
- drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
+ dbi->swap_bytes = true;
drm_mode_config_reset(drm);
@@ -401,9 +386,6 @@ static int st7586_probe(struct spi_device *spi)
spi_set_drvdata(spi, drm);
- DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- drm->mode_config.preferred_depth, rotation);
-
drm_fbdev_generic_setup(drm, 0);
return 0;
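st7586 cannot use plain mipi_dbi_dev_init() because it has a non-RGB565 format list and a packed (three pixels per byte) transfer buffer, hence the _with_formats variant. A sketch of its signature, inferred from the call site above (parameter names are guesses):

int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
				   const struct drm_simple_display_pipe_funcs *funcs,
				   const uint32_t *formats, unsigned int format_count,
				   const struct drm_display_mode *mode,
				   unsigned int rotation, size_t tx_buf_size);

This one call presumably subsumes the open-coded setup deleted above: the mode_config bounds and funcs, the cmdlock init, the tx_buf allocation, the pipe/connector init, and the damage-clips enable.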
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index ce9109e613e0..3f4487c71684 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -19,8 +19,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
+#include <drm/drm_mipi_dbi.h>
#define ST7735R_FRMCTR1 0xb1
#define ST7735R_FRMCTR2 0xb2
@@ -43,7 +42,8 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
int ret, idx;
u8 addr_mode;
@@ -52,28 +52,28 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
DRM_DEBUG_KMS("\n");
- ret = mipi_dbi_poweron_reset(mipi);
+ ret = mipi_dbi_poweron_reset(dbidev);
if (ret)
goto out_exit;
msleep(150);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(500);
- mipi_dbi_command(mipi, ST7735R_FRMCTR1, 0x01, 0x2c, 0x2d);
- mipi_dbi_command(mipi, ST7735R_FRMCTR2, 0x01, 0x2c, 0x2d);
- mipi_dbi_command(mipi, ST7735R_FRMCTR3, 0x01, 0x2c, 0x2d, 0x01, 0x2c,
+ mipi_dbi_command(dbi, ST7735R_FRMCTR1, 0x01, 0x2c, 0x2d);
+ mipi_dbi_command(dbi, ST7735R_FRMCTR2, 0x01, 0x2c, 0x2d);
+ mipi_dbi_command(dbi, ST7735R_FRMCTR3, 0x01, 0x2c, 0x2d, 0x01, 0x2c,
0x2d);
- mipi_dbi_command(mipi, ST7735R_INVCTR, 0x07);
- mipi_dbi_command(mipi, ST7735R_PWCTR1, 0xa2, 0x02, 0x84);
- mipi_dbi_command(mipi, ST7735R_PWCTR2, 0xc5);
- mipi_dbi_command(mipi, ST7735R_PWCTR3, 0x0a, 0x00);
- mipi_dbi_command(mipi, ST7735R_PWCTR4, 0x8a, 0x2a);
- mipi_dbi_command(mipi, ST7735R_PWCTR5, 0x8a, 0xee);
- mipi_dbi_command(mipi, ST7735R_VMCTR1, 0x0e);
- mipi_dbi_command(mipi, MIPI_DCS_EXIT_INVERT_MODE);
- switch (mipi->rotation) {
+ mipi_dbi_command(dbi, ST7735R_INVCTR, 0x07);
+ mipi_dbi_command(dbi, ST7735R_PWCTR1, 0xa2, 0x02, 0x84);
+ mipi_dbi_command(dbi, ST7735R_PWCTR2, 0xc5);
+ mipi_dbi_command(dbi, ST7735R_PWCTR3, 0x0a, 0x00);
+ mipi_dbi_command(dbi, ST7735R_PWCTR4, 0x8a, 0x2a);
+ mipi_dbi_command(dbi, ST7735R_PWCTR5, 0x8a, 0xee);
+ mipi_dbi_command(dbi, ST7735R_VMCTR1, 0x0e);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_INVERT_MODE);
+ switch (dbidev->rotation) {
default:
addr_mode = ST7735R_MX | ST7735R_MY;
break;
@@ -87,24 +87,24 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
addr_mode = ST7735R_MY | ST7735R_MV;
break;
}
- mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
- mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
- mipi_dbi_command(mipi, ST7735R_GAMCTRP1, 0x02, 0x1c, 0x07, 0x12, 0x37,
+ mipi_dbi_command(dbi, ST7735R_GAMCTRP1, 0x02, 0x1c, 0x07, 0x12, 0x37,
0x32, 0x29, 0x2d, 0x29, 0x25, 0x2b, 0x39, 0x00, 0x01,
0x03, 0x10);
- mipi_dbi_command(mipi, ST7735R_GAMCTRN1, 0x03, 0x1d, 0x07, 0x06, 0x2e,
+ mipi_dbi_command(dbi, ST7735R_GAMCTRN1, 0x03, 0x1d, 0x07, 0x06, 0x2e,
0x2c, 0x29, 0x2d, 0x2e, 0x2e, 0x37, 0x3f, 0x00, 0x00,
0x02, 0x10);
- mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
msleep(100);
- mipi_dbi_command(mipi, MIPI_DCS_ENTER_NORMAL_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_ENTER_NORMAL_MODE);
msleep(20);
- mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
out_exit:
drm_dev_exit(idx);
}
@@ -123,8 +123,7 @@ static const struct drm_display_mode jd_t18003_t01_mode = {
DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
static struct drm_driver st7735r_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
.release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
@@ -151,29 +150,31 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
struct drm_device *drm;
- struct mipi_dbi *mipi;
+ struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
- if (!mipi)
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
return -ENOMEM;
- drm = &mipi->drm;
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
if (ret) {
- kfree(mipi);
+ kfree(dbidev);
return ret;
}
drm_mode_config_init(drm);
- mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(mipi->reset)) {
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(mipi->reset);
+ return PTR_ERR(dbi->reset);
}
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
@@ -182,20 +183,20 @@ static int st7735r_probe(struct spi_device *spi)
return PTR_ERR(dc);
}
- mipi->backlight = devm_of_find_backlight(dev);
- if (IS_ERR(mipi->backlight))
- return PTR_ERR(mipi->backlight);
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
device_property_read_u32(dev, "rotation", &rotation);
- ret = mipi_dbi_spi_init(spi, mipi, dc);
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
if (ret)
return ret;
/* Cannot read from Adafruit 1.8" display via SPI */
- mipi->read_commands = NULL;
+ dbi->read_commands = NULL;
- ret = mipi_dbi_init(mipi, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile
deleted file mode 100644
index 01065e920aea..000000000000
--- a/drivers/gpu/drm/tinydrm/core/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-tinydrm-y := tinydrm-pipe.o tinydrm-helpers.o
-
-obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
deleted file mode 100644
index dfeafac4c656..000000000000
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 Noralf Trønnes
- */
-
-#include <linux/backlight.h>
-#include <linux/dma-buf.h>
-#include <linux/module.h>
-#include <linux/pm.h>
-#include <linux/spi/spi.h>
-#include <linux/swab.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_print.h>
-#include <drm/drm_rect.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
-
-static unsigned int spi_max;
-module_param(spi_max, uint, 0400);
-MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size");
-
-#if IS_ENABLED(CONFIG_SPI)
-
-/**
- * tinydrm_spi_max_transfer_size - Determine max SPI transfer size
- * @spi: SPI device
- * @max_len: Maximum buffer size needed (optional)
- *
- * This function returns the maximum size to use for SPI transfers. It checks
- * the SPI master, the optional @max_len, and the module parameter spi_max, and
- * returns the smallest.
- *
- * Returns:
- * Maximum size for SPI transfers
- */
-size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len)
-{
- size_t ret;
-
- ret = min(spi_max_transfer_size(spi), spi->master->max_dma_len);
- if (max_len)
- ret = min(ret, max_len);
- if (spi_max)
- ret = min_t(size_t, ret, spi_max);
- ret &= ~0x3;
- if (ret < 4)
- ret = 4;
-
- return ret;
-}
-EXPORT_SYMBOL(tinydrm_spi_max_transfer_size);
-
-/**
- * tinydrm_spi_bpw_supported - Check if bits per word is supported
- * @spi: SPI device
- * @bpw: Bits per word
- *
- * This function checks to see if the SPI master driver supports @bpw.
- *
- * Returns:
- * True if @bpw is supported, false otherwise.
- */
-bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw)
-{
- u32 bpw_mask = spi->master->bits_per_word_mask;
-
- if (bpw == 8)
- return true;
-
- if (!bpw_mask) {
- dev_warn_once(&spi->dev,
- "bits_per_word_mask not set, assume 8-bit only\n");
- return false;
- }
-
- if (bpw_mask & SPI_BPW_MASK(bpw))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(tinydrm_spi_bpw_supported);
-
-static void
-tinydrm_dbg_spi_print(struct spi_device *spi, struct spi_transfer *tr,
- const void *buf, int idx, bool tx)
-{
- u32 speed_hz = tr->speed_hz ? tr->speed_hz : spi->max_speed_hz;
- char linebuf[3 * 32];
-
- hex_dump_to_buffer(buf, tr->len, 16,
- DIV_ROUND_UP(tr->bits_per_word, 8),
- linebuf, sizeof(linebuf), false);
-
- printk(KERN_DEBUG
- " tr(%i): speed=%u%s, bpw=%i, len=%u, %s_buf=[%s%s]\n", idx,
- speed_hz > 1000000 ? speed_hz / 1000000 : speed_hz / 1000,
- speed_hz > 1000000 ? "MHz" : "kHz", tr->bits_per_word, tr->len,
- tx ? "tx" : "rx", linebuf, tr->len > 16 ? " ..." : "");
-}
-
-/* called through tinydrm_dbg_spi_message() */
-void _tinydrm_dbg_spi_message(struct spi_device *spi, struct spi_message *m)
-{
- struct spi_transfer *tmp;
- int i = 0;
-
- list_for_each_entry(tmp, &m->transfers, transfer_list) {
-
- if (tmp->tx_buf)
- tinydrm_dbg_spi_print(spi, tmp, tmp->tx_buf, i, true);
- if (tmp->rx_buf)
- tinydrm_dbg_spi_print(spi, tmp, tmp->rx_buf, i, false);
- i++;
- }
-}
-EXPORT_SYMBOL(_tinydrm_dbg_spi_message);
-
-/**
- * tinydrm_spi_transfer - SPI transfer helper
- * @spi: SPI device
- * @speed_hz: Override speed (optional)
- * @header: Optional header transfer
- * @bpw: Bits per word
- * @buf: Buffer to transfer
- * @len: Buffer length
- *
- * This SPI transfer helper breaks up the transfer of @buf into chunks which
- * the SPI master driver can handle. If the machine is little-endian and the
- * SPI master driver doesn't support 16 bits per word, it swaps the bytes and
- * does an 8-bit transfer.
- * If @header is set, it is prepended to each SPI message.
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
- struct spi_transfer *header, u8 bpw, const void *buf,
- size_t len)
-{
- struct spi_transfer tr = {
- .bits_per_word = bpw,
- .speed_hz = speed_hz,
- };
- struct spi_message m;
- u16 *swap_buf = NULL;
- size_t max_chunk;
- size_t chunk;
- int ret = 0;
-
- if (WARN_ON_ONCE(bpw != 8 && bpw != 16))
- return -EINVAL;
-
- max_chunk = tinydrm_spi_max_transfer_size(spi, 0);
-
- if (drm_debug & DRM_UT_DRIVER)
- pr_debug("[drm:%s] bpw=%u, max_chunk=%zu, transfers:\n",
- __func__, bpw, max_chunk);
-
- if (bpw == 16 && !tinydrm_spi_bpw_supported(spi, 16)) {
- tr.bits_per_word = 8;
- if (tinydrm_machine_little_endian()) {
- swap_buf = kmalloc(min(len, max_chunk), GFP_KERNEL);
- if (!swap_buf)
- return -ENOMEM;
- }
- }
-
- spi_message_init(&m);
- if (header)
- spi_message_add_tail(header, &m);
- spi_message_add_tail(&tr, &m);
-
- while (len) {
- chunk = min(len, max_chunk);
-
- tr.tx_buf = buf;
- tr.len = chunk;
-
- if (swap_buf) {
- const u16 *buf16 = buf;
- unsigned int i;
-
- for (i = 0; i < chunk / 2; i++)
- swap_buf[i] = swab16(buf16[i]);
-
- tr.tx_buf = swap_buf;
- }
-
- buf += chunk;
- len -= chunk;
-
- tinydrm_dbg_spi_message(spi, &m);
- ret = spi_sync(spi, &m);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(tinydrm_spi_transfer);
-
-#endif /* CONFIG_SPI */
-
-MODULE_LICENSE("GPL");
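For reference, this is how drivers invoked the removed transfer helper; a hypothetical call (spi is a bound spi_device, fb_data/fb_len are placeholders) exercising the 16-bpw path whose byte-swap fallback the deleted kernel-doc describes:

/* Hypothetical call: push a 16-bit framebuffer. The helper splits it into
 * master-sized chunks and, on little-endian hosts whose master lacks 16 bpw
 * support, byte-swaps into an 8 bpw transfer. */
int ret = tinydrm_spi_transfer(spi, 0, NULL, 16, fb_data, fb_len);
if (ret)
	return ret;

Equivalent chunking now lives with the DBI code in drm_mipi_dbi.c.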
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
deleted file mode 100644
index ed798fd95152..000000000000
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ /dev/null
@@ -1,179 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 Noralf Trønnes
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_modes.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_simple_kms_helper.h>
-
-struct tinydrm_connector {
- struct drm_connector base;
- struct drm_display_mode mode;
-};
-
-static inline struct tinydrm_connector *
-to_tinydrm_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct tinydrm_connector, base);
-}
-
-static int tinydrm_connector_get_modes(struct drm_connector *connector)
-{
- struct tinydrm_connector *tconn = to_tinydrm_connector(connector);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &tconn->mode);
- if (!mode) {
- DRM_ERROR("Failed to duplicate mode\n");
- return 0;
- }
-
- if (mode->name[0] == '\0')
- drm_mode_set_name(mode);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
- if (mode->width_mm) {
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
- }
-
- return 1;
-}
-
-static const struct drm_connector_helper_funcs tinydrm_connector_hfuncs = {
- .get_modes = tinydrm_connector_get_modes,
-};
-
-static enum drm_connector_status
-tinydrm_connector_detect(struct drm_connector *connector, bool force)
-{
- if (drm_dev_is_unplugged(connector->dev))
- return connector_status_disconnected;
-
- return connector->status;
-}
-
-static void tinydrm_connector_destroy(struct drm_connector *connector)
-{
- struct tinydrm_connector *tconn = to_tinydrm_connector(connector);
-
- drm_connector_cleanup(connector);
- kfree(tconn);
-}
-
-static const struct drm_connector_funcs tinydrm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .detect = tinydrm_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = tinydrm_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-struct drm_connector *
-tinydrm_connector_create(struct drm_device *drm,
- const struct drm_display_mode *mode,
- int connector_type)
-{
- struct tinydrm_connector *tconn;
- struct drm_connector *connector;
- int ret;
-
- tconn = kzalloc(sizeof(*tconn), GFP_KERNEL);
- if (!tconn)
- return ERR_PTR(-ENOMEM);
-
- drm_mode_copy(&tconn->mode, mode);
- connector = &tconn->base;
-
- drm_connector_helper_add(connector, &tinydrm_connector_hfuncs);
- ret = drm_connector_init(drm, connector, &tinydrm_connector_funcs,
- connector_type);
- if (ret) {
- kfree(tconn);
- return ERR_PTR(ret);
- }
-
- connector->status = connector_status_connected;
-
- return connector;
-}
-
-static int tinydrm_rotate_mode(struct drm_display_mode *mode,
- unsigned int rotation)
-{
- if (rotation == 0 || rotation == 180) {
- return 0;
- } else if (rotation == 90 || rotation == 270) {
- swap(mode->hdisplay, mode->vdisplay);
- swap(mode->hsync_start, mode->vsync_start);
- swap(mode->hsync_end, mode->vsync_end);
- swap(mode->htotal, mode->vtotal);
- swap(mode->width_mm, mode->height_mm);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-/**
- * tinydrm_display_pipe_init - Initialize display pipe
- * @drm: DRM device
- * @pipe: Display pipe
- * @funcs: Display pipe functions
- * @connector_type: Connector type
- * @formats: Array of supported formats (DRM_FORMAT\_\*)
- * @format_count: Number of elements in @formats
- * @mode: Supported mode
- * @rotation: Initial @mode rotation in degrees counter-clockwise
- *
- * This function sets up a &drm_simple_display_pipe with a &drm_connector that
- * has one fixed &drm_display_mode which is rotated according to @rotation.
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int tinydrm_display_pipe_init(struct drm_device *drm,
- struct drm_simple_display_pipe *pipe,
- const struct drm_simple_display_pipe_funcs *funcs,
- int connector_type,
- const uint32_t *formats,
- unsigned int format_count,
- const struct drm_display_mode *mode,
- unsigned int rotation)
-{
- struct drm_display_mode mode_copy;
- struct drm_connector *connector;
- int ret;
- static const uint64_t modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
- };
-
- drm_mode_copy(&mode_copy, mode);
- ret = tinydrm_rotate_mode(&mode_copy, rotation);
- if (ret) {
- DRM_ERROR("Illegal rotation value %u\n", rotation);
- return -EINVAL;
- }
-
- drm->mode_config.min_width = mode_copy.hdisplay;
- drm->mode_config.max_width = mode_copy.hdisplay;
- drm->mode_config.min_height = mode_copy.vdisplay;
- drm->mode_config.max_height = mode_copy.vdisplay;
-
- connector = tinydrm_connector_create(drm, &mode_copy, connector_type);
- if (IS_ERR(connector))
- return PTR_ERR(connector);
-
- return drm_simple_display_pipe_init(drm, pipe, funcs, formats,
- format_count, modifiers, connector);
-}
-EXPORT_SYMBOL(tinydrm_display_pipe_init);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2070e8a57ed8..9157dcc897a2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -160,7 +160,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->bdev->glob->bo_count);
dma_fence_put(bo->moving);
- reservation_object_fini(&bo->ttm_resv);
+ if (!ttm_bo_uses_embedded_gem_object(bo))
+ reservation_object_fini(&bo->base._resv);
mutex_destroy(&bo->wu_mutex);
bo->destroy(bo);
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
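The mechanical substitution running through the remaining TTM hunks is bo->resv -> bo->base.resv and bo->ttm_resv -> bo->base._resv, where base is the struct drm_gem_object now embedded in struct ttm_buffer_object. A sketch of the GEM members involved (field set assumed from the usage here; the real struct is larger):

struct drm_gem_object {
	struct drm_device *dev;			/* NULL for standalone TTM users */
	struct reservation_object *resv;	/* normally points at &_resv */
	struct reservation_object _resv;
	struct drm_vma_offset_node vma_node;
	/* ... */
};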
@@ -172,7 +173,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- reservation_object_assert_held(bo->resv);
+ reservation_object_assert_held(bo->base.resv);
if (!list_empty(&bo->lru))
return;
@@ -243,7 +244,7 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk)
{
- reservation_object_assert_held(bo->resv);
+ reservation_object_assert_held(bo->base.resv);
ttm_bo_del_from_lru(bo);
ttm_bo_add_to_lru(bo);
@@ -276,8 +277,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
if (!pos->first)
continue;
- reservation_object_assert_held(pos->first->resv);
- reservation_object_assert_held(pos->last->resv);
+ reservation_object_assert_held(pos->first->base.resv);
+ reservation_object_assert_held(pos->last->base.resv);
man = &pos->first->bdev->man[TTM_PL_TT];
list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -291,8 +292,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
if (!pos->first)
continue;
- reservation_object_assert_held(pos->first->resv);
- reservation_object_assert_held(pos->last->resv);
+ reservation_object_assert_held(pos->first->base.resv);
+ reservation_object_assert_held(pos->last->base.resv);
man = &pos->first->bdev->man[TTM_PL_VRAM];
list_bulk_move_tail(&man->lru[i], &pos->first->lru,
@@ -306,8 +307,8 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
if (!pos->first)
continue;
- reservation_object_assert_held(pos->first->resv);
- reservation_object_assert_held(pos->last->resv);
+ reservation_object_assert_held(pos->first->base.resv);
+ reservation_object_assert_held(pos->last->base.resv);
lru = &pos->first->bdev->glob->swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
@@ -438,14 +439,14 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
int r;
- if (bo->resv == &bo->ttm_resv)
+ if (bo->base.resv == &bo->base._resv)
return 0;
- BUG_ON(!reservation_object_trylock(&bo->ttm_resv));
+ BUG_ON(!reservation_object_trylock(&bo->base._resv));
- r = reservation_object_copy_fences(&bo->ttm_resv, bo->resv);
+ r = reservation_object_copy_fences(&bo->base._resv, bo->base.resv);
if (r)
- reservation_object_unlock(&bo->ttm_resv);
+ reservation_object_unlock(&bo->base._resv);
return r;
}
@@ -456,14 +457,14 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
struct dma_fence *fence;
int i;
- fobj = reservation_object_get_list(&bo->ttm_resv);
- fence = reservation_object_get_excl(&bo->ttm_resv);
+ fobj = reservation_object_get_list(&bo->base._resv);
+ fence = reservation_object_get_excl(&bo->base._resv);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(bo->resv));
+ reservation_object_held(bo->base.resv));
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
@@ -481,23 +482,23 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
/* Last resort, if we fail to allocate memory for the
* fences, block for the BO to become idle
*/
- reservation_object_wait_timeout_rcu(bo->resv, true, false,
+ reservation_object_wait_timeout_rcu(bo->base.resv, true, false,
30 * HZ);
spin_lock(&glob->lru_lock);
goto error;
}
spin_lock(&glob->lru_lock);
- ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
+ ret = reservation_object_trylock(bo->base.resv) ? 0 : -EBUSY;
if (!ret) {
- if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
+ if (reservation_object_test_signaled_rcu(&bo->base._resv, true)) {
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
- if (bo->resv != &bo->ttm_resv)
- reservation_object_unlock(&bo->ttm_resv);
+ if (bo->base.resv != &bo->base._resv)
+ reservation_object_unlock(&bo->base._resv);
ttm_bo_cleanup_memtype_use(bo);
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
return;
}
@@ -513,10 +514,10 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
}
- if (bo->resv != &bo->ttm_resv)
- reservation_object_unlock(&bo->ttm_resv);
+ if (bo->base.resv != &bo->base._resv)
+ reservation_object_unlock(&bo->base._resv);
error:
kref_get(&bo->list_kref);
@@ -549,9 +550,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
int ret;
if (unlikely(list_empty(&bo->ddestroy)))
- resv = bo->resv;
+ resv = bo->base.resv;
else
- resv = &bo->ttm_resv;
+ resv = &bo->base._resv;
if (reservation_object_test_signaled_rcu(resv, true))
ret = 0;
@@ -562,7 +563,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
long lret;
if (unlock_resv)
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
spin_unlock(&glob->lru_lock);
lret = reservation_object_wait_timeout_rcu(resv, true,
@@ -575,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
return -EBUSY;
spin_lock(&glob->lru_lock);
- if (unlock_resv && !reservation_object_trylock(bo->resv)) {
+ if (unlock_resv && !reservation_object_trylock(bo->base.resv)) {
/*
* We raced and lost; someone else holds the reservation now,
* and is probably busy in ttm_bo_cleanup_memtype_use.
@@ -592,7 +593,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
spin_unlock(&glob->lru_lock);
return ret;
}
@@ -605,7 +606,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
return 0;
}
@@ -631,14 +632,14 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&bo->list_kref);
list_move_tail(&bo->ddestroy, &removed);
- if (remove_all || bo->resv != &bo->ttm_resv) {
+ if (remove_all || bo->base.resv != &bo->base._resv) {
spin_unlock(&glob->lru_lock);
- reservation_object_lock(bo->resv, NULL);
+ reservation_object_lock(bo->base.resv, NULL);
spin_lock(&glob->lru_lock);
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
- } else if (reservation_object_trylock(bo->resv)) {
+ } else if (reservation_object_trylock(bo->base.resv)) {
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
} else {
spin_unlock(&glob->lru_lock);
@@ -674,7 +675,7 @@ static void ttm_bo_release(struct kref *kref)
if (bo->bdev->driver->release_notify)
bo->bdev->driver->release_notify(bo);
- drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
+ drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -710,7 +711,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_placement placement;
int ret = 0;
- reservation_object_assert_held(bo->resv);
+ reservation_object_assert_held(bo->base.resv);
placement.num_placement = 0;
placement.num_busy_placement = 0;
@@ -780,8 +781,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
{
bool ret = false;
- if (bo->resv == ctx->resv) {
- reservation_object_assert_held(bo->resv);
+ if (bo->base.resv == ctx->resv) {
+ reservation_object_assert_held(bo->base.resv);
if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
|| !list_empty(&bo->ddestroy))
ret = true;
@@ -789,7 +790,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
if (busy)
*busy = false;
} else {
- ret = reservation_object_trylock(bo->resv);
+ ret = reservation_object_trylock(bo->base.resv);
*locked = ret;
if (busy)
*busy = !ret;
@@ -817,10 +818,10 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
return -EBUSY;
if (ctx->interruptible)
- r = reservation_object_lock_interruptible(busy_bo->resv,
+ r = reservation_object_lock_interruptible(busy_bo->base.resv,
ticket);
else
- r = reservation_object_lock(busy_bo->resv, ticket);
+ r = reservation_object_lock(busy_bo->base.resv, ticket);
/*
* TODO: It would be better to keep the BO locked until allocation is at
@@ -828,7 +829,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
* of TTM.
*/
if (!r)
- reservation_object_unlock(busy_bo->resv);
+ reservation_object_unlock(busy_bo->base.resv);
return r == -EDEADLK ? -EBUSY : r;
}
@@ -853,8 +854,8 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
&busy)) {
- if (busy && !busy_bo &&
- bo->resv->lock.ctx != ticket)
+ if (busy && !busy_bo && ticket !=
+ reservation_object_locking_ctx(bo->base.resv))
busy_bo = bo;
continue;
}
@@ -862,7 +863,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (place && !bdev->driver->eviction_valuable(bo,
place)) {
if (locked)
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
continue;
}
break;
@@ -934,9 +935,9 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
spin_unlock(&man->move_lock);
if (fence) {
- reservation_object_add_shared_fence(bo->resv, fence);
+ reservation_object_add_shared_fence(bo->base.resv, fence);
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ ret = reservation_object_reserve_shared(bo->base.resv, 1);
if (unlikely(ret)) {
dma_fence_put(fence);
return ret;
@@ -960,8 +961,10 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct ww_acquire_ctx *ticket;
int ret;
+ ticket = reservation_object_locking_ctx(bo->base.resv);
do {
ret = (*man->func->get_node)(man, bo, place, mem);
if (unlikely(ret != 0))
@@ -969,7 +972,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
if (mem->mm_node)
break;
ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
- bo->resv->lock.ctx);
+ ticket);
if (unlikely(ret != 0))
return ret;
} while (1);
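The open-coded bo->resv->lock.ctx dereference gives way to an accessor; presumably a thin wrapper over the ww_mutex owner context, along these lines:

/* Presumed shape of the accessor in the reservation header. */
static inline struct ww_acquire_ctx *
reservation_object_locking_ctx(struct reservation_object *obj)
{
	return READ_ONCE(obj->lock.ctx);
}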
@@ -1091,7 +1094,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool type_found = false;
int i, ret;
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ ret = reservation_object_reserve_shared(bo->base.resv, 1);
if (unlikely(ret))
return ret;
@@ -1172,7 +1175,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
int ret = 0;
struct ttm_mem_reg mem;
- reservation_object_assert_held(bo->resv);
+ reservation_object_assert_held(bo->base.resv);
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
@@ -1242,7 +1245,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
int ret;
uint32_t new_flags;
- reservation_object_assert_held(bo->resv);
+ reservation_object_assert_held(bo->base.resv);
/*
* Check whether we need to move buffer.
*/
@@ -1332,14 +1335,20 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->acc_size = acc_size;
bo->sg = sg;
if (resv) {
- bo->resv = resv;
- reservation_object_assert_held(bo->resv);
+ bo->base.resv = resv;
+ reservation_object_assert_held(bo->base.resv);
} else {
- bo->resv = &bo->ttm_resv;
+ bo->base.resv = &bo->base._resv;
+ }
+ if (!ttm_bo_uses_embedded_gem_object(bo)) {
+ /*
+ * bo.gem is not initialized, so we have to set up the
+ * struct elements we want to use regardless.
+ */
+ reservation_object_init(&bo->base._resv);
+ drm_vma_node_reset(&bo->base.vma_node);
}
- reservation_object_init(&bo->ttm_resv);
atomic_inc(&bo->bdev->glob->bo_count);
- drm_vma_node_reset(&bo->vma_node);
/*
* For ttm_bo_type_device buffers, allocate
@@ -1347,14 +1356,14 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
*/
if (bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg)
- ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+ ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node,
bo->mem.num_pages);
/* passed reservation objects should already be locked,
* since otherwise lockdep will be angered in radeon.
*/
if (!resv) {
- locked = reservation_object_trylock(bo->resv);
+ locked = reservation_object_trylock(bo->base.resv);
WARN_ON(!locked);
}
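ttm_bo_uses_embedded_gem_object() gates the manual init (and, in ttm_bo_release_list above, the fini) of _resv and vma_node. Consistent with the drm_gem_object sketch earlier, it presumably just checks whether a GEM-aware driver filled in base.dev before calling in:

/* Presumed helper: GEM-based drivers initialize bo->base (including .dev)
 * before ttm_bo_init_reserved(); standalone TTM users do not, so TTM must
 * set up _resv and vma_node itself. */
static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
{
	return bo->base.dev != NULL;
}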
@@ -1775,7 +1784,7 @@ void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
+ drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
ttm_mem_io_free_vm(bo);
}
@@ -1798,13 +1807,13 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
long timeout = 15 * HZ;
if (no_wait) {
- if (reservation_object_test_signaled_rcu(bo->resv, true))
+ if (reservation_object_test_signaled_rcu(bo->base.resv, true))
return 0;
else
return -EBUSY;
}
- timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
+ timeout = reservation_object_wait_timeout_rcu(bo->base.resv, true,
interruptible, timeout);
if (timeout < 0)
return timeout;
@@ -1812,7 +1821,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
if (timeout == 0)
return -EBUSY;
- reservation_object_add_excl_fence(bo->resv, NULL);
+ reservation_object_add_excl_fence(bo->base.resv, NULL);
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
@@ -1928,7 +1937,7 @@ out:
* already swapped buffer.
*/
if (locked)
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -1966,14 +1975,14 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
ret = mutex_lock_interruptible(&bo->wu_mutex);
if (unlikely(ret != 0))
return -ERESTARTSYS;
- if (!ww_mutex_is_locked(&bo->resv->lock))
+ if (!reservation_object_is_locked(bo->base.resv))
goto out_unlock;
- ret = reservation_object_lock_interruptible(bo->resv, NULL);
+ ret = reservation_object_lock_interruptible(bo->base.resv, NULL);
if (ret == -EINTR)
ret = -ERESTARTSYS;
if (unlikely(ret != 0))
goto out_unlock;
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
out_unlock:
mutex_unlock(&bo->wu_mutex);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 9f918b992f7e..425a6d627b30 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -510,16 +510,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
mutex_init(&fbo->base.wu_mutex);
fbo->base.moving = NULL;
- drm_vma_node_reset(&fbo->base.vma_node);
+ drm_vma_node_reset(&fbo->base.base.vma_node);
atomic_set(&fbo->base.cpu_writers, 0);
kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- fbo->base.resv = &fbo->base.ttm_resv;
- reservation_object_init(fbo->base.resv);
- ret = reservation_object_trylock(fbo->base.resv);
+ fbo->base.base.resv = &fbo->base.base._resv;
+ reservation_object_init(fbo->base.base.resv);
+ ret = reservation_object_trylock(fbo->base.base.resv);
WARN_ON(!ret);
*new_obj = &fbo->base;
@@ -689,7 +689,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
int ret;
struct ttm_buffer_object *ghost_obj;
- reservation_object_add_excl_fence(bo->resv, fence);
+ reservation_object_add_excl_fence(bo->base.resv, fence);
if (evict) {
ret = ttm_bo_wait(bo, false, false);
if (ret)
@@ -716,7 +716,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
if (ret)
return ret;
- reservation_object_add_excl_fence(ghost_obj->resv, fence);
+ reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -752,7 +752,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
int ret;
- reservation_object_add_excl_fence(bo->resv, fence);
+ reservation_object_add_excl_fence(bo->base.resv, fence);
if (!evict) {
struct ttm_buffer_object *ghost_obj;
@@ -772,7 +772,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
if (ret)
return ret;
- reservation_object_add_excl_fence(ghost_obj->resv, fence);
+ reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
/**
* If we're not moving to fixed memory, the TTM object
@@ -841,7 +841,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
return ret;
- ret = reservation_object_copy_fences(ghost->resv, bo->resv);
+ ret = reservation_object_copy_fences(ghost->base.resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
if (ret)
ttm_bo_wait(bo, false, false);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6dacff49c1cc..85f5bcbe0c76 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -71,7 +71,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
ttm_bo_get(bo);
up_read(&vmf->vma->vm_mm->mmap_sem);
(void) dma_fence_wait(bo->moving, true);
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
ttm_bo_put(bo);
goto out_unlock;
}
@@ -131,7 +131,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
- if (unlikely(!reservation_object_trylock(bo->resv))) {
+ if (unlikely(!reservation_object_trylock(bo->base.resv))) {
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
@@ -211,9 +211,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+ vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
page_last = vma_pages(vma) + vma->vm_pgoff -
- drm_vma_node_start(&bo->vma_node);
+ drm_vma_node_start(&bo->base.vma_node);
if (unlikely(page_offset >= bo->num_pages)) {
ret = VM_FAULT_SIGBUS;
@@ -267,7 +267,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
} else if (unlikely(!page)) {
break;
}
- page->index = drm_vma_node_start(&bo->vma_node) +
+ page->index = drm_vma_node_start(&bo->base.vma_node) +
page_offset;
pfn = page_to_pfn(page);
}
@@ -296,7 +296,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
out_io_unlock:
ttm_mem_io_unlock(man);
out_unlock:
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
return ret;
}
@@ -413,7 +413,8 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
if (likely(node)) {
- bo = container_of(node, struct ttm_buffer_object, vma_node);
+ bo = container_of(node, struct ttm_buffer_object,
+ base.vma_node);
bo = ttm_bo_get_unless_zero(bo);
}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 957ec375a4ba..3aefe72fb5cb 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -39,7 +39,7 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
list_for_each_entry_continue_reverse(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
}
}
@@ -71,7 +71,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
@@ -114,7 +114,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
ret = -EBUSY;
@@ -130,7 +130,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (!entry->num_shared)
continue;
- ret = reservation_object_reserve_shared(bo->resv,
+ ret = reservation_object_reserve_shared(bo->base.resv,
entry->num_shared);
if (!ret)
continue;
@@ -144,16 +144,16 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
if (ret == -EDEADLK) {
if (intr) {
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- ticket);
+ ret = reservation_object_lock_slow_interruptible(bo->base.resv,
+ ticket);
} else {
- ww_mutex_lock_slow(&bo->resv->lock, ticket);
+ reservation_object_lock_slow(bo->base.resv, ticket);
ret = 0;
}
}
if (!ret && entry->num_shared)
- ret = reservation_object_reserve_shared(bo->resv,
+ ret = reservation_object_reserve_shared(bo->base.resv,
entry->num_shared);
if (unlikely(ret != 0)) {
@@ -201,14 +201,14 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
bo = entry->bo;
if (entry->num_shared)
- reservation_object_add_shared_fence(bo->resv, fence);
+ reservation_object_add_shared_fence(bo->base.resv, fence);
else
- reservation_object_add_excl_fence(bo->resv, fence);
+ reservation_object_add_excl_fence(bo->base.resv, fence);
if (list_empty(&bo->lru))
ttm_bo_add_to_lru(bo);
else
ttm_bo_move_to_lru_tail(bo, NULL);
- reservation_object_unlock(bo->resv);
+ reservation_object_unlock(bo->base.resv);
}
spin_unlock(&glob->lru_lock);
if (ticket)
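The reservation_object_lock_slow*() helpers substituted here are assumed to be thin wrappers over the embedded ww_mutex, roughly:

static inline void reservation_object_lock_slow(struct reservation_object *obj,
						struct ww_acquire_ctx *ctx)
{
	/* same semantics as the open-coded ww_mutex_lock_slow() it replaces */
	ww_mutex_lock_slow(&obj->lock, ctx);
}

which keeps callers from reaching into bo->resv->lock directly.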
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e3a0691582ff..00b4a3337840 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -48,7 +48,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
struct ttm_bo_device *bdev = bo->bdev;
uint32_t page_flags = 0;
- reservation_object_assert_held(bo->resv);
+ reservation_object_assert_held(bo->base.resv);
if (bdev->need_dma32)
page_flags |= TTM_PAGE_FLAG_DMA32;
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 58fd31030834..d733bbc4ac0e 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -9,16 +9,18 @@
* Copyright (C) 2011 Texas Instruments
* Copyright (C) 2017 Eric Anholt
*/
+
#include <linux/clk.h>
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
-#include <drm/drmP.h>
-#include <drm/drm_panel.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_vblank.h>
#include "tve200_drm.h"
diff --git a/drivers/gpu/drm/tve200/tve200_drm.h b/drivers/gpu/drm/tve200/tve200_drm.h
index 62061b518397..5420b52ea16b 100644
--- a/drivers/gpu/drm/tve200/tve200_drm.h
+++ b/drivers/gpu/drm/tve200/tve200_drm.h
@@ -13,6 +13,18 @@
#ifndef _TVE200_DRM_H_
#define _TVE200_DRM_H_
+#include <linux/irqreturn.h>
+
+#include <drm/drm_simple_kms_helper.h>
+
+struct clk;
+struct drm_bridge;
+struct drm_connector;
+struct drm_device;
+struct drm_file;
+struct drm_mode_create_dumb;
+struct drm_panel;
+
/* Bits 2-31 are valid physical base addresses */
#define TVE200_Y_FRAME_BASE_ADDR 0x00
#define TVE200_U_FRAME_BASE_ADDR 0x04
@@ -89,9 +101,6 @@
#define TVE200_CTRL_4 0x24
#define TVE200_CTRL_4_RESET BIT(0) /* triggers reset of TVE200 */
-#include <drm/drm_gem.h>
-#include <drm/drm_simple_kms_helper.h>
-
struct tve200_drm_dev_private {
struct drm_device *drm;
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 6e695fbeb6bc..416f24823c0a 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -37,9 +37,9 @@
#include <linux/slab.h>
#include <linux/version.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
@@ -47,6 +47,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "tve200_drm.h"
@@ -137,8 +138,7 @@ finish:
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver tve200_drm_driver = {
- .driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.ioctls = NULL,
.fops = &drm_fops,
.name = "tve200",
@@ -153,8 +153,6 @@ static struct drm_driver tve200_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 921561875d7f..ddb61a60c610 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -7,11 +7,9 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
+
#include "udl_connector.h"
#include "udl_drv.h"
diff --git a/drivers/gpu/drm/udl/udl_connector.h b/drivers/gpu/drm/udl/udl_connector.h
index 0fb0db5c4612..7f2d392df173 100644
--- a/drivers/gpu/drm/udl/udl_connector.h
+++ b/drivers/gpu/drm/udl/udl_connector.h
@@ -3,6 +3,8 @@
#include <drm/drm_crtc.h>
+struct edid;
+
struct udl_drm_connector {
struct drm_connector connector;
/* last udl_detect edid */
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index a28892146f7c..3108e9a9234b 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -5,11 +5,13 @@
* Copyright (c) 2014 The Chromium OS Authors
*/
-#include <drm/drmP.h>
-#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <drm/drm_prime.h>
+
+#include "udl_drv.h"
+
struct udl_drm_dmabuf_attachment {
struct sg_table sgt;
enum dma_data_direction dir;
@@ -170,8 +172,7 @@ static const struct dma_buf_ops udl_dmabuf_ops = {
.release = drm_gem_dmabuf_release,
};
-struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags)
+struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -180,7 +181,7 @@ struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
exp_info.flags = flags;
exp_info.priv = obj;
- return drm_gem_dmabuf_export(dev, &exp_info);
+ return drm_gem_dmabuf_export(obj->dev, &exp_info);
}
static int udl_prime_create(struct drm_device *dev,
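The export hook now takes only the GEM object and recovers the device from obj->dev. A hedged caller sketch under the new signature:

struct dma_buf *buf = obj->dev->driver->gem_prime_export(obj, flags);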
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 4a49facb608d..8426669433e4 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -4,9 +4,14 @@
*/
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_print.h>
+
#include "udl_drv.h"
static int udl_usb_suspend(struct usb_interface *interface,
@@ -54,7 +59,7 @@ static void udl_driver_release(struct drm_device *dev)
}
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
.release = udl_driver_release,
/* gem hooks */
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index a928801026c1..12a970fd9a87 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -11,9 +11,15 @@
#ifndef UDL_DRV_H
#define UDL_DRV_H
+#include <linux/mm_types.h>
#include <linux/usb.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
-#include <linux/mm_types.h>
+
+struct drm_encoder;
+struct drm_mode_create_dumb;
#define DRIVER_NAME "udl"
#define DRIVER_DESC "DisplayLink"
@@ -126,8 +132,7 @@ int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
void udl_gem_free_object(struct drm_gem_object *gem_obj);
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size);
-struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+struct dma_buf *udl_gem_prime_export(struct drm_gem_object *obj, int flags);
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
diff --git a/drivers/gpu/drm/udl/udl_encoder.c b/drivers/gpu/drm/udl/udl_encoder.c
index f87989e6ee51..203f041e737c 100644
--- a/drivers/gpu/drm/udl/udl_encoder.c
+++ b/drivers/gpu/drm/udl/udl_encoder.c
@@ -7,9 +7,9 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
#include "udl_drv.h"
/* dummy encoder */
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index e1116bf7b9d7..ef3504d06343 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -7,18 +7,17 @@
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
+
+#include <linux/moduleparam.h>
#include <linux/dma-buf.h>
-#include <linux/mem_encrypt.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include "udl_drv.h"
-
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+
+#include "udl_drv.h"
#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c6ca2c09bc97..b23a5c2fcd80 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -3,10 +3,13 @@
* Copyright (C) 2012 Red Hat
*/
-#include <drm/drmP.h>
-#include "udl_drv.h"
-#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_mode.h>
+#include <drm/drm_prime.h>
+
+#include "udl_drv.h"
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
size_t size)
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 1a99c7647444..4e854e017390 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -7,9 +7,11 @@
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+
+#include <drm/drm.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+
#include "udl_drv.h"
/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 793722d0c8cd..bc1ab6060dc6 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -9,10 +9,10 @@
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_vblank.h>
+
#include "udl_drv.h"
/*
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 6837f592f6ba..1973a4c1e358 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -7,12 +7,8 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
#include <asm/unaligned.h>
-#include <drm/drmP.h>
#include "udl_drv.h"
#define MAX_CMD_PIXELS 255
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 78a78938e81f..9e953ce64ef7 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -6,7 +6,8 @@
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_debugfs.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index fea597f4db8a..3506ae2723ae 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -14,16 +14,19 @@
#include <linux/clk.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <uapi/drm/v3d_drm.h>
-#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
@@ -188,7 +191,6 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
static struct drm_driver v3d_drm_driver = {
.driver_features = (DRIVER_GEM |
DRIVER_RENDER |
- DRIVER_PRIME |
DRIVER_SYNCOBJ),
.open = v3d_open,
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9aad9da1eb11..9a35c555ec52 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -1,14 +1,23 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */
-#include <linux/mm_types.h>
-#include <drm/drmP.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
+
#include "uapi/drm/v3d_drm.h"
+struct clk;
+struct device;
+struct platform_device;
+struct reset_control;
+
#define GMP_GRANULARITY (128 * 1024)
/* Enum for each of the V3D queues. */
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 27e0f87075d9..79744137d89f 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -1,17 +1,19 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */
-#include <drm/drmP.h>
-#include <drm/drm_syncobj.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <linux/device.h>
-#include <linux/io.h>
#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_syncobj.h>
+#include <uapi/drm/v3d_drm.h>
-#include "uapi/drm/v3d_drm.h"
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 268d8a889ac5..662e67279a7b 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -13,6 +13,8 @@
* current job can make progress.
*/
+#include <linux/platform_device.h>
+
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
index 1224f313af0c..55d798c76b21 100644
--- a/drivers/gpu/drm/vboxvideo/Makefile
+++ b/drivers/gpu/drm/vboxvideo/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
- vbox_mode.o vbox_prime.o vbox_ttm.o
+ vbox_mode.o vbox_ttm.o
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 02537ab9cc08..6189ea89bb71 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -196,7 +196,7 @@ static const struct file_operations vbox_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
@@ -210,17 +210,6 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
DRM_GEM_VRAM_DRIVER,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_pin = vbox_gem_prime_pin,
- .gem_prime_unpin = vbox_gem_prime_unpin,
- .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
- .gem_prime_vmap = vbox_gem_prime_vmap,
- .gem_prime_vunmap = vbox_gem_prime_vunmap,
- .gem_prime_mmap = vbox_gem_prime_mmap,
};
static int __init vbox_init(void)
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index 9028f946bc06..e8cb9efc6088 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -167,18 +167,6 @@ void vbox_mm_fini(struct vbox_private *vbox);
int vbox_gem_create(struct vbox_private *vbox,
u32 size, bool iskernel, struct drm_gem_object **obj);
-/* vbox_prime.c */
-int vbox_gem_prime_pin(struct drm_gem_object *obj);
-void vbox_gem_prime_unpin(struct drm_gem_object *obj);
-struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *vbox_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table);
-void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
-void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int vbox_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *area);
-
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
void vbox_irq_fini(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 18693e2bf72a..02fa8277ff1e 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -292,7 +292,7 @@ int vbox_gem_create(struct vbox_private *vbox,
return ret;
}
- *obj = &gbo->gem;
+ *obj = &gbo->bo.base;
return 0;
}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_prime.c b/drivers/gpu/drm/vboxvideo/vbox_prime.c
deleted file mode 100644
index 702b1aa53494..000000000000
--- a/drivers/gpu/drm/vboxvideo/vbox_prime.c
+++ /dev/null
@@ -1,56 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright (C) 2017 Oracle Corporation
- * Copyright 2017 Canonical
- * Authors: Andreas Pokorny
- */
-
-#include "vbox_drv.h"
-
-/*
- * Based on qxl_prime.c:
- * Empty Implementations as there should not be any other driver for a virtual
- * device that might share buffers with vboxvideo
- */
-
-int vbox_gem_prime_pin(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return -ENODEV;
-}
-
-void vbox_gem_prime_unpin(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
-}
-
-struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
-struct drm_gem_object *vbox_gem_prime_import_sg_table(
- struct drm_device *dev, struct dma_buf_attachment *attach,
- struct sg_table *table)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
-void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
- return ERR_PTR(-ENODEV);
-}
-
-void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
- WARN_ONCE(1, "not implemented");
-}
-
-int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
-{
- WARN_ONCE(1, "not implemented");
- return -ENODEV;
-}
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index a75a2f98b82f..72d30d90b856 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -655,8 +655,7 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
schedule_work(&vc4->bo_cache.time_work);
}
-struct dma_buf *
-vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
+struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{
struct vc4_bo *bo = to_vc4_bo(obj);
struct dma_buf *dmabuf;
@@ -678,7 +677,7 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
return ERR_PTR(ret);
}
- dmabuf = drm_gem_prime_export(dev, obj, flags);
+ dmabuf = drm_gem_prime_export(obj, flags);
if (IS_ERR(dmabuf))
vc4_bo_dec_usecnt(bo);
@@ -791,8 +790,6 @@ vc4_prime_import_sg_table(struct drm_device *dev,
if (IS_ERR(obj))
return obj;
- obj->resv = attach->dmabuf->resv;
-
return obj;
}
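The explicit resv override can go because the PRIME core is assumed to alias the imported object's reservation to the dma-buf's; a hypothetical helper (import_sketch is illustrative, not a kernel function) showing the intended invariant:

static struct drm_gem_object *import_sketch(struct drm_device *dev,
					    struct dma_buf *dma_buf,
					    struct dma_buf_attachment *attach,
					    struct sg_table *sgt)
{
	struct drm_gem_object *obj;

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return obj;
	obj->resv = dma_buf->resv;	/* fences are shared with the exporter */
	return obj;
}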
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 5ea8db74418a..f1f0a7c87771 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -29,15 +29,18 @@
* ones that set the clock.
*/
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/of_device.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <linux/component.h>
-#include <linux/of_device.h>
+#include <drm/drm_vblank.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 4829a00c16b0..b61b2d3407b5 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -7,7 +7,6 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
-#include <drm/drmP.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index bf11930e40e1..048c70a7b592 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -23,16 +23,21 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
-#include <drm/drm_atomic_helper.h>
+#include <drm/drm_vblank.h>
#include "uapi/drm/vc4_drm.h"
+
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -177,7 +182,6 @@ static struct drm_driver vc4_drm_driver = {
DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_RENDER |
- DRIVER_PRIME |
DRIVER_SYNCOBJ),
.open = vc4_open,
.postclose = vc4_close,
@@ -199,7 +203,6 @@ static struct drm_driver vc4_drm_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_export = vc4_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = vc4_prime_import_sg_table,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 616c011bcb82..6627b20c99e9 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -3,16 +3,23 @@
* Copyright (C) 2015 Broadcom
*/
-#include <linux/mm_types.h>
-#include <drm/drmP.h>
-#include <drm/drm_util.h>
+#include <linux/delay.h>
+#include <linux/refcount.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_syncobj.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_modeset_lock.h>
#include "uapi/drm/vc4_drm.h"
+struct drm_device;
+struct drm_gem_object;
+
/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
* this.
*/
@@ -705,8 +712,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
int vc4_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-struct dma_buf *vc4_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj, int flags);
+struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 1db39b570cf4..c78fa8144776 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -18,22 +18,25 @@
* hopefully present.
*/
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/component.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 84795d928f20..b72b760e3018 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -29,6 +29,8 @@
#include <linux/sched/signal.h>
#include <linux/dma-fence-array.h>
+#include <drm/drm_syncobj.h>
+
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 0f633bef6b9d..9936b15d0bf1 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -19,8 +19,11 @@
* each CRTC.
*/
-#include <drm/drm_atomic_helper.h>
#include <linux/component.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic_helper.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 70d079b7b39f..78d4fb0499e3 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -11,12 +11,14 @@
* crtc, HDMI encoder).
*/
-#include <drm/drm_crtc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 0a0207c350a5..5e5f90810aca 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -17,11 +17,14 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_atomic_uapi.h>
#include "uapi/drm/vc4_drm.h"
+
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -1123,7 +1126,6 @@ static int vc4_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_bo *bo;
- struct dma_fence *fence;
int ret;
if (!state->fb)
@@ -1131,8 +1133,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
- fence = reservation_object_get_excl_rcu(bo->base.base.resv);
- drm_atomic_set_fence_for_plane(state, fence);
+ drm_gem_fb_prepare_fb(plane, state);
if (plane->state->fb == state->fb)
return 0;
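drm_gem_fb_prepare_fb() is assumed to perform the same fence dance the driver used to open-code; roughly:

int drm_gem_fb_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct drm_gem_object *obj;

	if (!state->fb)
		return 0;
	obj = drm_gem_fb_get_obj(state->fb, 0);
	drm_atomic_set_fence_for_plane(state,
			reservation_object_get_excl_rcu(obj->resv));
	return 0;
}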
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 96f91c1b4b6e..1ce4d7142b6e 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -7,18 +7,20 @@
* Boris Brezillon <boris.brezillon@bootlin.com>
*/
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_panel.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_writeback.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_writeback.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index fee4f90e71aa..cea77a21b205 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -7,7 +7,11 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+
+#include <drm/drm_irq.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 11a8f99ba18c..5bd60ded3d81 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -30,10 +30,17 @@
* software renderer and the X server for efficient buffer sharing.
*/
+#include <linux/dma-buf.h>
#include <linux/module.h>
-#include <linux/ramfs.h>
+#include <linux/platform_device.h>
#include <linux/shmem_fs.h>
-#include <linux/dma-buf.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+
#include "vgem_drv.h"
#define DRIVER_NAME "vgem"
@@ -214,7 +221,7 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_object->size;
args->pitch = pitch;
- DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
+ DRM_DEBUG("Created object of size %lld\n", size);
return 0;
}
@@ -246,8 +253,8 @@ unref:
}
static struct drm_ioctl_desc vgem_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -427,8 +434,7 @@ static void vgem_release(struct drm_device *dev)
}
static struct drm_driver vgem_driver = {
- .driver_features = DRIVER_GEM | DRIVER_PRIME |
- DRIVER_RENDER,
+ .driver_features = DRIVER_GEM | DRIVER_RENDER,
.release = vgem_release,
.open = vgem_open,
.postclose = vgem_postclose,
@@ -446,7 +452,6 @@ static struct drm_driver vgem_driver = {
.gem_prime_pin = vgem_prime_pin,
.gem_prime_unpin = vgem_prime_unpin,
.gem_prime_import = vgem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
.gem_prime_get_sg_table = vgem_prime_get_sg_table,
.gem_prime_vmap = vgem_prime_vmap,
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index 5c8f6d619ff3..0ed300317f87 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -29,7 +29,6 @@
#ifndef _VGEM_DRV_H_
#define _VGEM_DRV_H_
-#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/drm_cache.h>
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index eb17c0cd3727..d8630467549c 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -23,6 +23,8 @@
#include <linux/dma-buf.h>
#include <linux/reservation.h>
+#include <drm/drm_file.h>
+
#include "vgem_drv.h"
#define VGEM_FENCE_TIMEOUT (10*HZ)
@@ -100,22 +102,6 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
return &fence->base;
}
-static int attach_dmabuf(struct drm_device *dev,
- struct drm_gem_object *obj)
-{
- struct dma_buf *dmabuf;
-
- if (obj->dma_buf)
- return 0;
-
- dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- obj->dma_buf = dmabuf;
- return 0;
-}
-
/*
* vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
*
@@ -157,10 +143,6 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
if (!obj)
return -ENOENT;
- ret = attach_dmabuf(dev, obj);
- if (ret)
- goto err;
-
fence = vgem_fence_create(vfile, arg->flags);
if (!fence) {
ret = -ENOMEM;
@@ -168,7 +150,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
}
/* Check for a conflicting fence */
- resv = obj->dma_buf->resv;
+ resv = obj->resv;
if (!reservation_object_test_signaled_rcu(resv,
arg->flags & VGEM_FENCE_WRITE)) {
ret = -EBUSY;
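Dropping attach_dmabuf() relies on every GEM object now carrying a usable reservation object of its own; the assumed core layout, abbreviated:

struct drm_gem_object {
	/* ... */
	struct reservation_object *resv;	/* &_resv, or the dma-buf's on import */
	struct reservation_object _resv;
};

so a fence can be attached without forcing a dma-buf export first.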
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index d17d8f245c1a..1208445e341d 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -34,8 +34,15 @@
* Thomas Hellstrom.
*/
-#include <drm/drmP.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <drm/drm.h>
+#include <drm/drm_agpsupport.h>
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
#include "via_3d_reg.h"
@@ -430,14 +437,14 @@ static int via_hook_segment(drm_via_private_t *dev_priv,
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
count = 10000000;
while (diff == 0 && count--) {
- paused = (VIA_READ(0x41c) & 0x80000000);
+ paused = (via_read(dev_priv, 0x41c) & 0x80000000);
if (paused)
break;
reader = *(dev_priv->hw_addr_ptr);
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
}
- paused = VIA_READ(0x41c) & 0x80000000;
+ paused = via_read(dev_priv, 0x41c) & 0x80000000;
if (paused && !no_pci_fire) {
reader = *(dev_priv->hw_addr_ptr);
@@ -454,10 +461,10 @@ static int via_hook_segment(drm_via_private_t *dev_priv,
* doesn't make a difference.
*/
- VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
- VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
- VIA_READ(VIA_REG_TRANSPACE);
+ via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
+ via_read(dev_priv, VIA_REG_TRANSPACE);
}
}
return paused;
@@ -467,10 +474,10 @@ static int via_wait_idle(drm_via_private_t *dev_priv)
{
int count = 10000000;
- while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
+ while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
;
- while (count && (VIA_READ(VIA_REG_STATUS) &
+ while (count && (via_read(dev_priv, VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
VIA_3D_ENG_BUSY)))
--count;
@@ -536,21 +543,21 @@ static void via_cmdbuf_start(drm_via_private_t *dev_priv)
via_flush_write_combine();
(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
- VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- VIA_WRITE(VIA_REG_TRANSPACE, command);
- VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
- VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
+ via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+ via_write(dev_priv, VIA_REG_TRANSPACE, command);
+ via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
+ via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);
- VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
- VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
+ via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
wmb();
- VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
- VIA_READ(VIA_REG_TRANSPACE);
+ via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
+ via_read(dev_priv, VIA_REG_TRANSPACE);
dev_priv->dma_diff = 0;
count = 10000000;
- while (!(VIA_READ(0x41c) & 0x80000000) && count--);
+ while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--);
reader = *(dev_priv->hw_addr_ptr);
ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 062067438f1d..feaa538026a0 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -34,13 +34,16 @@
* the same DMA mappings?
*/
-#include <drm/drmP.h>
-#include <drm/via_drm.h>
-#include "via_drv.h"
-#include "via_dmablit.h"
-
#include <linux/pagemap.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
+#include <drm/via_drm.h>
+
+#include "via_dmablit.h"
+#include "via_drv.h"
#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
@@ -214,16 +217,16 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
VIA_DMA_CSR_DE);
- VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
- VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
+ via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
+ via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
+ via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
wmb();
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
- VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
+ via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
}
/*
@@ -291,7 +294,7 @@ via_abort_dmablit(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}
static void
@@ -299,7 +302,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
@@ -330,7 +333,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
done_transfer = blitq->is_active &&
- ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+ ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
@@ -349,7 +352,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
* Clear transfer done flag.
*/
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
+ via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
blitq->is_active = 0;
blitq->aborting = 0;
@@ -436,7 +439,7 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
- DRM_WAIT_ON(ret, *queue, 3 * HZ,
+ VIA_WAIT_ON(ret, *queue, 3 * HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
@@ -687,7 +690,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
while (blitq->num_free == 0) {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
+ VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
if (ret)
return (-EINTR == ret) ? -EAGAIN : ret;
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index af6a12d3c058..666a16de84f9 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -24,11 +24,14 @@
#include <linux/module.h>
-#include <drm/drmP.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_pciids.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
-#include <drm/drm_pciids.h>
static int via_driver_open(struct drm_device *dev, struct drm_file *file)
{
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 6d1ae834484c..d5ad1b05bf77 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -24,8 +24,16 @@
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
-#include <drm/drm_mm.h>
+#include <linux/irqreturn.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/wait.h>
+
+#include <drm/drm_ioctl.h>
#include <drm/drm_legacy.h>
+#include <drm/drm_mm.h>
+#include <drm/via_drm.h>
#define DRIVER_AUTHOR "Various"
@@ -113,12 +121,67 @@ enum via_family {
};
/* VIA MMIO register access */
-#define VIA_BASE ((dev_priv->mmio))
+static inline u32 via_read(struct drm_via_private *dev_priv, u32 reg)
+{
+ return readl((void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write(struct drm_via_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writel(val, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write8(struct drm_via_private *dev_priv, u32 reg,
+ u32 val)
+{
+ writeb(val, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
+
+static inline void via_write8_mask(struct drm_via_private *dev_priv,
+ u32 reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readb((void __iomem *)(dev_priv->mmio->handle + reg));
+ tmp = (tmp & ~mask) | (val & mask);
+ writeb(tmp, (void __iomem *)(dev_priv->mmio->handle + reg));
+}
-#define VIA_READ(reg) DRM_READ32(VIA_BASE, reg)
-#define VIA_WRITE(reg, val) DRM_WRITE32(VIA_BASE, reg, val)
-#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
-#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
+/*
+ * Poll in a loop waiting for 'condition' to become true.
+ * Note: A direct replacement with wait_event_interruptible_timeout()
+ * will not work unless the driver is updated to call wake_up()
+ * in the relevant places that can change the 'condition'.
+ *
+ * Returns:
+ * ret keeps its current value if 'condition' becomes true
+ * ret = -EBUSY if the timeout expires
+ * ret = -EINTR if a signal interrupted the wait
+ */
+#define VIA_WAIT_ON(ret, queue, timeout, condition) \
+do { \
+ DECLARE_WAITQUEUE(entry, current); \
+ unsigned long end = jiffies + (timeout); \
+ add_wait_queue(&(queue), &entry); \
+ \
+ for (;;) { \
+ __set_current_state(TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (time_after_eq(jiffies, end)) { \
+ ret = -EBUSY; \
+ break; \
+ } \
+ schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
+ if (signal_pending(current)) { \
+ ret = -EINTR; \
+ break; \
+ } \
+ } \
+ __set_current_state(TASK_RUNNING); \
+ remove_wait_queue(&(queue), &entry); \
+} while (0)
extern const struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;
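Usage of VIA_WAIT_ON() mirrors the DRM_WAIT_ON() calls it replaces; a sketch based on the via_dmablit.c hunk below:

int ret = 0;

VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
if (ret)
	return (ret == -EINTR) ? -EAGAIN : ret;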
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index c96830ccc0ec..24cc445169e2 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -35,8 +35,10 @@
* The refresh rate is also calculated for video playback sync purposes.
*/
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
+#include <drm/drm_vblank.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
#define VIA_REG_INTERRUPT 0x200
@@ -108,7 +110,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int i;
- status = VIA_READ(VIA_REG_INTERRUPT);
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
if (status & VIA_IRQ_VBLANK_PENDING) {
atomic_inc(&dev_priv->vbl_received);
if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
@@ -143,7 +145,7 @@ irqreturn_t via_driver_irq_handler(int irq, void *arg)
}
/* Acknowledge interrupts */
- VIA_WRITE(VIA_REG_INTERRUPT, status);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status);
if (handled)
@@ -158,8 +160,8 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
if (dev_priv) {
/* Acknowledge interrupts */
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status |
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status |
dev_priv->irq_pending_mask);
}
}
@@ -174,11 +176,11 @@ int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
return -EINVAL;
}
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
- VIA_WRITE8(0x83d4, 0x11);
- VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
return 0;
}
@@ -188,11 +190,11 @@ void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
drm_via_private_t *dev_priv = dev->dev_private;
u32 status;
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
- VIA_WRITE8(0x83d4, 0x11);
- VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
if (pipe != 0)
DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
@@ -233,12 +235,12 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
cur_irq = dev_priv->via_irqs + real_irq;
if (masks[real_irq][2] && !force_sequence) {
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
- ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
+ VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+ ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
masks[irq][4]));
cur_irq_sequence = atomic_read(&cur_irq->irq_received);
} else {
- DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
+ VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
(((cur_irq_sequence =
atomic_read(&cur_irq->irq_received)) -
*sequence) <= (1 << 23)));
@@ -292,8 +294,8 @@ void via_driver_irq_preinstall(struct drm_device *dev)
dev_priv->last_vblank_valid = 0;
/* Clear VSync interrupt regs */
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status &
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status &
~(dev_priv->irq_enable_mask));
/* Clear bits if they're already high */
@@ -310,13 +312,13 @@ int via_driver_irq_postinstall(struct drm_device *dev)
if (!dev_priv)
return -EINVAL;
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
| dev_priv->irq_enable_mask);
/* Some magic, oh for some data sheets ! */
- VIA_WRITE8(0x83d4, 0x11);
- VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
return 0;
}
@@ -331,11 +333,11 @@ void via_driver_irq_uninstall(struct drm_device *dev)
/* Some more magic, oh for some data sheets ! */
- VIA_WRITE8(0x83d4, 0x11);
- VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+ via_write8(dev_priv, 0x83d4, 0x11);
+ via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
- status = VIA_READ(VIA_REG_INTERRUPT);
- VIA_WRITE(VIA_REG_INTERRUPT, status &
+ status = via_read(dev_priv, VIA_REG_INTERRUPT);
+ via_write(dev_priv, VIA_REG_INTERRUPT, status &
~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
}
}
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 2ad865870372..431c150df014 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -21,8 +21,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_vblank.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 4217d66a5cc6..45cc9e900260 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -25,8 +25,13 @@
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
-#include <drm/drmP.h>
+#include <linux/slab.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_irq.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
#define VIA_MM_ALIGN_SHIFT 4
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index fb2609434df7..8d8135f424ef 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -28,13 +28,13 @@
* be very slow.
*/
-#include "via_3d_reg.h"
-#include <drm/drmP.h>
-#include <drm/via_drm.h>
+#include <drm/drm_device.h>
#include <drm/drm_legacy.h>
-#include "via_verifier.h"
+#include <drm/via_drm.h>
+
+#include "via_3d_reg.h"
#include "via_drv.h"
-#include <linux/kernel.h>
+#include "via_verifier.h"
typedef enum {
state_command,
@@ -725,14 +725,14 @@ via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
next_fire = dev_priv->fire_offsets[*fire_count];
buf++;
cmd = (*buf & 0xFFFF0000) >> 16;
- VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
+ via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
switch (cmd) {
case HC_ParaType_CmdVdata:
while ((buf < buf_end) &&
(*fire_count < dev_priv->num_fire_offsets) &&
(*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
while (buf <= next_fire) {
- VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
+ via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
(burst & 63), *buf++);
burst += 4;
}
@@ -753,7 +753,7 @@ via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
(*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
break;
- VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
+ via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
(burst & 63), *buf++);
burst += 4;
}
@@ -843,7 +843,7 @@ via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
cmd = *buf;
if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
break;
- VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
+ via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
buf++;
}
*buffer = buf;
@@ -894,7 +894,7 @@ via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
i = count = *buf;
buf += 3;
while (i--)
- VIA_WRITE(addr, *buf++);
+ via_write(dev_priv, addr, *buf++);
if (count & 3)
buf += 4 - (count & 3);
*buffer = buf;
@@ -950,7 +950,7 @@ via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
buf += 3;
while (i--) {
addr = *buf++;
- VIA_WRITE(addr, *buf++);
+ via_write(dev_priv, addr, *buf++);
}
count <<= 1;
if (count & 3)
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index a9ffbad1cfdd..53b1f58f99b4 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -25,8 +25,9 @@
* Video and XvMC related functions.
*/
-#include <drm/drmP.h>
+#include <drm/drm_device.h>
#include <drm/via_drm.h>
+
#include "via_drv.h"
void via_init_futex(drm_via_private_t *dev_priv)
@@ -82,7 +83,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
switch (fx->func) {
case VIA_FUTEX_WAIT:
- DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
+ VIA_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
(fx->ms / 10) * (HZ / 100), *lock != fx->val);
return ret;
case VIA_FUTEX_WAKE:
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index ed0fcda713c3..5156e6b279db 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -23,8 +23,8 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/debugfs.h>
-#include <drm/drmP.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
#include "virtgpu_drv.h"
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index ba16e8cb7124..e622485ae826 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -25,11 +25,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "virtgpu_drv.h"
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_damage_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "virtgpu_drv.h"
#define XRES_MIN 32
#define YRES_MIN 32
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index c50868753132..0fc32fa0b3c0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -29,10 +29,13 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/pci.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
#include "virtgpu_drv.h"
+
static struct drm_driver driver;
static int virtio_gpu_modeset = -1;
@@ -195,7 +198,7 @@ static const struct file_operations virtio_gpu_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
@@ -207,8 +210,6 @@ static struct drm_driver driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9e2d3062b01d..e28829661724 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -31,16 +31,16 @@
#include <linux/virtio_config.h>
#include <linux/virtio_gpu.h>
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
#include <drm/drm_atomic.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
+#include <drm/ttm/ttm_placement.h>
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
@@ -396,7 +396,7 @@ static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo,
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 70d6c4329778..a0514f5bd006 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -23,8 +23,8 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <trace/events/dma_fence.h>
+
#include "virtgpu_drv.h"
static const char *virtio_get_driver_name(struct dma_fence *f)
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 1e49e08dd545..292566146814 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -23,7 +23,9 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
+
#include "virtgpu_drv.h"
void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index ac60be9b5c19..3c430dd65f67 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -25,11 +25,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
-#include <drm/virtgpu_drm.h>
-#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/file.h>
#include <linux/sync_file.h>
+#include <drm/drm_file.h>
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/virtgpu_drm.h>
+
#include "virtgpu_drv.h"
static void convert_to_hw_box(struct virtio_gpu_box *dst,
@@ -394,7 +396,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
(vgdev, qobj->hw_res_handle,
vfpriv->ctx_id, offset, args->level,
&box, fence);
- reservation_object_add_excl_fence(qobj->tbo.resv,
+ reservation_object_add_excl_fence(qobj->tbo.base.resv,
&fence->f);
dma_fence_put(&fence->f);
@@ -448,7 +450,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
(vgdev, qobj,
vfpriv ? vfpriv->ctx_id : 0, offset,
args->level, &box, fence);
- reservation_object_add_excl_fence(qobj->tbo.resv,
+ reservation_object_add_excl_fence(qobj->tbo.base.resv,
&fence->f);
dma_fence_put(&fence->f);
}
@@ -553,34 +555,34 @@ copy_exit:
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
virtio_gpu_resource_create_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
/* make transfer async to the main ring? - not sure, can we
* thread these in the underlying GL
*/
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
virtio_gpu_transfer_from_host_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
virtio_gpu_transfer_to_host_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
- DRM_AUTH | DRM_RENDER_ALLOW),
+ DRM_RENDER_ALLOW),
};
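
The hunk above drops DRM_AUTH from every virtio-gpu ioctl, leaving DRM_RENDER_ALLOW as the only gate. Render-node clients were already exempt from the authentication check, so in practice the change admits unauthenticated primary-node clients as well. A standalone sketch of the core permission filter (an approximation of drm_ioctl_permit(); the flag values and file struct below are stand-ins, not the DRM definitions):

/*
 * Approximation of the per-ioctl permission filter applied by DRM core.
 * Flag values and struct are illustrative stand-ins.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define DRM_AUTH         0x1	/* stand-in values, not the kernel's */
#define DRM_RENDER_ALLOW 0x2

struct file_ctx {
	bool is_render_client;	/* opened via /dev/dri/renderD* */
	bool authenticated;	/* DRI auth completed on the primary node */
};

static int permit(unsigned int flags, const struct file_ctx *f)
{
	/* render-node clients are rejected unless explicitly allowed */
	if (!(flags & DRM_RENDER_ALLOW) && f->is_render_client)
		return -EACCES;

	/* DRM_AUTH additionally requires an authenticated primary-node client */
	if ((flags & DRM_AUTH) && !f->is_render_client && !f->authenticated)
		return -EACCES;

	return 0;
}

int main(void)
{
	struct file_ctx render = { .is_render_client = true };
	struct file_ctx primary = { 0 };	/* not yet authenticated */

	printf("%d\n", permit(DRM_RENDER_ALLOW, &render));	/* 0: allowed */
	printf("%d\n", permit(DRM_AUTH | DRM_RENDER_ALLOW, &primary));	/* -EACCES before this patch */
	printf("%d\n", permit(DRM_RENDER_ALLOW, &primary));	/* 0 after it */
	return 0;
}
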
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 84b6a6bf00c6..c190702fab72 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -25,7 +25,9 @@
#include <linux/virtio.h>
#include <linux/virtio_config.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_file.h>
+
#include "virtgpu_drv.h"
static void virtio_gpu_config_changed_work_func(struct work_struct *work)
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 024c2aa0c929..3dc08f991a8d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -23,9 +23,11 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "virtgpu_drv.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+
+#include "virtgpu_drv.h"
static const uint32_t virtio_gpu_formats[] = {
DRM_FORMAT_HOST_XRGB8888,
@@ -210,7 +212,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
0, 0, vgfb->fence);
ret = virtio_gpu_object_reserve(bo, false);
if (!ret) {
- reservation_object_add_excl_fence(bo->tbo.resv,
+ reservation_object_add_excl_fence(bo->tbo.base.resv,
&vgfb->fence->f);
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 8fbf71bd0c5e..dc642a884b88 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -22,6 +22,8 @@
* Authors: Andreas Pokorny
*/
+#include <drm/drm_prime.h>
+
#include "virtgpu_drv.h"
/* Empty Implementations as there should not be any other driver for a virtual
@@ -66,8 +68,5 @@ void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-
- bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
return drm_gem_prime_mmap(obj, vma);
}
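
With TTM buffer objects now embedding their GEM object (the tbo.base member), GEM and TTM share a single vma offset node, which is why the manual offset-syncing removed above became dead code. A standalone sketch of the embedding pattern (struct names are stand-ins for drm_gem_object/ttm_buffer_object, not the kernel types):

#include <stdio.h>

struct gem_base { unsigned long vma_offset; };	/* one shared offset node */
struct ttm_bo   { struct gem_base base; };	/* GEM embedded in TTM */
struct virtio_bo { struct ttm_bo tbo; };

/* both the GEM and TTM views resolve to the same embedded offset */
static unsigned long mmap_offset(const struct virtio_bo *bo)
{
	return bo->tbo.base.vma_offset;
}

int main(void)
{
	struct virtio_bo bo = { .tbo.base.vma_offset = 0x10000 };

	printf("offset: %#lx\n", mmap_offset(&bo));
	return 0;
}
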
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 300ef3a83538..f87903641847 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -25,17 +25,18 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/delay.h>
+
+#include <drm/drm.h>
+#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_module.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#include <drm/ttm/ttm_placement.h>
#include <drm/virtgpu_drm.h>
-#include "virtgpu_drv.h"
-#include <linux/delay.h>
+#include "virtgpu_drv.h"
static struct
virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 981ee16e3ee9..7ac20490e1b4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -26,13 +26,14 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
-#include "virtgpu_drv.h"
-#include "virtgpu_trace.h"
+#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
+#include "virtgpu_drv.h"
+#include "virtgpu_trace.h"
+
#define MAX_INLINE_CMD_SIZE 96
#define MAX_INLINE_RESP_SIZE 24
#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 89f09bec7b23..0b767d7efa24 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_crc.o
+vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_composer.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_composer.c
index e66ff25c008e..d5585695c64d 100644
--- a/drivers/gpu/drm/vkms/vkms_crc.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -1,34 +1,37 @@
// SPDX-License-Identifier: GPL-2.0+
-#include "vkms_drv.h"
#include <linux/crc32.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vkms_drv.h"
/**
* compute_crc - Compute CRC value on output frame
*
* @vaddr_out: address to final framebuffer
- * @crc_out: framebuffer's metadata
+ * @composer: framebuffer's metadata
*
* returns CRC value computed using crc32 on the visible portion of
* the final framebuffer at vaddr_out
*/
-static uint32_t compute_crc(void *vaddr_out, struct vkms_crc_data *crc_out)
+static uint32_t compute_crc(void *vaddr_out, struct vkms_composer *composer)
{
int i, j, src_offset;
- int x_src = crc_out->src.x1 >> 16;
- int y_src = crc_out->src.y1 >> 16;
- int h_src = drm_rect_height(&crc_out->src) >> 16;
- int w_src = drm_rect_width(&crc_out->src) >> 16;
+ int x_src = composer->src.x1 >> 16;
+ int y_src = composer->src.y1 >> 16;
+ int h_src = drm_rect_height(&composer->src) >> 16;
+ int w_src = drm_rect_width(&composer->src) >> 16;
u32 crc = 0;
for (i = y_src; i < y_src + h_src; ++i) {
for (j = x_src; j < x_src + w_src; ++j) {
- src_offset = crc_out->offset
- + (i * crc_out->pitch)
- + (j * crc_out->cpp);
+ src_offset = composer->offset
+ + (i * composer->pitch)
+ + (j * composer->cpp);
/* XRGB format ignores Alpha channel */
memset(vaddr_out + src_offset + 24, 0, 8);
crc = crc32_le(crc, vaddr_out + src_offset,
@@ -43,8 +46,8 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_crc_data *crc_out)
* blend - blend value at vaddr_src with value at vaddr_dst
* @vaddr_dst: destination address
* @vaddr_src: source address
- * @crc_dst: destination framebuffer's metadata
- * @crc_src: source framebuffer's metadata
+ * @dest_composer: destination framebuffer's metadata
+ * @src_composer: source framebuffer's metadata
*
* Blend value at vaddr_src with value at vaddr_dst.
* Currently, this function writes value at vaddr_src on value
@@ -55,31 +58,31 @@ static uint32_t compute_crc(void *vaddr_out, struct vkms_crc_data *crc_out)
* instead of overwriting it.
*/
static void blend(void *vaddr_dst, void *vaddr_src,
- struct vkms_crc_data *crc_dst,
- struct vkms_crc_data *crc_src)
+ struct vkms_composer *dest_composer,
+ struct vkms_composer *src_composer)
{
int i, j, j_dst, i_dst;
int offset_src, offset_dst;
- int x_src = crc_src->src.x1 >> 16;
- int y_src = crc_src->src.y1 >> 16;
+ int x_src = src_composer->src.x1 >> 16;
+ int y_src = src_composer->src.y1 >> 16;
- int x_dst = crc_src->dst.x1;
- int y_dst = crc_src->dst.y1;
- int h_dst = drm_rect_height(&crc_src->dst);
- int w_dst = drm_rect_width(&crc_src->dst);
+ int x_dst = src_composer->dst.x1;
+ int y_dst = src_composer->dst.y1;
+ int h_dst = drm_rect_height(&src_composer->dst);
+ int w_dst = drm_rect_width(&src_composer->dst);
int y_limit = y_src + h_dst;
int x_limit = x_src + w_dst;
for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
- offset_dst = crc_dst->offset
- + (i_dst * crc_dst->pitch)
- + (j_dst++ * crc_dst->cpp);
- offset_src = crc_src->offset
- + (i * crc_src->pitch)
- + (j * crc_src->cpp);
+ offset_dst = dest_composer->offset
+ + (i_dst * dest_composer->pitch)
+ + (j_dst++ * dest_composer->cpp);
+ offset_src = src_composer->offset
+ + (i * src_composer->pitch)
+ + (j * src_composer->cpp);
memcpy(vaddr_dst + offset_dst,
vaddr_src + offset_src, sizeof(u32));
@@ -88,31 +91,27 @@ static void blend(void *vaddr_dst, void *vaddr_src,
}
}
-static void compose_cursor(struct vkms_crc_data *cursor_crc,
- struct vkms_crc_data *primary_crc, void *vaddr_out)
+static void compose_cursor(struct vkms_composer *cursor_composer,
+ struct vkms_composer *primary_composer,
+ void *vaddr_out)
{
struct drm_gem_object *cursor_obj;
struct vkms_gem_object *cursor_vkms_obj;
- cursor_obj = drm_gem_fb_get_obj(&cursor_crc->fb, 0);
+ cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);
- mutex_lock(&cursor_vkms_obj->pages_lock);
- if (!cursor_vkms_obj->vaddr) {
- DRM_WARN("cursor plane vaddr is NULL");
- goto out;
- }
-
- blend(vaddr_out, cursor_vkms_obj->vaddr, primary_crc, cursor_crc);
+ if (WARN_ON(!cursor_vkms_obj->vaddr))
+ return;
-out:
- mutex_unlock(&cursor_vkms_obj->pages_lock);
+ blend(vaddr_out, cursor_vkms_obj->vaddr,
+ primary_composer, cursor_composer);
}
-static uint32_t _vkms_get_crc(struct vkms_crc_data *primary_crc,
- struct vkms_crc_data *cursor_crc)
+static uint32_t _vkms_get_crc(struct vkms_composer *primary_composer,
+ struct vkms_composer *cursor_composer)
{
- struct drm_framebuffer *fb = &primary_crc->fb;
+ struct drm_framebuffer *fb = &primary_composer->fb;
struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
void *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
@@ -123,20 +122,17 @@ static uint32_t _vkms_get_crc(struct vkms_crc_data *primary_crc,
return 0;
}
- mutex_lock(&vkms_obj->pages_lock);
if (WARN_ON(!vkms_obj->vaddr)) {
- mutex_unlock(&vkms_obj->pages_lock);
kfree(vaddr_out);
return crc;
}
memcpy(vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
- mutex_unlock(&vkms_obj->pages_lock);
- if (cursor_crc)
- compose_cursor(cursor_crc, primary_crc, vaddr_out);
+ if (cursor_composer)
+ compose_cursor(cursor_composer, primary_composer, vaddr_out);
- crc = compute_crc(vaddr_out, primary_crc);
+ crc = compute_crc(vaddr_out, primary_composer);
kfree(vaddr_out);
@@ -144,72 +140,57 @@ static uint32_t _vkms_get_crc(struct vkms_crc_data *primary_crc,
}
/**
- * vkms_crc_work_handle - ordered work_struct to compute CRC
+ * vkms_composer_worker - ordered work_struct to compute CRC
*
* @work: work_struct
*
- * Work handler for computing CRCs. work_struct scheduled in
+ * Work handler for composing and computing CRCs. work_struct scheduled in
* an ordered workqueue that's periodically scheduled to run by
* _vblank_handle() and flushed at vkms_atomic_crtc_destroy_state().
*/
-void vkms_crc_work_handle(struct work_struct *work)
+void vkms_composer_worker(struct work_struct *work)
{
struct vkms_crtc_state *crtc_state = container_of(work,
struct vkms_crtc_state,
- crc_work);
+ composer_work);
struct drm_crtc *crtc = crtc_state->base.crtc;
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
- struct vkms_device *vdev = container_of(out, struct vkms_device,
- output);
- struct vkms_crc_data *primary_crc = NULL;
- struct vkms_crc_data *cursor_crc = NULL;
- struct drm_plane *plane;
+ struct vkms_composer *primary_composer = NULL;
+ struct vkms_composer *cursor_composer = NULL;
u32 crc32 = 0;
u64 frame_start, frame_end;
- unsigned long flags;
+ bool crc_pending;
- spin_lock_irqsave(&out->state_lock, flags);
+ spin_lock_irq(&out->composer_lock);
frame_start = crtc_state->frame_start;
frame_end = crtc_state->frame_end;
- spin_unlock_irqrestore(&out->state_lock, flags);
-
- /* _vblank_handle() hasn't updated frame_start yet */
- if (!frame_start || frame_start == frame_end)
- goto out;
-
- drm_for_each_plane(plane, &vdev->drm) {
- struct vkms_plane_state *vplane_state;
- struct vkms_crc_data *crc_data;
-
- vplane_state = to_vkms_plane_state(plane->state);
- crc_data = vplane_state->crc_data;
+ crc_pending = crtc_state->crc_pending;
+ crtc_state->frame_start = 0;
+ crtc_state->frame_end = 0;
+ crtc_state->crc_pending = false;
+ spin_unlock_irq(&out->composer_lock);
- if (drm_framebuffer_read_refcount(&crc_data->fb) == 0)
- continue;
+ /*
+ * We raced with the vblank hrtimer and previous work already computed
+ * the crc, nothing to do.
+ */
+ if (!crc_pending)
+ return;
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- primary_crc = crc_data;
- else
- cursor_crc = crc_data;
- }
+ if (crtc_state->num_active_planes >= 1)
+ primary_composer = crtc_state->active_planes[0]->composer;
- if (primary_crc)
- crc32 = _vkms_get_crc(primary_crc, cursor_crc);
+ if (crtc_state->num_active_planes == 2)
+ cursor_composer = crtc_state->active_planes[1]->composer;
- frame_end = drm_crtc_accurate_vblank_count(crtc);
+ if (primary_composer)
+ crc32 = _vkms_get_crc(primary_composer, cursor_composer);
- /* queue_work can fail to schedule crc_work; add crc for
- * missing frames
+ /*
+ * The worker can fall behind the vblank hrtimer, make sure we catch up.
*/
while (frame_start <= frame_end)
drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
-
-out:
- /* to avoid using the same value for frame number again */
- spin_lock_irqsave(&out->state_lock, flags);
- crtc_state->frame_end = frame_end;
- crtc_state->frame_start = 0;
- spin_unlock_irqrestore(&out->state_lock, flags);
}
static const char * const pipe_crc_sources[] = {"auto"};
@@ -256,17 +237,13 @@ int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
bool enabled = false;
- unsigned long flags;
int ret = 0;
ret = vkms_crc_parse_source(src_name, &enabled);
- /* make sure nothing is scheduled on crtc workq */
- flush_workqueue(out->crc_workq);
-
- spin_lock_irqsave(&out->lock, flags);
- out->crc_enabled = enabled;
- spin_unlock_irqrestore(&out->lock, flags);
+ spin_lock_irq(&out->lock);
+ out->composer_enabled = enabled;
+ spin_unlock_irq(&out->lock);
return ret;
}
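
The rework above replaces the old frame_start handshake with a crc_pending flag under composer_lock: the vblank side records the frame range and marks work pending, while the worker snapshots and clears the range, then computes CRCs outside the lock. A minimal userspace analog of that handshake (a pthread mutex standing in for the spinlock; all names are hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t composer_lock = PTHREAD_MUTEX_INITIALIZER;
static bool crc_pending;
static uint64_t frame_start, frame_end;

/* "vblank" side: extend the pending frame range */
static void vblank_tick(uint64_t frame)
{
	pthread_mutex_lock(&composer_lock);
	if (!crc_pending)
		frame_start = frame;	/* first unprocessed frame */
	frame_end = frame;		/* range grows if the worker lags */
	crc_pending = true;
	pthread_mutex_unlock(&composer_lock);
}

/* worker side: snapshot and clear under the lock, compute outside it */
static void composer_work(void)
{
	uint64_t start, end;
	bool pending;

	pthread_mutex_lock(&composer_lock);
	start = frame_start;
	end = frame_end;
	pending = crc_pending;
	frame_start = frame_end = 0;
	crc_pending = false;
	pthread_mutex_unlock(&composer_lock);

	if (!pending)
		return;			/* raced with an earlier run */

	for (uint64_t f = start; f <= end; f++)
		printf("crc entry for frame %llu\n", (unsigned long long)f);
}

int main(void)
{
	vblank_tick(10);
	vblank_tick(11);	/* worker fell behind: range is now 10..11 */
	composer_work();	/* emits one entry per missed frame */
	return 0;
}
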
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 4d11292bc6f3..927dafaebc76 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,15 +1,18 @@
// SPDX-License-Identifier: GPL-2.0+
-#include "vkms_drv.h"
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "vkms_drv.h"
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
struct vkms_output *output = container_of(timer, struct vkms_output,
vblank_hrtimer);
struct drm_crtc *crtc = &output->crtc;
- struct vkms_crtc_state *state = to_vkms_crtc_state(crtc->state);
+ struct vkms_crtc_state *state;
u64 ret_overrun;
bool ret;
@@ -23,20 +26,26 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
- if (state && output->crc_enabled) {
+ state = output->composer_state;
+ if (state && output->composer_enabled) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
- /* update frame_start only if a queued vkms_crc_work_handle()
+ /* update frame_start only if a queued vkms_composer_worker()
* has read the data
*/
- spin_lock(&output->state_lock);
- if (!state->frame_start)
+ spin_lock(&output->composer_lock);
+ if (!state->crc_pending)
state->frame_start = frame;
- spin_unlock(&output->state_lock);
+ else
+ DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
+ state->frame_start, frame);
+ state->frame_end = frame;
+ state->crc_pending = true;
+ spin_unlock(&output->composer_lock);
- ret = queue_work(output->crc_workq, &state->crc_work);
+ ret = queue_work(output->composer_workq, &state->composer_work);
if (!ret)
- DRM_WARN("failed to queue vkms_crc_work_handle");
+ DRM_DEBUG_DRIVER("Composer worker already queued\n");
}
spin_unlock(&output->lock);
@@ -107,7 +116,7 @@ vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);
- INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+ INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
return &vkms_state->base;
}
@@ -119,10 +128,9 @@ static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
__drm_atomic_helper_crtc_destroy_state(state);
- if (vkms_state) {
- flush_work(&vkms_state->crc_work);
- kfree(vkms_state);
- }
+ WARN_ON(work_pending(&vkms_state->composer_work));
+ kfree(vkms_state->active_planes);
+ kfree(vkms_state);
}
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
@@ -135,7 +143,7 @@ static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
if (vkms_state)
- INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+ INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}
static const struct drm_crtc_funcs vkms_crtc_funcs = {
@@ -152,6 +160,52 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
.verify_crc_source = vkms_verify_crc_source,
};
+static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ int i = 0, ret;
+
+ if (vkms_state->active_planes)
+ return 0;
+
+ ret = drm_atomic_add_affected_planes(state->state, crtc);
+ if (ret < 0)
+ return ret;
+
+ drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
+ plane_state = drm_atomic_get_existing_plane_state(state->state,
+ plane);
+ WARN_ON(!plane_state);
+
+ if (!plane_state->visible)
+ continue;
+
+ i++;
+ }
+
+ vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
+ if (!vkms_state->active_planes)
+ return -ENOMEM;
+ vkms_state->num_active_planes = i;
+
+ i = 0;
+ drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
+ plane_state = drm_atomic_get_existing_plane_state(state->state,
+ plane);
+
+ if (!plane_state->visible)
+ continue;
+
+ vkms_state->active_planes[i++] =
+ to_vkms_plane_state(plane_state);
+ }
+
+ return 0;
+}
+
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -170,7 +224,7 @@ static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
/* This lock is held across the atomic commit to block vblank timer
- * from scheduling vkms_crc_work_handle until the crc_data is updated
+ * from scheduling vkms_composer_worker until the composer is updated
*/
spin_lock_irq(&vkms_output->lock);
}
@@ -179,25 +233,27 @@ static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
- unsigned long flags;
if (crtc->state->event) {
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ spin_lock(&crtc->dev->event_lock);
if (drm_crtc_vblank_get(crtc) != 0)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
else
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ spin_unlock(&crtc->dev->event_lock);
crtc->state->event = NULL;
}
+ vkms_output->composer_state = to_vkms_crtc_state(crtc->state);
+
spin_unlock_irq(&vkms_output->lock);
}
static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+ .atomic_check = vkms_crtc_atomic_check,
.atomic_begin = vkms_crtc_atomic_begin,
.atomic_flush = vkms_crtc_atomic_flush,
.atomic_enable = vkms_crtc_atomic_enable,
@@ -220,10 +276,10 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
spin_lock_init(&vkms_out->lock);
- spin_lock_init(&vkms_out->state_lock);
+ spin_lock_init(&vkms_out->composer_lock);
- vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
- if (!vkms_out->crc_workq)
+ vkms_out->composer_workq = alloc_ordered_workqueue("vkms_composer", 0);
+ if (!vkms_out->composer_workq)
return -ENOMEM;
return ret;
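
The new vkms_crtc_atomic_check above collects visible planes with a two-pass scheme: count first, allocate once, then fill preserving iteration order (which stands in for z order). A standalone sketch of the pattern, with calloc() in place of kcalloc() (all names hypothetical):

#include <stdbool.h>
#include <stdlib.h>

struct plane_state { bool visible; };

static int collect_active(struct plane_state *all, int n,
			  struct plane_state ***out, int *out_n)
{
	struct plane_state **active;
	int i, count = 0;

	for (i = 0; i < n; i++)			/* pass 1: count visible */
		if (all[i].visible)
			count++;

	active = calloc(count, sizeof(*active));
	if (!active && count > 0)
		return -1;			/* -ENOMEM in the kernel */

	count = 0;
	for (i = 0; i < n; i++)			/* pass 2: fill in order */
		if (all[i].visible)
			active[count++] = &all[i];

	*out = active;
	*out_n = count;
	return 0;
}

int main(void)
{
	struct plane_state planes[3] = { { true }, { false }, { true } };
	struct plane_state **active;
	int n;

	if (collect_active(planes, 3, &active, &n) == 0)
		free(active);	/* n == 2: the two visible planes, in order */
	return 0;
}
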
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 738dd6206d85..44ab9f8ef8be 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -10,11 +10,19 @@
*/
#include <linux/module.h>
-#include <drm/drm_gem.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
#include "vkms_drv.h"
#define DRIVER_NAME "vkms"
@@ -55,7 +63,36 @@ static void vkms_release(struct drm_device *dev)
drm_atomic_helper_shutdown(&vkms->drm);
drm_mode_config_cleanup(&vkms->drm);
drm_dev_fini(&vkms->drm);
- destroy_workqueue(vkms->output.crc_workq);
+ destroy_workqueue(vkms->output.composer_workq);
+}
+
+static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *dev = old_state->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ drm_atomic_helper_commit_modeset_disables(dev, old_state);
+
+ drm_atomic_helper_commit_planes(dev, old_state, 0);
+
+ drm_atomic_helper_commit_modeset_enables(dev, old_state);
+
+ drm_atomic_helper_fake_vblank(old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, old_state);
+
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ struct vkms_crtc_state *vkms_state =
+ to_vkms_crtc_state(old_crtc_state);
+
+ flush_work(&vkms_state->composer_work);
+ }
+
+ drm_atomic_helper_cleanup_planes(dev, old_state);
}
static struct drm_driver vkms_driver = {
@@ -80,6 +117,10 @@ static const struct drm_mode_config_funcs vkms_mode_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
+static const struct drm_mode_config_helper_funcs vkms_mode_config_helpers = {
+ .atomic_commit_tail = vkms_atomic_commit_tail,
+};
+
static int vkms_modeset_init(struct vkms_device *vkmsdev)
{
struct drm_device *dev = &vkmsdev->drm;
@@ -91,8 +132,9 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.max_width = XRES_MAX;
dev->mode_config.max_height = YRES_MAX;
dev->mode_config.preferred_depth = 24;
+ dev->mode_config.helper_private = &vkms_mode_config_helpers;
- return vkms_output_init(vkmsdev);
+ return vkms_output_init(vkmsdev, 0);
}
static int __init vkms_init(void)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index b92c30c66a6f..5a95100fa18b 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -3,11 +3,11 @@
#ifndef _VKMS_DRV_H_
#define _VKMS_DRV_H_
-#include <drm/drmP.h>
+#include <linux/hrtimer.h>
+
#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_encoder.h>
-#include <linux/hrtimer.h>
#define XRES_MIN 20
#define YRES_MIN 20
@@ -20,7 +20,7 @@
extern bool enable_cursor;
-struct vkms_crc_data {
+struct vkms_composer {
struct drm_framebuffer fb;
struct drm_rect src, dst;
unsigned int offset;
@@ -31,23 +31,30 @@ struct vkms_crc_data {
/**
* vkms_plane_state - Driver specific plane state
* @base: base plane state
- * @crc_data: data required for CRC computation
+ * @composer: data required for composing computation
*/
struct vkms_plane_state {
struct drm_plane_state base;
- struct vkms_crc_data *crc_data;
+ struct vkms_composer *composer;
};
/**
* vkms_crtc_state - Driver specific CRTC state
* @base: base CRTC state
- * @crc_work: work struct to compute and add CRC entries
+ * @composer_work: work struct to compose and add CRC entries
* @frame_start: start frame number for computed CRC
* @frame_end: end frame number for computed CRC
*/
struct vkms_crtc_state {
struct drm_crtc_state base;
- struct work_struct crc_work;
+ struct work_struct composer_work;
+
+ int num_active_planes;
+ /* stack of active planes for crc computation, should be in z order */
+ struct vkms_plane_state **active_planes;
+
+ /* below three are protected by vkms_output.composer_lock */
+ bool crc_pending;
u64 frame_start;
u64 frame_end;
};
@@ -59,13 +66,16 @@ struct vkms_output {
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
struct drm_pending_vblank_event *event;
- bool crc_enabled;
- /* ordered wq for crc_work */
- struct workqueue_struct *crc_workq;
- /* protects concurrent access to crc_data */
+ /* ordered wq for composer_work */
+ struct workqueue_struct *composer_workq;
+ /* protects concurrent access to composer */
spinlock_t lock;
- /* protects concurrent access to crtc_state */
- spinlock_t state_lock;
+
+ /* protected by @lock */
+ bool composer_enabled;
+ struct vkms_crtc_state *composer_state;
+
+ spinlock_t composer_lock;
};
struct vkms_device {
@@ -105,10 +115,10 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
int *max_error, ktime_t *vblank_time,
bool in_vblank_irq);
-int vkms_output_init(struct vkms_device *vkmsdev);
+int vkms_output_init(struct vkms_device *vkmsdev, int index);
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type);
+ enum drm_plane_type type, int index);
/* Gem stuff */
struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
@@ -133,6 +143,8 @@ const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name);
int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
size_t *values_cnt);
-void vkms_crc_work_handle(struct work_struct *work);
+
+/* Composer Support */
+void vkms_composer_worker(struct work_struct *work);
#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 69048e73377d..6489bfe0a149 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
#include <linux/shmem_fs.h>
+#include <linux/vmalloc.h>
#include "vkms_drv.h"
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 56fb5c2a2315..fb1941a6522c 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -35,7 +35,7 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = {
.get_modes = vkms_conn_get_modes,
};
-int vkms_output_init(struct vkms_device *vkmsdev)
+int vkms_output_init(struct vkms_device *vkmsdev, int index)
{
struct vkms_output *output = &vkmsdev->output;
struct drm_device *dev = &vkmsdev->drm;
@@ -45,12 +45,12 @@ int vkms_output_init(struct vkms_device *vkmsdev)
struct drm_plane *primary, *cursor = NULL;
int ret;
- primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY);
+ primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index);
if (IS_ERR(primary))
return PTR_ERR(primary);
if (enable_cursor) {
- cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR);
+ cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor)) {
ret = PTR_ERR(cursor);
goto err_cursor;
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 0fceb6258422..5fc8f85aaf3d 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,10 +1,12 @@
// SPDX-License-Identifier: GPL-2.0+
-#include "vkms_drv.h"
-#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include "vkms_drv.h"
static const u32 vkms_formats[] = {
DRM_FORMAT_XRGB8888,
@@ -18,20 +20,20 @@ static struct drm_plane_state *
vkms_plane_duplicate_state(struct drm_plane *plane)
{
struct vkms_plane_state *vkms_state;
- struct vkms_crc_data *crc_data;
+ struct vkms_composer *composer;
vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
if (!vkms_state)
return NULL;
- crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
- if (!crc_data) {
- DRM_DEBUG_KMS("Couldn't allocate crc_data\n");
+ composer = kzalloc(sizeof(*composer), GFP_KERNEL);
+ if (!composer) {
+ DRM_DEBUG_KMS("Couldn't allocate composer\n");
kfree(vkms_state);
return NULL;
}
- vkms_state->crc_data = crc_data;
+ vkms_state->composer = composer;
__drm_atomic_helper_plane_duplicate_state(plane,
&vkms_state->base);
@@ -49,12 +51,12 @@ static void vkms_plane_destroy_state(struct drm_plane *plane,
/* dropping the reference we acquired in
* vkms_primary_plane_update()
*/
- if (drm_framebuffer_read_refcount(&vkms_state->crc_data->fb))
- drm_framebuffer_put(&vkms_state->crc_data->fb);
+ if (drm_framebuffer_read_refcount(&vkms_state->composer->fb))
+ drm_framebuffer_put(&vkms_state->composer->fb);
}
- kfree(vkms_state->crc_data);
- vkms_state->crc_data = NULL;
+ kfree(vkms_state->composer);
+ vkms_state->composer = NULL;
__drm_atomic_helper_plane_destroy_state(old_state);
kfree(vkms_state);
@@ -91,21 +93,21 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
{
struct vkms_plane_state *vkms_plane_state;
struct drm_framebuffer *fb = plane->state->fb;
- struct vkms_crc_data *crc_data;
+ struct vkms_composer *composer;
if (!plane->state->crtc || !fb)
return;
vkms_plane_state = to_vkms_plane_state(plane->state);
- crc_data = vkms_plane_state->crc_data;
- memcpy(&crc_data->src, &plane->state->src, sizeof(struct drm_rect));
- memcpy(&crc_data->dst, &plane->state->dst, sizeof(struct drm_rect));
- memcpy(&crc_data->fb, fb, sizeof(struct drm_framebuffer));
- drm_framebuffer_get(&crc_data->fb);
- crc_data->offset = fb->offsets[0];
- crc_data->pitch = fb->pitches[0];
- crc_data->cpp = fb->format->cpp[0];
+ composer = vkms_plane_state->composer;
+ memcpy(&composer->src, &plane->state->src, sizeof(struct drm_rect));
+ memcpy(&composer->dst, &plane->state->dst, sizeof(struct drm_rect));
+ memcpy(&composer->fb, fb, sizeof(struct drm_framebuffer));
+ drm_framebuffer_get(&composer->fb);
+ composer->offset = fb->offsets[0];
+ composer->pitch = fb->pitches[0];
+ composer->cpp = fb->format->cpp[0];
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -176,7 +178,7 @@ static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
};
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type)
+ enum drm_plane_type type, int index)
{
struct drm_device *dev = &vkmsdev->drm;
const struct drm_plane_helper_funcs *funcs;
@@ -198,7 +200,7 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
funcs = &vkms_primary_helper_funcs;
}
- ret = drm_universal_plane_init(dev, plane, 0,
+ ret = drm_universal_plane_init(dev, plane, 1 << index,
&vkms_plane_funcs,
formats, nformats,
NULL, type, NULL);
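
The plane-init change above passes 1 << index as possible_crtcs. That argument is a bitmask over CRTC indices, so each plane becomes usable only on the CRTC created for the same pipe. A trivial standalone illustration (the helper name is made up):

#include <stdint.h>
#include <stdio.h>

/* bit n set => the plane may be bound to CRTC n */
static uint32_t possible_crtcs_for_pipe(int index)
{
	return UINT32_C(1) << index;
}

int main(void)
{
	printf("pipe 0 mask: 0x%x\n", possible_crtcs_for_pipe(0));	/* 0x1 */
	printf("pipe 2 mask: 0x%x\n", possible_crtcs_for_pipe(2));	/* 0x4 */
	return 0;
}
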
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index fc6673cde289..6c01ad2785dd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
- lockdep_assert_held(&dst->resv->lock.base);
+ reservation_object_assert_held(dst->base.resv);
if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
- lockdep_assert_held(&src->resv->lock.base);
+ reservation_object_assert_held(src->base.resv);
if (dst->ttm->state == tt_unpopulated) {
ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 5d5c2bce01f3..369034c0de31 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -342,7 +342,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
uint32_t old_mem_type = bo->mem.mem_type;
int ret;
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->base.resv);
if (pin) {
if (vbo->pin_count++ > 0)
@@ -690,7 +690,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
long lret;
lret = reservation_object_wait_timeout_rcu
- (bo->resv, true, true,
+ (bo->base.resv, true, true,
nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
@@ -835,7 +835,7 @@ int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
goto out_no_bo;
rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
@@ -1007,10 +1007,10 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
if (fence == NULL) {
vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- reservation_object_add_excl_fence(bo->resv, &fence->base);
+ reservation_object_add_excl_fence(bo->base.resv, &fence->base);
dma_fence_put(&fence->base);
} else
- reservation_object_add_excl_fence(bo->resv, &fence->base);
+ reservation_object_add_excl_fence(bo->base.resv, &fence->base);
}
@@ -1077,7 +1077,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
if (ret != 0)
return -EINVAL;
- *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+ *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
vmw_bo_unreference(&out_buf);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index b4f6e1217c9d..7984f172ec4a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -169,7 +169,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
} *cmd;
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->base.resv);
cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (!cmd)
@@ -311,7 +311,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
return 0;
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
- lockdep_assert_held(&bo->resv->lock.base);
+ reservation_object_assert_held(bo->base.resv);
mutex_lock(&dev_priv->binding_mutex);
if (!vcotbl->scrubbed)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9506190a0300..cd0d49d8a8da 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -186,7 +186,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
+ VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_RENDER_ALLOW),
@@ -641,7 +641,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
INIT_LIST_HEAD(&dev_priv->res_lru[i]);
}
- mutex_init(&dev_priv->init_mutex);
init_waitqueue_head(&dev_priv->fence_queue);
init_waitqueue_head(&dev_priv->fifo_queue);
dev_priv->fence_queue_waiters = 0;
@@ -1121,15 +1120,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
&vmw_ioctls[nr - DRM_COMMAND_BASE];
if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
- ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
- if (unlikely(ret != 0))
- return ret;
-
- if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
- goto out_io_encoding;
-
- return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
- _IOC_SIZE(cmd));
+ return ioctl_func(filp, cmd, arg);
} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
if (!drm_is_current_master(file_priv) &&
!capable(CAP_SYS_ADMIN))
@@ -1180,10 +1171,6 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
}
#endif
-static void vmw_lastclose(struct drm_device *dev)
-{
-}
-
static void vmw_master_init(struct vmw_master *vmaster)
{
ttm_lock_init(&vmaster->lock);
@@ -1551,10 +1538,9 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
+ DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
- .lastclose = vmw_lastclose,
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 366dcfc1f9bb..dbb04dbcf478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -484,11 +484,6 @@ struct vmw_private {
spinlock_t resource_lock;
struct idr res_idr[vmw_res_max];
- /*
- * Block lastclose from racing with firstopen.
- */
-
- struct mutex init_mutex;
/*
* A resource manager for kernel-only surfaces and
@@ -915,8 +910,8 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
* Command submission - vmwgfx_execbuf.c
*/
-extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
- struct drm_file *file_priv, size_t size);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 33533d126277..ff86d49dc5e8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3995,54 +3995,40 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
-int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
- struct drm_file *file_priv, size_t size)
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_vmw_execbuf_arg arg;
+ struct drm_vmw_execbuf_arg *arg = data;
int ret;
- static const size_t copy_offset[] = {
- offsetof(struct drm_vmw_execbuf_arg, context_handle),
- sizeof(struct drm_vmw_execbuf_arg)};
struct dma_fence *in_fence = NULL;
- if (unlikely(size < copy_offset[0])) {
- VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
- DRM_VMW_EXECBUF);
- return -EINVAL;
- }
-
- if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
- return -EFAULT;
-
/*
* Extend the ioctl argument while maintaining backwards compatibility:
- * We take different code paths depending on the value of arg.version.
+ * We take different code paths depending on the value of arg->version.
+ *
+ * Note: The ioctl argument is extended and zeropadded by core DRM.
*/
- if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
- arg.version == 0)) {
+ if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
+ arg->version == 0)) {
VMW_DEBUG_USER("Incorrect execbuf version.\n");
return -EINVAL;
}
- if (arg.version > 1 &&
- copy_from_user(&arg.context_handle,
- (void __user *) (data + copy_offset[0]),
- copy_offset[arg.version - 1] - copy_offset[0]) != 0)
- return -EFAULT;
-
- switch (arg.version) {
+ switch (arg->version) {
case 1:
- arg.context_handle = (uint32_t) -1;
+ /* For v1 core DRM has extended + zeropadded the data */
+ arg->context_handle = (uint32_t) -1;
break;
case 2:
default:
+ /* For v2 and later core DRM would have correctly copied it */
break;
}
/* If imported a fence FD from elsewhere, then wait on it */
- if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
- in_fence = sync_file_get_fence(arg.imported_fence_fd);
+ if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+ in_fence = sync_file_get_fence(arg->imported_fence_fd);
if (!in_fence) {
VMW_DEBUG_USER("Cannot get imported fence\n");
@@ -4059,11 +4045,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
return ret;
ret = vmw_execbuf_process(file_priv, dev_priv,
- (void __user *)(unsigned long)arg.commands,
- NULL, arg.command_size, arg.throttle_us,
- arg.context_handle,
- (void __user *)(unsigned long)arg.fence_rep,
- NULL, arg.flags);
+ (void __user *)(unsigned long)arg->commands,
+ NULL, arg->command_size, arg->throttle_us,
+ arg->context_handle,
+ (void __user *)(unsigned long)arg->fence_rep,
+ NULL, arg->flags);
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
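
Switching VMW_EXECBUF to a table-driven DRM ioctl means drm_ioctl() now performs the copy_from_user and zero-pads whatever tail an older, shorter argument struct did not supply, so the handler only back-fills version defaults. A standalone sketch of that defaulting step (struct and constant names are illustrative, not the vmwgfx UAPI):

#include <stdint.h>
#include <stdio.h>

#define EXECBUF_VERSION 2	/* newest version this sketch understands */

struct execbuf_arg {
	uint32_t version;	/* v1 callers omit everything below */
	uint32_t context_handle;
};

/* assumes the caller's struct was already copied and zero-padded */
static int execbuf_fixup(struct execbuf_arg *arg)
{
	if (arg->version == 0 || arg->version > EXECBUF_VERSION)
		return -1;	/* -EINVAL in the kernel */

	if (arg->version == 1)
		arg->context_handle = (uint32_t)-1;	/* "no context" */

	return 0;
}

int main(void)
{
	struct execbuf_arg v1 = { .version = 1 };	/* tail zero-padded */

	if (execbuf_fixup(&v1) == 0)
		printf("context_handle: %u\n", v1.context_handle);
	return 0;
}
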
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b97bc8e5944b..34284f0f5084 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1704,14 +1704,6 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
if (ret)
return ret;
- if (!state->allow_modeset)
- return ret;
-
- /*
- * Legacy path do not set allow_modeset properly like
- * @drm_atomic_helper_update_plane, This will result in unnecessary call
- * to vmw_kms_check_topology. So extra set of check.
- */
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state))
need_modeset = true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 1d38a8b2f2ec..701643b7b0c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -402,14 +402,14 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (switch_backup && new_backup != res->backup) {
if (res->backup) {
- lockdep_assert_held(&res->backup->base.resv->lock.base);
+ reservation_object_assert_held(res->backup->base.base.resv);
list_del_init(&res->mob_head);
vmw_bo_unreference(&res->backup);
}
if (new_backup) {
res->backup = vmw_bo_reference(new_backup);
- lockdep_assert_held(&new_backup->base.resv->lock.base);
+ reservation_object_assert_held(new_backup->base.base.resv);
list_add_tail(&res->mob_head, &new_backup->res_list);
} else {
res->backup = NULL;
@@ -691,7 +691,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
.num_shared = 0
};
- lockdep_assert_held(&vbo->base.resv->lock.base);
+ reservation_object_assert_held(vbo->base.base.resv);
list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
if (!res->func->unbind)
continue;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 219471903bc1..3a6da3b66484 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1669,7 +1669,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->backup_size = res->backup_size;
if (res->backup) {
rep->buffer_map_handle =
- drm_vma_node_offset_addr(&res->backup->base.vma_node);
+ drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
rep->buffer_handle = backup_handle;
} else {
@@ -1745,7 +1745,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->crep.backup_size = srf->res.backup_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
- drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
+ drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;
rep->creq.version = drm_vmw_gb_surface_v1;
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 84aa4d61dc42..ba1828acd8c9 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -8,13 +8,18 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include <drm/drmP.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_file.h>
#include <drm/drm_gem.h>
-#include <linux/of_device.h>
-
#include <xen/platform_pci.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -485,15 +490,12 @@ static const struct vm_operations_struct xen_drm_drv_vm_ops = {
};
static struct drm_driver xen_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME | DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.release = xen_drm_drv_release,
.gem_vm_ops = &xen_drm_drv_vm_ops,
.gem_free_object_unlocked = xen_drm_drv_free_object_unlocked,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
.gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
.gem_prime_vmap = xen_drm_front_gem_prime_vmap,
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index 5693b4a4b02b..f92c258350ca 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -11,13 +11,18 @@
#ifndef __XEN_DRM_FRONT_H_
#define __XEN_DRM_FRONT_H_
-#include <drm/drmP.h>
-#include <drm/drm_simple_kms_helper.h>
-
#include <linux/scatterlist.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "xen_drm_front_cfg.h"
+struct drm_device;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_pending_vblank_event;
+
/**
* DOC: Driver modes of operation in terms of display buffers used
*
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.c b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
index 5baf2b9de93c..ec53b9cc9e0e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_cfg.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
@@ -8,10 +8,10 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include <drm/drmP.h>
-
#include <linux/device.h>
+#include <drm/drm_print.h>
+
#include <xen/interface/io/displif.h>
#include <xen/xenbus.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c
index 9f5f31f77f1e..459702fa990e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_conn.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c
@@ -9,6 +9,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <video/videomode.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h
index 39de7cf5adbe..3adacba9a23b 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_conn.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.h
@@ -11,11 +11,10 @@
#ifndef __XEN_DRM_FRONT_CONN_H_
#define __XEN_DRM_FRONT_CONN_H_
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
+#include <linux/types.h>
-#include <linux/wait.h>
+struct drm_connector;
+struct xen_drm_front_drm_info;
struct xen_drm_front_drm_info;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
index 945226a95e9b..e10d95dddb99 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
@@ -8,11 +8,11 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include <drm/drmP.h>
-
#include <linux/errno.h>
#include <linux/irq.h>
+#include <drm/drm_print.h>
+
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/grant_table.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index a24548489dde..f0b85e094111 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -8,20 +8,19 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include "xen_drm_front_gem.h"
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/shmem_fs.h>
-#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
-#include <linux/dma-buf.h>
-#include <linux/scatterlist.h>
-#include <linux/shmem_fs.h>
-
#include <xen/balloon.h>
#include "xen_drm_front.h"
+#include "xen_drm_front_gem.h"
struct xen_gem_object {
struct drm_gem_object base;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
index d5ab734fdafe..a39675fa31b2 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.h
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -11,7 +11,12 @@
#ifndef __XEN_DRM_FRONT_GEM_H
#define __XEN_DRM_FRONT_GEM_H
-#include <drm/drmP.h>
+struct dma_buf_attachment;
+struct drm_device;
+struct drm_gem_object;
+struct file;
+struct sg_table;
+struct vm_area_struct;
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
size_t size);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index c2955d375394..de990036199d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -8,17 +8,18 @@
* Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
*/
-#include "xen_drm_front_kms.h"
-
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
#include "xen_drm_front.h"
#include "xen_drm_front_conn.h"
+#include "xen_drm_front_kms.h"
/*
* Timeout in ms to wait for frame done event from the backend:
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 520d7369f85a..1141c1ed1ed0 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -14,13 +14,14 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_vblank.h>
#include "zx_drm_drv.h"
#include "zx_vou.h"
@@ -34,15 +35,12 @@ static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
static struct drm_driver zx_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
- DRIVER_ATOMIC,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index bfe918b27c5c..a50f5a1f09b8 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -19,7 +19,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_print.h>
#include <sound/hdmi-codec.h>
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 6b812aad411b..086c50fac689 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -7,10 +7,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drmP.h>
#include "zx_common_regs.h"
#include "zx_drm_drv.h"
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
index a768c567b557..c598b7daf1f1 100644
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -7,11 +7,13 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#include "zx_drm_drv.h"
#include "zx_tvenc_regs.h"
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index 1634a08707fb..9b67e419280c 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -7,11 +7,13 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
#include "zx_drm_drv.h"
#include "zx_vga_regs.h"
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
index 81b4cf107b75..5259ff2825f9 100644
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -6,7 +6,10 @@
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
#include <video/videomode.h>
#include <drm/drm_atomic_helper.h>
@@ -17,7 +20,7 @@
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drmP.h>
+#include <drm/drm_vblank.h>
#include "zx_common_regs.h"
#include "zx_drm_drv.h"