Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/amd/amdgpu/amdgpu.h | 12
 drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 3
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 111
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 20
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 180
 drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 28
 drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 74
 drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 38
 drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 42
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 47
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 51
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 5
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 22
 drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 4
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 14
 drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 16
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 69
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 41
 drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 1
 drivers/gpu/drm/amd/amdgpu/cik_ih.c | 29
 drivers/gpu/drm/amd/amdgpu/cz_ih.c | 29
 drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 17
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 59
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 37
 drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 44
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 117
 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h | 10
 drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 29
 drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2
 drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c | 7
 drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 7
 drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 5
 drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 166
 drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 7
 drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 12
 drivers/gpu/drm/amd/amdgpu/si_ih.c | 14
 drivers/gpu/drm/amd/amdgpu/soc15.c | 3
 drivers/gpu/drm/amd/amdgpu/soc15_common.h | 9
 drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 29
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 4
 drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 33
 drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 85
 drivers/gpu/drm/amd/amdgpu/vi.c | 18
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 142
 drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2
 drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1
 drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 20
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 225
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 10
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 5
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h | 104
 drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 15
 drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 1
 drivers/gpu/drm/amd/display/dc/core/dc.c | 109
 drivers/gpu/drm/amd/display/dc/core/dc_link.c | 69
 drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 248
 drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 3
 drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 151
 drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 2
 drivers/gpu/drm/amd/display/dc/dc.h | 2
 drivers/gpu/drm/amd/display/dc/dc_helper.c | 4
 drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 9
 drivers/gpu/drm/amd/display/dc/dc_link.h | 6
 drivers/gpu/drm/amd/display/dc/dc_stream.h | 3
 drivers/gpu/drm/amd/display/dc/dc_types.h | 9
 drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | 7
 drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 14
 drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 91
 drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 68
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c | 4
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h | 6
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 4
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 15
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 39
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 4
 drivers/gpu/drm/amd/display/dc/dm_event_log.h | 1
 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 2
 drivers/gpu/drm/amd/display/dc/dm_services.h | 12
 drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c | 65
 drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h | 7
 drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 1
 drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 1
 drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 1
 drivers/gpu/drm/amd/display/dc/inc/compressor.h | 1
 drivers/gpu/drm/amd/display/dc/inc/core_types.h | 12
 drivers/gpu/drm/amd/display/include/bios_parser_types.h | 1
 drivers/gpu/drm/amd/display/include/dal_types.h | 1
 drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 16
 drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 10
 drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h | 14
 drivers/gpu/drm/amd/display/modules/inc/mod_shared.h | 27
 drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 15
 drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h | 32
 drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h | 35
 drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 4
 drivers/gpu/drm/amd/include/kgd_pp_interface.h | 17
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 82
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 49
 drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 27
 drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 36
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 33
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 24
 drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 133
 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 3
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 6
 drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h | 5
 drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 77
 drivers/gpu/drm/armada/armada_gem.c | 2
 drivers/gpu/drm/ast/ast_fb.c | 1
 drivers/gpu/drm/ast/ast_main.c | 3
 drivers/gpu/drm/ast/ast_mode.c | 36
 drivers/gpu/drm/bridge/tc358767.c | 48
 drivers/gpu/drm/bridge/ti-sn65dsi86.c | 2
 drivers/gpu/drm/drm_atomic_state_helper.c | 3
 drivers/gpu/drm/drm_auth.c | 2
 drivers/gpu/drm/drm_damage_helper.c | 5
 drivers/gpu/drm/drm_fb_helper.c | 2
 drivers/gpu/drm/drm_file.c | 2
 drivers/gpu/drm/drm_internal.h | 2
 drivers/gpu/drm/drm_ioctl.c | 10
 drivers/gpu/drm/drm_lease.c | 2
 drivers/gpu/drm/drm_sysfs.c | 10
 drivers/gpu/drm/etnaviv/etnaviv_buffer.c | 2
 drivers/gpu/drm/etnaviv/etnaviv_drv.c | 20
 drivers/gpu/drm/etnaviv/etnaviv_drv.h | 11
 drivers/gpu/drm/etnaviv/etnaviv_dump.c | 9
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 37
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 12
 drivers/gpu/drm/exynos/exynos_drm_fimd.c | 123
 drivers/gpu/drm/i915/Kconfig | 2
 drivers/gpu/drm/i915/gvt/aperture_gm.c | 2
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 6
 drivers/gpu/drm/i915/gvt/fb_decoder.c | 2
 drivers/gpu/drm/i915/gvt/gtt.c | 7
 drivers/gpu/drm/i915/gvt/gvt.c | 2
 drivers/gpu/drm/i915/gvt/gvt.h | 4
 drivers/gpu/drm/i915/gvt/handlers.c | 1
 drivers/gpu/drm/i915/gvt/interrupt.c | 2
 drivers/gpu/drm/i915/gvt/mmio_context.c | 2
 drivers/gpu/drm/i915/gvt/scheduler.c | 33
 drivers/gpu/drm/i915/i915_gem.c | 9
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 31
 drivers/gpu/drm/i915/i915_gem_userptr.c | 17
 drivers/gpu/drm/i915/i915_ioc32.c | 2
 drivers/gpu/drm/i915/i915_perf.c | 2
 drivers/gpu/drm/i915/i915_query.c | 2
 drivers/gpu/drm/i915/intel_display.c | 2
 drivers/gpu/drm/i915/intel_lrc.c | 7
 drivers/gpu/drm/i915/intel_ringbuffer.c | 15
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 4
 drivers/gpu/drm/mediatek/mtk_dsi.c | 11
 drivers/gpu/drm/meson/meson_crtc.c | 27
 drivers/gpu/drm/meson/meson_dw_hdmi.c | 1
 drivers/gpu/drm/meson/meson_venc.c | 4
 drivers/gpu/drm/meson/meson_viu.c | 12
 drivers/gpu/drm/msm/Kconfig | 4
 drivers/gpu/drm/msm/Makefile | 11
 drivers/gpu/drm/msm/adreno/a2xx.xml.h | 298
 drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 492
 drivers/gpu/drm/msm/adreno/a2xx_gpu.h | 21
 drivers/gpu/drm/msm/adreno/a3xx.xml.h | 10
 drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 4
 drivers/gpu/drm/msm/adreno/a4xx.xml.h | 10
 drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 4
 drivers/gpu/drm/msm/adreno/a5xx.xml.h | 10
 drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 8
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 45
 drivers/gpu/drm/msm/adreno/a5xx_power.c | 15
 drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 20
 drivers/gpu/drm/msm/adreno/a6xx.xml.h | 78
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 140
 drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 3
 drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | 10
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 89
 drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 8
 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 1165
 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 430
 drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 14
 drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 19
 drivers/gpu/drm/msm/adreno/adreno_device.c | 77
 drivers/gpu/drm/msm/adreno/adreno_gpu.c | 159
 drivers/gpu/drm/msm/adreno/adreno_gpu.h | 21
 drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 18
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c | 45
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 16
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 169
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 29
 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 402
 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 68
 drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c | 2393
 drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h | 103
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 199
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 30
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 14
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 21
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 8
 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 7
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c | 10
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h | 2
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 9
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 50
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 18
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 10
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 36
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 5
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 21
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 10
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 23
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 10
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 20
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 10
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 1
 drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c | 8
 drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c | 66
 drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h | 59
 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 374
 drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 45
 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 14
 drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 130
 drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c | 240
 drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h | 217
 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 101
 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 28
 drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h | 15
 drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h | 359
 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 8
 drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c | 12
 drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 70
 drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c | 43
 drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 8
 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c | 90
 drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c | 10
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 12
 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c | 16
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 30
 drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c | 10
 drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 8
 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 4
 drivers/gpu/drm/msm/dsi/dsi.c | 10
 drivers/gpu/drm/msm/dsi/dsi_host.c | 28
 drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 28
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 6
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c | 6
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c | 2
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c | 2
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 2
 drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 2
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 20
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c | 12
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c | 16
 drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c | 10
 drivers/gpu/drm/msm/edp/edp.c | 8
 drivers/gpu/drm/msm/hdmi/hdmi.c | 40
 drivers/gpu/drm/msm/hdmi/hdmi.h | 1
 drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 10
 drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 20
 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c | 2
 drivers/gpu/drm/msm/hdmi/hdmi_phy.c | 12
 drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c | 6
 drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c | 4
 drivers/gpu/drm/msm/msm_atomic.c | 8
 drivers/gpu/drm/msm/msm_debugfs.c | 23
 drivers/gpu/drm/msm/msm_drv.c | 219
 drivers/gpu/drm/msm/msm_drv.h | 28
 drivers/gpu/drm/msm/msm_fb.c | 14
 drivers/gpu/drm/msm/msm_fbdev.c | 10
 drivers/gpu/drm/msm/msm_gem.c | 219
 drivers/gpu/drm/msm/msm_gem.h | 5
 drivers/gpu/drm/msm/msm_gem_submit.c | 44
 drivers/gpu/drm/msm/msm_gem_vma.c | 118
 drivers/gpu/drm/msm/msm_gpu.c | 137
 drivers/gpu/drm/msm/msm_gpu.h | 2
 drivers/gpu/drm/msm/msm_gpu_trace.h | 90
 drivers/gpu/drm/msm/msm_gpu_tracepoints.c | 6
 drivers/gpu/drm/msm/msm_gpummu.c | 123
 drivers/gpu/drm/msm/msm_iommu.c | 5
 drivers/gpu/drm/msm/msm_kms.h | 3
 drivers/gpu/drm/msm/msm_mmu.h | 6
 drivers/gpu/drm/msm/msm_rd.c | 18
 drivers/gpu/drm/msm/msm_ringbuffer.c | 14
 drivers/gpu/drm/msm/msm_ringbuffer.h | 16
 drivers/gpu/drm/nouveau/Kconfig | 3
 drivers/gpu/drm/nouveau/dispnv50/disp.c | 30
 drivers/gpu/drm/nouveau/nouveau_drm.c | 6
 drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 1
 drivers/gpu/drm/omapdrm/dss/dsi.c | 20
 drivers/gpu/drm/omapdrm/dss/omapdss.h | 2
 drivers/gpu/drm/omapdrm/omap_encoder.c | 58
 drivers/gpu/drm/qxl/qxl_ioctl.c | 3
 drivers/gpu/drm/qxl/qxl_release.c | 2
 drivers/gpu/drm/radeon/radeon_cs.c | 4
 drivers/gpu/drm/radeon/radeon_gem.c | 2
 drivers/gpu/drm/radeon/radeon_mn.c | 16
 drivers/gpu/drm/radeon/radeon_vm.c | 4
 drivers/gpu/drm/rcar-du/rcar_du_group.c | 21
 drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 6
 drivers/gpu/drm/scheduler/sched_main.c | 91
 drivers/gpu/drm/selftests/test-drm_damage_helper.c | 2
 drivers/gpu/drm/tegra/dc.c | 38
 drivers/gpu/drm/tegra/drm.c | 1
 drivers/gpu/drm/tegra/falcon.c | 14
 drivers/gpu/drm/tegra/hub.c | 48
 drivers/gpu/drm/tegra/hub.h | 3
 drivers/gpu/drm/tegra/sor.c | 254
 drivers/gpu/drm/tegra/sor.h | 68
 drivers/gpu/drm/tegra/vic.c | 46
 drivers/gpu/drm/ttm/ttm_bo.c | 33
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 12
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 5
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 10
 drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 36
 drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 23
 drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 37
 drivers/gpu/drm/xen/Kconfig | 1
 drivers/gpu/drm/xen/Makefile | 1
 drivers/gpu/drm/xen/xen_drm_front.c | 65
 drivers/gpu/drm/xen/xen_drm_front_gem.c | 1
 drivers/gpu/drm/xen/xen_drm_front_shbuf.c | 414
 drivers/gpu/drm/xen/xen_drm_front_shbuf.h | 64
 drivers/gpu/host1x/Makefile | 3
 drivers/gpu/host1x/dev.c | 13
 drivers/gpu/host1x/hw/channel_hw.c | 7
 drivers/gpu/host1x/hw/debug_hw_1x06.c | 7
 drivers/gpu/host1x/hw/host1x07.c | 44
 drivers/gpu/host1x/hw/host1x07.h | 26
 drivers/gpu/host1x/hw/host1x07_hardware.h | 142
 drivers/gpu/host1x/hw/hw_host1x06_uclass.h | 2
 drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h | 32
 drivers/gpu/host1x/hw/hw_host1x07_uclass.h | 181
 drivers/gpu/host1x/hw/hw_host1x07_vm.h | 46
 drivers/gpu/host1x/hw/syncpt_hw.c | 4
352 files changed, 8797 insertions, 7741 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c8ad6bf6618a..bcef6ea4bcf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
+#include "amdgpu_amdkfd.h"
#define MAX_GPU_INSTANCE 16
@@ -163,6 +164,7 @@ extern int amdgpu_si_support;
extern int amdgpu_cik_support;
#endif
+#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
@@ -235,7 +237,7 @@ enum amdgpu_kiq_irq {
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
+#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
@@ -862,6 +864,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* KFD */
+ struct amdgpu_kfd_dev kfd;
+
/* display related functionality */
struct amdgpu_display_manager dm;
@@ -875,9 +880,6 @@ struct amdgpu_device {
atomic64_t visible_pin_size;
atomic64_t gart_pin_size;
- /* amdkfd interface */
- struct kfd_dev *kfd;
-
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
@@ -910,7 +912,9 @@ struct amdgpu_device {
bool in_gpu_reset;
struct mutex lock_reset;
struct amdgpu_doorbell_index doorbell_index;
+
int asic_reset_res;
+ struct work_struct xgmi_reset_work;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 47db65926d71..4376b17ca594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -886,6 +886,5 @@ void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
- if (adev->atif)
- kfree(adev->atif);
+ kfree(adev->atif);
}
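
Note: the hunk above works because kfree() is defined as a no-op when
passed NULL, so the explicit check was redundant. A minimal sketch of the
idiom (priv->buf is a hypothetical field):

	kfree(priv->buf);	/* kfree(NULL) is a no-op; no guard needed */
	priv->buf = NULL;	/* optional: avoid a dangling pointer */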
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d693b8047653..2dfaf158ef07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -26,15 +26,26 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
+#include <linux/dma-buf.h>
const struct kgd2kfd_calls *kgd2kfd;
static const unsigned int compute_vmid_bitmap = 0xFF00;
+/* Total memory size in system memory and all GPU VRAM. Used to
+ * estimate worst case amount of memory to reserve for page tables
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
+
int amdgpu_amdkfd_init(void)
{
+ struct sysinfo si;
int ret;
+ si_meminfo(&si);
+ amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+ amdgpu_amdkfd_total_mem_size *= si.mem_unit;
+
#ifdef CONFIG_HSA_AMD
ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
if (ret)
@@ -87,8 +98,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
}
- adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
+ adev->pdev, kfd2kgd);
+
+ if (adev->kfd.dev)
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -128,7 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i, n;
int last_valid_bit;
- if (adev->kfd) {
+
+ if (adev->kfd.dev) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = compute_vmid_bitmap,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
@@ -167,7 +182,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_start_offset);
if (adev->asic_type < CHIP_VEGA10) {
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
return;
}
@@ -196,37 +211,37 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
gpu_resources.reserved_doorbell_mask = 0x1e0;
gpu_resources.reserved_doorbell_val = 0x0e0;
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
}
}
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
- if (adev->kfd) {
- kgd2kfd->device_exit(adev->kfd);
- adev->kfd = NULL;
+ if (adev->kfd.dev) {
+ kgd2kfd->device_exit(adev->kfd.dev);
+ adev->kfd.dev = NULL;
}
}
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry)
{
- if (adev->kfd)
- kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
+ if (adev->kfd.dev)
+ kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
}
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
- if (adev->kfd)
- kgd2kfd->suspend(adev->kfd);
+ if (adev->kfd.dev)
+ kgd2kfd->suspend(adev->kfd.dev);
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->resume(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->resume(adev->kfd.dev);
return r;
}
@@ -235,8 +250,8 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->pre_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->pre_reset(adev->kfd.dev);
return r;
}
@@ -245,8 +260,8 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->post_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->post_reset(adev->kfd.dev);
return r;
}
@@ -419,6 +434,62 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
cu_info->lds_size = acu_info.lds_size;
}
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dma_buf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint64_t metadata_flags;
+ int r = -EINVAL;
+
+ dma_buf = dma_buf_get(dma_buf_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ goto out_put;
+
+ obj = dma_buf->priv;
+ if (obj->dev->driver != adev->ddev->driver)
+ /* Can't handle buffers from different drivers */
+ goto out_put;
+
+ adev = obj->dev->dev_private;
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ goto out_put;
+
+ r = 0;
+ if (dma_buf_kgd)
+ *dma_buf_kgd = (struct kgd_dev *)adev;
+ if (bo_size)
+ *bo_size = amdgpu_bo_size(bo);
+ if (metadata_size)
+ *metadata_size = bo->metadata_size;
+ if (metadata_buffer)
+ r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+ metadata_size, &metadata_flags);
+ if (flags) {
+ *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ }
+
+out_put:
+ dma_buf_put(dma_buf);
+ return r;
+}
+
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -501,7 +572,7 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
- if (adev->kfd) {
+ if (adev->kfd.dev) {
if ((1 << vmid) & compute_vmid_bitmap)
return true;
}
@@ -515,7 +586,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
return false;
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
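
Note on the si_meminfo() arithmetic introduced above: totalram and
totalhigh are page counts and mem_unit is the size of one page in bytes,
so the usable low-memory size in bytes is their difference scaled by
mem_unit. A minimal standalone sketch:

	#include <linux/mm.h>

	struct sysinfo si;
	uint64_t mem_bytes;

	si_meminfo(&si);	/* fills in page counts and mem_unit */
	/* bytes of lowmem = (total pages - highmem pages) * bytes/page */
	mem_bytes = (uint64_t)(si.totalram - si.totalhigh) * si.mem_unit;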
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index bcf587b4ba98..70429f7aa9a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -35,6 +34,7 @@
#include "amdgpu_vm.h"
extern const struct kgd2kfd_calls *kgd2kfd;
+extern uint64_t amdgpu_amdkfd_total_mem_size;
struct amdgpu_device;
@@ -77,6 +77,11 @@ struct amdgpu_amdkfd_fence {
char timeline_name[TASK_COMM_LEN];
};
+struct amdgpu_kfd_dev {
+ struct kfd_dev *dev;
+ uint64_t vram_used;
+};
+
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -144,6 +149,11 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dmabuf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
@@ -195,7 +205,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *info);
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dmabuf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 72a357dae070..ff7fac7df34b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -23,6 +23,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 0e2a56b6a9b6..56ea929f524b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -24,6 +24,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 03b604c96d94..5c51d4910650 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -26,6 +26,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index f3129b912714..be1ab43473c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
+#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -110,17 +111,17 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
(kfd_mem_limit.max_ttm_mem_limit >> 20));
}
-static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
+static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 domain, bool sg)
{
- size_t acc_size, system_mem_needed, ttm_mem_needed;
+ size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
+ uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
int ret = 0;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
sizeof(struct amdgpu_bo));
- spin_lock(&kfd_mem_limit.mem_limit_lock);
-
+ vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
/* TTM GTT memory */
system_mem_needed = acc_size + size;
@@ -133,23 +134,30 @@ static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
/* VRAM and SG */
system_mem_needed = acc_size;
ttm_mem_needed = acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ vram_needed = size;
}
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
if ((kfd_mem_limit.system_mem_used + system_mem_needed >
- kfd_mem_limit.max_system_mem_limit) ||
- (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
- kfd_mem_limit.max_ttm_mem_limit))
+ kfd_mem_limit.max_system_mem_limit) ||
+ (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) ||
+ (adev->kfd.vram_used + vram_needed >
+ adev->gmc.real_vram_size - reserved_for_pt)) {
ret = -ENOMEM;
- else {
+ } else {
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+ adev->kfd.vram_used += vram_needed;
}
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return ret;
}
-static void unreserve_system_mem_limit(struct amdgpu_device *adev,
+static void unreserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 domain, bool sg)
{
size_t acc_size;
@@ -167,6 +175,11 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
} else {
kfd_mem_limit.system_mem_used -= acc_size;
kfd_mem_limit.ttm_mem_used -= acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ adev->kfd.vram_used -= size;
+ WARN_ONCE(adev->kfd.vram_used < 0,
+ "kfd VRAM memory accounting unbalanced");
+ }
}
WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
"kfd system memory accounting unbalanced");
@@ -176,29 +189,18 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ u32 domain = bo->preferred_domains;
+ bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
- kfd_mem_limit.system_mem_used -=
- (bo->tbo.acc_size + amdgpu_bo_size(bo));
- kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size;
- } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
- kfd_mem_limit.system_mem_used -=
- (bo->tbo.acc_size + amdgpu_bo_size(bo));
- kfd_mem_limit.ttm_mem_used -=
- (bo->tbo.acc_size + amdgpu_bo_size(bo));
- } else {
- kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
- kfd_mem_limit.ttm_mem_used -= bo->tbo.acc_size;
+ domain = AMDGPU_GEM_DOMAIN_CPU;
+ sg = false;
}
- WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
- "kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
- "kfd TTM memory accounting unbalanced");
- spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}
@@ -535,7 +537,7 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
struct amdgpu_bo *bo = mem->bo;
INIT_LIST_HEAD(&entry->head);
- entry->shared = true;
+ entry->num_shared = 1;
entry->bo = &bo->tbo;
mutex_lock(&process_info->lock);
if (userptr)
@@ -676,7 +678,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -740,7 +742,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -885,6 +887,24 @@ update_gpuvm_pte_failed:
return ret;
}
+static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+{
+ struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+ if (!sg)
+ return NULL;
+ if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+ kfree(sg);
+ return NULL;
+ }
+ sg->sgl->dma_address = addr;
+ sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->sgl->dma_length = size;
+#endif
+ return sg;
+}
+
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
struct amdgpu_vm *peer_vm;
@@ -1168,6 +1188,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
@@ -1196,13 +1218,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (!offset || !*offset)
return -EINVAL;
user_addr = *offset;
+ } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+ bo_type = ttm_bo_type_sg;
+ alloc_flags = 0;
+ if (size > UINT_MAX)
+ return -EINVAL;
+ sg = create_doorbell_sg(*offset, size);
+ if (!sg)
+ return -ENOMEM;
} else {
return -EINVAL;
}
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem)
- return -ENOMEM;
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1235,8 +1269,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size,
- alloc_domain, false);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
pr_debug("Insufficient system memory\n");
goto err_reserve_limit;
@@ -1250,7 +1283,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
bp.byte_align = byte_align;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
- bp.type = ttm_bo_type_device;
+ bp.type = bo_type;
bp.resv = NULL;
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret) {
@@ -1258,6 +1291,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ if (bo_type == ttm_bo_type_sg) {
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ }
bo->kfd_bo = *mem;
(*mem)->bo = bo;
if (user_addr)
@@ -1289,10 +1326,15 @@ allocate_init_user_pages_failed:
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
- unreserve_system_mem_limit(adev, size, alloc_domain, false);
+ unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
kfree(*mem);
+err:
+ if (sg) {
+ sg_free_table(sg);
+ kfree(sg);
+ }
return ret;
}
@@ -1362,6 +1404,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
+ /* If the SG is not NULL, it's one we created for a doorbell
+ * BO. We need to free it.
+ */
+ if (mem->bo->tbo.sg) {
+ sg_free_table(mem->bo->tbo.sg);
+ kfree(mem->bo->tbo.sg);
+ }
+
/* Free the BO*/
amdgpu_bo_unref(&mem->bo);
mutex_destroy(&mem->lock);
@@ -1664,6 +1714,60 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
return 0;
}
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dma_buf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ return -EINVAL;
+
+ obj = dma_buf->priv;
+ if (obj->dev->dev_private != adev)
+ /* Can't handle buffers from other devices */
+ return -EINVAL;
+
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ return -EINVAL;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+
+ if (size)
+ *size = amdgpu_bo_size(bo);
+
+ if (mmap_offset)
+ *mmap_offset = amdgpu_bo_mmap_offset(bo);
+
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ mutex_init(&(*mem)->lock);
+ (*mem)->mapping_flags =
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+
+ (*mem)->bo = amdgpu_bo_ref(bo);
+ (*mem)->va = va;
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+ (*mem)->mapped_to_gpu_memory = 0;
+ (*mem)->process_info = avm->process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
+ amdgpu_sync_create(&(*mem)->sync);
+
+ return 0;
+}
+
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1830,7 +1934,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
validate_list.head) {
list_add_tail(&mem->resv_list.head, &resv_list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
/* Reserve all BOs and page tables for validation */
@@ -2049,7 +2153,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
list_add_tail(&mem->resv_list.head, &ctx.list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
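
Note on the VRAM accounting added above: reserved_for_pt is
amdgpu_amdkfd_total_mem_size >> 9, i.e. 1/512th (about 0.2%) of total
system-plus-VRAM memory is held back as worst-case page-table space.
A minimal sketch of the check, with hypothetical local names:

	uint64_t reserved_for_pt = total_mem_size >> 9;	/* 1/512th */

	/* refuse allocations that would eat into the page-table reserve */
	if (vram_used + vram_needed > real_vram_size - reserved_for_pt)
		return -ENOMEM;
	vram_used += vram_needed;	/* otherwise account and proceed */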
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 14d2982a47cc..5c79da8e1150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -118,7 +118,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = !bo->prime_shared_count;
if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
list->gds_obj = bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index ceadeeadfa56..387f1cf1dc20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -381,7 +381,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xe7) ||
(adev->pdev->revision == 0xef))) ||
((adev->pdev->device == 0x6fdf) &&
- (adev->pdev->revision == 0xef))) {
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
} else if ((adev->pdev->device == 0x67df) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dc54e9efd910..1c49b8266d69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- p->uf_entry.tv.shared = true;
+ /* One for TTM and one for the CS job */
+ p->uf_entry.tv.num_shared = 2;
p->uf_entry.user_pages = NULL;
drm_gem_object_put_unlocked(gobj);
@@ -124,14 +125,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
goto free_chunk;
}
- mutex_lock(&p->ctx->lock);
-
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
@@ -598,6 +599,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ /* One for TTM and one for the CS job */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->tv.num_shared = 2;
+
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
if (p->bo_list->first_userptr != p->bo_list->num_entries)
p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
@@ -717,8 +722,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
gws = p->bo_list->gws_obj;
oa = p->bo_list->oa_obj;
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ /* Make sure we use the exclusive slot for shared BOs */
+ if (bo->prime_shared_count)
+ e->tv.num_shared = 0;
+ e->bo_va = amdgpu_vm_bo_find(vm, bo);
+ }
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
@@ -955,10 +966,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
- if (r)
- return r;
-
p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
if (amdgpu_vm_debug) {
@@ -1421,6 +1428,9 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(fence))
return PTR_ERR(fence);
+ if (!fence)
+ fence = dma_fence_get_stub();
+
switch (info->in.what) {
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
r = drm_syncobj_create(&syncobj, 0, fence);
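
Note on the tv.shared -> tv.num_shared changes in this and later hunks:
the TTM validation API now takes a count of shared-fence slots to
preallocate during reservation instead of a boolean; num_shared == 0
means the exclusive slot is used. A minimal usage sketch, assuming the
post-change ttm_execbuf_util interface:

	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	LIST_HEAD(list);
	int r;

	tv.bo = &bo->tbo;	/* 'bo' is an already-created amdgpu_bo */
	tv.num_shared = 2;	/* e.g. one slot for TTM, one for the job */
	list_add(&tv.head, &list);

	/* reserves every BO on the list and preallocates num_shared
	 * shared-fence slots per BO (interruptible, no duplicates list) */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);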
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 5b550706ee76..7e22be7ca68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -74,7 +74,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&csa_tv.head);
csa_tv.bo = &bo->tbo;
- csa_tv.shared = true;
+ csa_tv.num_shared = 1;
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 95f4c4139fc6..d85184b5b35c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -248,7 +248,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
return -ENOMEM;
mutex_lock(&mgr->lock);
- r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index c75badfa5c4c..7ff3a28fc903 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -515,7 +515,6 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
- amdgpu_asic_init_doorbell_index(adev);
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
@@ -529,6 +528,8 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
return -EINVAL;
+ amdgpu_asic_init_doorbell_index(adev);
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@@ -1700,8 +1701,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+ }
return 0;
}
@@ -1864,6 +1867,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
amdgpu_amdkfd_device_fini(adev);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
@@ -2353,6 +2359,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+ struct amdgpu_device *adev =
+ container_of(__work, struct amdgpu_device, xgmi_reset_work);
+
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ if (adev->asic_reset_res)
+ DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+ adev->asic_reset_res, adev->ddev->unique);
+}
+
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2451,6 +2470,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+
adev->gfx.gfx_off_req_count = 1;
adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
@@ -2613,9 +2634,6 @@ fence_driver_init:
goto failed;
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_init_data_exchange(adev);
-
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
@@ -2779,7 +2797,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
- if (amdgpu_crtc->cursor_bo) {
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
@@ -2887,7 +2905,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->cursor_bo) {
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
@@ -3207,6 +3225,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
r = amdgpu_ib_ring_tests(adev);
error:
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
atomic_inc(&adev->vram_lost_counter);
@@ -3239,6 +3258,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (amdgpu_gpu_recovery == -1) {
switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
case CHIP_TOPAZ:
case CHIP_TONGA:
case CHIP_FIJI:
@@ -3328,10 +3349,31 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
*/
if (need_full_reset) {
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- r = amdgpu_asic_reset(tmp_adev);
- if (r)
- DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
r, tmp_adev->ddev->unique);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all PSP resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
}
}
@@ -3434,14 +3476,16 @@ static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
mutex_lock(&adev->lock_reset);
atomic_inc(&adev->gpu_reset_counter);
adev->in_gpu_reset = 1;
- /* Block kfd */
- amdgpu_amdkfd_pre_reset(adev);
+ /* Block kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_pre_reset(adev);
}
static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
- /*unlock kfd */
- amdgpu_amdkfd_post_reset(adev);
+ /*unlock kfd: SRIOV would do it separately */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
@@ -3518,8 +3562,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
if (tmp_adev == adev)
continue;
- dev_info(tmp_adev->dev, "GPU reset begin for drm dev %s!\n", adev->ddev->unique);
-
amdgpu_device_lock_adev(tmp_adev);
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
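
Note on the XGMI reset changes above: per-device ASIC resets are fanned
out to the high-priority system workqueue so every node in a hive resets
in parallel, then joined with flush_work(). A minimal sketch of that
fan-out/join pattern (struct my_dev, do_reset() and the hive list are
hypothetical):

	static void my_reset_func(struct work_struct *work)
	{
		struct my_dev *d = container_of(work, struct my_dev,
						reset_work);

		d->reset_res = do_reset(d);	/* runs on a worker thread */
	}

	/* at init time, once per device: */
	/* INIT_WORK(&d->reset_work, my_reset_func); */

	static int reset_hive(struct list_head *hive)
	{
		struct my_dev *d;

		/* fan out: queue every node's reset so they run concurrently */
		list_for_each_entry(d, hive, head)
			queue_work(system_highpri_wq, &d->reset_work);

		/* join: wait for each worker and report the first failure */
		list_for_each_entry(d, hive, head) {
			flush_work(&d->reset_work);
			if (d->reset_res)
				return d->reset_res;
		}
		return 0;
	}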
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 15ce7e681d67..dafc645b2e4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -188,10 +188,12 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to pin new abo buffer before flip\n");
- goto unreserve;
+ if (!adev->enable_virtual_display) {
+ r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
+ goto unreserve;
+ }
}
r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
@@ -211,7 +213,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
- work->base = amdgpu_bo_gpu_offset(new_abo);
+ if (!adev->enable_virtual_display)
+ work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
@@ -242,9 +245,10 @@ pflip_cleanup:
goto cleanup;
}
unpin:
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
- DRM_ERROR("failed to unpin new abo in error path\n");
- }
+ if (!adev->enable_virtual_display)
+ if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+ DRM_ERROR("failed to unpin new abo in error path\n");
+
unreserve:
amdgpu_bo_unreserve(new_abo);
@@ -527,6 +531,17 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
int ret;
+ int height;
+ struct amdgpu_device *adev = dev->dev_private;
+ int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
+ int pitch = mode_cmd->pitches[0] / cpp;
+
+ pitch = amdgpu_align_pitch(adev, pitch, cpp, false);
+ if (mode_cmd->pitches[0] != pitch) {
+ DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n",
+ pitch, mode_cmd->pitches[0]);
+ return ERR_PTR(-EINVAL);
+ }
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
@@ -541,6 +556,13 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
+ height = ALIGN(mode_cmd->height, 8);
+ if (obj->size < pitch * height) {
+ DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n",
+ pitch * height, obj->size);
+ return ERR_PTR(-EINVAL);
+ }
+
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_put_unlocked(obj);
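
Worked example for the validation added above, assuming a 1920x1080
XRGB8888 framebuffer and 64-pixel pitch alignment (the exact granularity
comes from amdgpu_align_pitch() and is an assumption here):

	cpp      = 4 bytes/pixel
	pitch    = 7680 / 4 = 1920 px; already 64-px aligned -> 7680 bytes
	height   = ALIGN(1080, 8) = 1080
	min size = 7680 * 1080 = 8294400 bytes (~7.9 MiB)

A GEM object smaller than that, or a user-supplied pitch that does not
match the aligned value, is now rejected with -EINVAL.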
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 90f474f98b6e..c806f984bcc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -865,6 +865,7 @@ static const struct pci_device_id pciidlist[] = {
/* VEGAM */
{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
/* Vega 10 */
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
@@ -873,7 +874,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Vega 12 */
{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -886,6 +893,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
/* Raven */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7b3d1ebda9df..f4f00217546e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -169,7 +169,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -604,7 +604,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
tv.bo = &abo->tbo;
- tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ tv.num_shared = 1;
+ else
+ tv.num_shared = 0;
list_add(&tv.head, &list);
} else {
gobj = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index d63daba9b17c..f1ddfc50bcc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -54,6 +54,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
/*
* GEM objects.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 8c57924c075f..81e6070d255b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -99,6 +99,7 @@ struct amdgpu_xgmi {
unsigned num_physical_nodes;
/* gpu list in the same hive */
struct list_head head;
+ bool supported;
};
struct amdgpu_gmc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 9ce8c93ec19b..f877bb78d10a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -51,14 +51,12 @@ struct amdgpu_ih_ring {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
- bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
};
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 6b6524f04ce0..b7968f426862 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -145,13 +145,6 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev,
u32 ring_index = ih->rptr >> 2;
struct amdgpu_iv_entry entry;
- /* Prescreening of high-frequency interrupts */
- if (!amdgpu_ih_prescreen_iv(adev))
- return;
-
- /* Before dispatching irq to IP blocks, send it to amdkfd */
- amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
-
entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
amdgpu_ih_decode_iv(adev, &entry);
@@ -371,39 +364,38 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
unsigned client_id = entry->client_id;
unsigned src_id = entry->src_id;
struct amdgpu_irq_src *src;
+ bool handled = false;
int r;
trace_amdgpu_iv(entry);
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
- return;
- }
- if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+ } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- return;
- }
- if (adev->irq.virq[src_id]) {
+ } else if (adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
- } else {
- if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
- return;
- }
- src = adev->irq.client[client_id].sources[src_id];
- if (!src) {
- DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
- return;
- }
+ } else if (!adev->irq.client[client_id].sources) {
+ DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
+ } else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, entry);
- if (r)
+ if (r < 0)
DRM_ERROR("error processing interrupt (%d)\n", r);
+ else if (r)
+ handled = true;
+
+ } else {
+ DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
}
+
+ /* Send it to amdkfd as well if it isn't already handled */
+ if (!handled)
+ amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
}
/**
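
The dispatch path above replaces early returns with a single if/else-if
chain and a "handled" flag: an IV that no IP block claims (process()
returning 0) now falls through to amdkfd, which is what made the
per-ASIC prescreen_iv hooks removable. A compilable userspace sketch of
the control flow, with placeholder names:

    #include <stdbool.h>
    #include <stdio.h>

    /* process() mimics amdgpu_irq_src.funcs->process: negative on error,
     * 0 if the IV was not consumed, positive if it was. */
    static bool dispatch(int client_id, int src_id, int (*process)(void))
    {
            bool handled = false;

            if (client_id < 0)
                    fprintf(stderr, "invalid client_id\n");
            else if (src_id < 0)
                    fprintf(stderr, "invalid src_id\n");
            else if (process) {
                    int r = process();

                    if (r < 0)
                            fprintf(stderr, "error processing interrupt\n");
                    else if (r)
                            handled = true;
            } else {
                    fprintf(stderr, "unhandled interrupt\n");
            }
            return handled;  /* caller forwards the IV to KFD when false */
    }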
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index e0af44fd6a0c..0a17fb1af204 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -32,6 +32,9 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
+ struct amdgpu_task_info ti;
+
+ memset(&ti, 0, sizeof(struct amdgpu_task_info));
if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
@@ -39,9 +42,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
return;
}
+ amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
ring->fence_drv.sync_seq);
+ DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+ ti.process_name, ti.tgid, ti.task_name, ti.pid);
if (amdgpu_device_should_recover_gpu(ring->adev))
amdgpu_device_gpu_recover(ring->adev, job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index e55508b39496..3e6823fdd939 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
* amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
*
* @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @range: mmu notifier context
*
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
+ unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
/* TODO we should be able to split locking for interval tree and
* amdgpu_mn_invalidate_node
*/
- if (amdgpu_mn_read_lock(amn, blockable))
+ if (amdgpu_mn_read_lock(amn, range->blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
- if (!blockable) {
+ if (!range->blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
- amdgpu_mn_invalidate_node(node, start, end);
+ amdgpu_mn_invalidate_node(node, range->start, end);
}
return 0;
@@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
 * are restored in amdgpu_mn_invalidate_range_end_hsa.
*/
static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
+ unsigned long end;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
- if (amdgpu_mn_read_lock(amn, blockable))
+ if (amdgpu_mn_read_lock(amn, range->blockable))
return -EAGAIN;
- it = interval_tree_iter_first(&amn->objects, start, end);
+ it = interval_tree_iter_first(&amn->objects, range->start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
- if (!blockable) {
+ if (!range->blockable) {
amdgpu_mn_read_unlock(amn);
return -EAGAIN;
}
node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
list_for_each_entry(bo, &node->bos, mn_list) {
struct kgd_mem *mem = bo->kfd_bo;
if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
- start, end))
- amdgpu_amdkfd_evict_userptr(mem, mm);
+ range->start,
+ end))
+ amdgpu_amdkfd_evict_userptr(mem, range->mm);
}
}
@@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
* Release the lock again to allow new command submissions.
*/
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
+ const struct mmu_notifier_range *range)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
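
Both notifier callbacks now take a single const struct
mmu_notifier_range * instead of four scalar arguments. A self-contained
sketch of the new shape, modeling only the fields this patch actually
reads (mm, start, end, blockable):

    #include <errno.h>
    #include <stdbool.h>

    /* Minimal stand-in for the kernel's mmu_notifier_range. */
    struct mmu_notifier_range_sketch {
            void *mm;
            unsigned long start, end;  /* end is exclusive */
            bool blockable;
    };

    static int invalidate_range_start(const struct mmu_notifier_range_sketch *range)
    {
            /* notifier range is exclusive, the interval tree is inclusive */
            unsigned long end = range->end - 1;

            if (!range->blockable)
                    return -EAGAIN;  /* cannot sleep; caller retries later */
            (void)end;               /* walk [range->start, end] here */
            return 0;
    }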
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index cf768acb51dc..728e15e5d68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -81,7 +81,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_subtract_pin_size(bo);
if (bo->kfd_bo)
- amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+ amdgpu_amdkfd_unreserve_memory_limit(bo);
amdgpu_bo_kunmap(bo);
@@ -608,53 +608,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be backed up
- * @resv: reservation object with embedded fence
- * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
- *
- * Copies an &amdgpu_bo buffer object to its shadow object.
- * Not used for now.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
-
-{
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
- int r;
-
- if (!shadow)
- return -EINVAL;
-
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv, 1);
- if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct, false);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
-
-err:
- return r;
-}
-
-/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
*
@@ -959,7 +912,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
struct ttm_operation_ctx ctx = { false, false };
int r, i;
- if (!bo->pin_count) {
+ if (WARN_ON_ONCE(!bo->pin_count)) {
dev_warn(adev->dev, "%p unpin not necessary\n", bo);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 7d3312d0da11..9291c2f837e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -267,11 +267,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 1f61ed95727c..6896dec97fc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2008,6 +2008,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
if (adev->pm.sysfs_initialized)
@@ -2091,12 +2092,14 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_power_profile_mode\n");
return ret;
}
- ret = device_create_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_od_clk_voltage\n");
- return ret;
+ if (hwmgr->od_enabled) {
+ ret = device_create_file(adev->dev,
+ &dev_attr_pp_od_clk_voltage);
+ if (ret) {
+ DRM_ERROR("failed to create device file "
+ "pp_od_clk_voltage\n");
+ return ret;
+ }
}
ret = device_create_file(adev->dev,
&dev_attr_gpu_busy_percent);
@@ -2118,6 +2121,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
if (adev->pm.dpm_enabled == 0)
return;
@@ -2138,8 +2143,9 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
device_remove_file(adev->dev,
&dev_attr_pp_power_profile_mode);
- device_remove_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
+ if (hwmgr->od_enabled)
+ device_remove_file(adev->dev,
+ &dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
}
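
The pp_od_clk_voltage file is now created and removed under the same
hwmgr->od_enabled guard, so teardown never touches a file that init
skipped. A toy sketch of keeping both paths keyed off one predicate
(struct and names are placeholders):

    #include <stdbool.h>
    #include <stdio.h>

    struct hwmgr_sketch { bool od_enabled; };

    static void od_sysfs(const struct hwmgr_sketch *hwmgr, bool create)
    {
            if (!hwmgr->od_enabled)  /* identical guard on init and fini */
                    return;
            printf("%s pp_od_clk_voltage\n", create ? "create" : "remove");
    }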
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 3e44d889f7af..71913a18d142 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -39,8 +39,6 @@
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
-static const struct dma_buf_ops amdgpu_dmabuf_ops;
-
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
* implementation
@@ -332,7 +330,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
-static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_gem_map_attach,
.detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index e05dc66b1090..8fab0d637ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -155,10 +155,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
-static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
- cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
+ if (psp_support_vmr_ring(psp))
+ cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
@@ -192,7 +196,7 @@ static int psp_tmr_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
PSP_TMR_SIZE, psp->tmr_mc_addr);
@@ -536,8 +540,10 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
- if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
goto skip_memalloc;
+ }
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 9ec5d1a666a6..3ee573b4016e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -83,12 +83,13 @@ struct psp_funcs
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
- uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
+ int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
+ int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
+ bool (*support_vmr_ring)(struct psp_context *psp);
};
struct psp_xgmi_context {
@@ -192,12 +193,14 @@ struct psp_xgmi_topology_info {
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
+#define psp_support_vmr_ring(psp) \
+ ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp) \
- ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
-#define psp_xgmi_get_hive_id(psp) \
- ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp, node_id) \
+ ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
+#define psp_xgmi_get_hive_id(psp, hive_id) \
+ ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
#define psp_xgmi_get_topology_info(psp, num_device, topology) \
((psp)->funcs->xgmi_get_topology_info ? \
(psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
@@ -217,7 +220,6 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
-
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
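
The xgmi_get_node_id/xgmi_get_hive_id callbacks switch from returning
the ID itself (where 0 was ambiguously both "no callback" and a value)
to returning an int status with the ID in an out-parameter. A small
sketch of the convention the wrapper macros implement, with
illustrative names:

    #include <errno.h>
    #include <stdint.h>

    struct psp_funcs_sketch {
            int (*xgmi_get_hive_id)(uint64_t *hive_id);
    };

    /* -EINVAL when the callback is absent; otherwise its status. */
    static int get_hive_id(const struct psp_funcs_sketch *f, uint64_t *hive_id)
    {
            return f->xgmi_get_hive_id ? f->xgmi_get_hive_id(hive_id)
                                       : -EINVAL;
    }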
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 5b75bdc8dc28..335a0edf114b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -397,7 +397,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
- if (!ring->funcs->soft_recovery)
+ if (!ring->funcs->soft_recovery || !fence)
return false;
atomic_inc(&ring->adev->gpu_reset_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 0beb01fef83f..d87e828a084b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -29,7 +29,7 @@
#include <drm/drm_print.h>
/* max number of rings */
-#define AMDGPU_MAX_RINGS 21
+#define AMDGPU_MAX_RINGS 23
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index e2e42e3fbcf3..ecf6f96df2ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -262,7 +262,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -322,7 +322,7 @@ static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev,
ring = &adev->vcn.ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2));
+ RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
@@ -396,16 +396,26 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
+ unsigned int fences = 0;
+ unsigned int i;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ }
+ if (fences)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
- new_state.fw_based = adev->vcn.pause_state.fw_based;
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
new_state.jpeg = VCN_DPG_STATE__PAUSE;
else
- new_state.jpeg = adev->vcn.pause_state.jpeg;
+ new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ new_state.jpeg = VCN_DPG_STATE__PAUSE;
amdgpu_vcn_pause_dpg_mode(adev, &new_state);
}
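
begin_use now derives the DPG pause state from the fences actually
outstanding on the encode and JPEG rings, then still forces PAUSE for
the ring type being submitted to, so an idle ring can finally reach
UNPAUSE. The decision, reduced to a pure function (a sketch, not the
driver's code):

    #include <stdbool.h>

    enum dpg_state { VCN_DPG_STATE__UNPAUSE, VCN_DPG_STATE__PAUSE };

    /* Pause iff encode work is in flight or an encode submission is
     * about to start; the jpeg decision has the same shape. */
    static enum dpg_state fw_based_state(unsigned int enc_fences_emitted,
                                         bool using_enc_ring)
    {
            return (enc_fences_emitted || using_enc_ring) ?
                    VCN_DPG_STATE__PAUSE : VCN_DPG_STATE__UNPAUSE;
    }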
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 58a2363040dd..d2ea5ce2cefb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -181,7 +181,7 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == adev->vm_manager.root_level)
/* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
@@ -617,7 +617,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- entry->tv.shared = true;
+ /* One for the VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 3;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -773,10 +774,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
- r = reservation_object_reserve_shared(bo->tbo.resv, 1);
- if (r)
- return r;
-
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto error;
@@ -850,9 +847,6 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
bp->size = amdgpu_vm_bo_size(adev, level);
bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
- adev->flags & AMD_IS_APU)
- bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC;
@@ -1656,9 +1650,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if (!amdgpu_vm_pt_descendant(adev, &cursor))
return -ENOENT;
continue;
- } else if (frag >= parent_shift) {
+ } else if (frag >= parent_shift &&
+ cursor.level - 1 != adev->vm_manager.root_level) {
/* If the fragment size is even larger than the parent
- * shift we should go up one level and check it again.
+ * shift we should go up one level and check it again
+ * unless one level up is the root level.
*/
if (!amdgpu_vm_pt_ancestor(&cursor))
return -ENOENT;
@@ -1666,10 +1662,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
}
/* Looks good so far, calculate parameters for the update */
- incr = AMDGPU_GPU_PAGE_SIZE << shift;
+ incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = (mask + 1) << shift;
+ entry_end = (uint64_t)(mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
@@ -1682,7 +1678,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
flags | AMDGPU_PTE_FRAG(frag));
pe_start += nptes * 8;
- dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+ dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
frag_start = upd_end;
if (frag_start >= frag_end) {
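
The (uint64_t) casts above fix 32-bit truncation: with a 32-bit left
operand the shifted value wraps modulo 2^32, corrupting entry_end and
dst once a page-directory level spans more than 4 GiB. A runnable
demonstration of the failure mode:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t mask = 511;      /* 512 entries at this level */
            unsigned int shift = 30;  /* each entry covers 1 GiB */
            uint64_t wrong = (mask + 1) << shift;           /* wraps to 0 */
            uint64_t right = (uint64_t)(mask + 1) << shift; /* 0x8000000000 */

            printf("wrong=%#llx right=%#llx\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }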
@@ -1842,10 +1838,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
- if (r)
- goto error_free;
-
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
goto error_free;
@@ -3026,6 +3018,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_free_root;
+ r = reservation_object_reserve_shared(root->tbo.resv, 1);
+ if (r)
+ goto error_unreserve;
+
r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
@@ -3055,7 +3051,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
INIT_KFIFO(vm->faults);
- vm->fault_credit = 16;
return 0;
@@ -3268,42 +3263,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
- *
- * @adev: amdgpu_device pointer
- * @pasid: PASID do identify the VM
- *
- * This function is expected to be called in interrupt context.
- *
- * Returns:
- * True if there was fault credit, false otherwise
- */
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid)
-{
- struct amdgpu_vm *vm;
-
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- if (!vm) {
- /* VM not found, can't track fault credit */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- }
-
- /* No lock needed. only accessed by IRQ handler */
- if (!vm->fault_credit) {
- /* Too many faults in this VM */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
-
- vm->fault_credit--;
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
-}
-
-/**
* amdgpu_vm_manager_init - init the VM manager
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2a8898d19c8b..e8dcfd59fc93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -229,9 +229,6 @@ struct amdgpu_vm {
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
- /* Limit non-retry fault storms */
- unsigned int fault_credit;
-
/* Points to the KFD process VM info */
struct amdkfd_process_info *process_info;
@@ -299,8 +296,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index fb37e69f1bba..8a8bc60cb6b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -78,7 +78,7 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
adev->gmc.xgmi.node_id,
adev->gmc.xgmi.hive_id, ret);
else
- dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
+ dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
adev->gmc.xgmi.physical_node_id,
adev->gmc.xgmi.hive_id);
@@ -94,11 +94,22 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
int count = 0, ret = -EINVAL;
- if ((adev->asic_type < CHIP_VEGA20) ||
- (adev->flags & AMD_IS_APU) )
+ if (!adev->gmc.xgmi.supported)
return 0;
- adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
- adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
+
+ ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get node id\n");
+ return ret;
+ }
+
+ ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get hive id\n");
+ return ret;
+ }
mutex_lock(&xgmi_mutex);
hive = amdgpu_get_xgmi_hive(adev);
@@ -135,3 +146,23 @@ exit:
mutex_unlock(&xgmi_mutex);
return ret;
}
+
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ mutex_lock(&xgmi_mutex);
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ goto exit;
+
+ if (!(hive->number_devices--))
+ mutex_destroy(&hive->hive_lock);
+
+exit:
+ mutex_unlock(&xgmi_mutex);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 6335bfdcc51d..6151eb9c8ad3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -35,5 +35,6 @@ struct amdgpu_hive_info {
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b5775c6a857b..8a8b4967a101 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -228,34 +228,6 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
-/**
- * cik_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -461,7 +433,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
- .prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index df5ac4d85a00..9d3ea298e116 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -208,34 +208,6 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * cz_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -442,7 +414,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
- .prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index fdace004544d..e4cc1d48eaab 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -167,19 +167,6 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_bo *abo;
-
- abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
@@ -692,7 +679,9 @@ static int dce_virtual_pageflip(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
+ amdgpu_bo_unref(&works->old_abo);
+ kfree(works->shared);
+ kfree(works);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1454fc306783..57cb3a51bda7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4068,6 +4068,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v8_0_init_csb(adev);
+ return 0;
+ }
+
adev->gfx.rlc.funcs->stop(adev);
adev->gfx.rlc.funcs->reset(adev);
gfx_v8_0_init_pg(adev);
@@ -4228,7 +4233,6 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
u32 tmp;
u32 rb_bufsz;
u64 rb_addr, rptr_addr, wptr_gpu_addr;
- int r;
/* Set the write pointer delay */
WREG32(mmCP_RB_WPTR_DELAY, 0);
@@ -4273,9 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev);
ring->sched.ready = true;
- r = amdgpu_ring_test_helper(ring);
- return r;
+ return 0;
}
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
@@ -4364,10 +4367,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
- r = amdgpu_ring_test_helper(kiq_ring);
- if (r)
- DRM_ERROR("KCQ enable failed\n");
- return r;
+ amdgpu_ring_commit(kiq_ring);
+
+ return 0;
}
static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
@@ -4704,16 +4706,32 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
if (r)
goto done;
- /* Test KCQs - reversing the order of rings seems to fix ring test failure
- * after GPU reset
- */
- for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
+done:
+ return r;
+}
+
+static int gfx_v8_0_cp_test_all_rings(struct amdgpu_device *adev)
+{
+ int r, i;
+ struct amdgpu_ring *ring;
+
+ /* collect all the ring_tests here, gfx, kiq, compute */
+ ring = &adev->gfx.gfx_ring[0];
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ ring = &adev->gfx.kiq.ring;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- r = amdgpu_ring_test_helper(ring);
+ amdgpu_ring_test_helper(ring);
}
-done:
- return r;
+ return 0;
}
static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
@@ -4734,6 +4752,11 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
r = gfx_v8_0_kcq_resume(adev);
if (r)
return r;
+
+ r = gfx_v8_0_cp_test_all_rings(adev);
+ if (r)
+ return r;
+
gfx_v8_0_enable_gui_idle_interrupt(adev, true);
return 0;
@@ -4947,14 +4970,13 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
static int gfx_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
@@ -5051,14 +5073,13 @@ static int gfx_v8_0_soft_reset(void *handle)
static int gfx_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
@@ -5083,6 +5104,8 @@ static int gfx_v8_0_post_soft_reset(void *handle)
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
gfx_v8_0_cp_gfx_resume(adev);
+ gfx_v8_0_cp_test_all_rings(adev);
+
adev->gfx.rlc.funcs->start(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index af8ccb014be3..fbca0494f871 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
+MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
@@ -112,7 +113,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
@@ -134,10 +138,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080)
};
static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] =
@@ -645,7 +646,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ /*
+	 * For Picasso on an AM4-socket board, we use picasso_rlc_am4.bin
+	 * instead of picasso_rlc.bin.
+	 * How to tell the two apart:
+	 * PCO AM4: revision >= 0xC8 && revision <= 0xCF
+	 *       or revision >= 0xD8 && revision <= 0xDF
+	 * otherwise it is PCO FP5
+ */
+ if (!strcmp(chip_name, "picasso") &&
+ (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
+ ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
@@ -2326,12 +2340,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
#endif
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
+ udelay(50);
/* carrizo do enable cp interrupt after cp inited */
- if (!(adev->flags & AMD_IS_APU))
+ if (!(adev->flags & AMD_IS_APU)) {
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
-
- udelay(50);
+ udelay(50);
+ }
#ifdef AMDGPU_RLC_DEBUG_RETRY
/* RLC_GPM_GENERAL_6 : RLC Ucode version */
@@ -3572,6 +3587,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t data, def;
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
+
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
@@ -3636,6 +3653,8 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
}
}
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 531aaf377592..1ad7e6b8ed1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
chip_name = "tonga";
break;
case CHIP_POLARIS11:
- chip_name = "polaris11";
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff))))
+ chip_name = "polaris11_k";
+ else if ((adev->pdev->device == 0x67ef) &&
+ (adev->pdev->revision == 0xe2))
+ chip_name = "polaris11_k";
+ else
+ chip_name = "polaris11";
break;
case CHIP_POLARIS10:
- chip_name = "polaris10";
+ if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe1) ||
+ (adev->pdev->revision == 0xf7)))
+ chip_name = "polaris10_k";
+ else
+ chip_name = "polaris10";
break;
case CHIP_POLARIS12:
- chip_name = "polaris12";
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+ (adev->pdev->revision == 0x10))))
+ chip_name = "polaris12_k";
+ else
+ chip_name = "polaris12";
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
@@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 data, vbios_version;
+ u32 data;
int i, ucode_size, regs_size;
/* Skip MC ucode loading on SR-IOV capable boards.
@@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
if (amdgpu_sriov_bios(adev))
return 0;
- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
- data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
- vbios_version = data & 0xf;
-
- if (vbios_version == 0)
- return 0;
-
if (!adev->gmc.fw)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3a4e5d8d5162..bacdaef77b6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -244,6 +244,62 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+/**
+ * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: decoded interrupt vector entry
+ * @addr: faulting address carried in the entry
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ uint64_t addr)
+{
+ struct amdgpu_vm *vm;
+ u64 key;
+ int r;
+
+ /* No PASID, can't identify faulting process */
+ if (!entry->pasid)
+ return true;
+
+ /* Not a retry fault */
+ if (!(entry->src_data[1] & 0x80))
+ return true;
+
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
+ if (!vm) {
+ /* VM not found, process it normally */
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return true;
+ }
+
+ key = AMDGPU_VM_FAULT(entry->pasid, addr);
+ r = amdgpu_vm_add_fault(vm->fault_hash, key);
+
+ /* Hash table is full or the fault is already being processed,
+ * ignore further page faults
+ */
+ if (r != 0) {
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
+ amdgpu_vm_clear_fault(vm->fault_hash, key);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ /* It's the first fault for this address, process it normally */
+ return true;
+}
+
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -255,6 +311,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
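
With the IH-level prescreen hooks gone, retry-fault filtering lives
here: a per-VM hash table deduplicates (pasid, address) keys and a FIFO
feeds the delayed processing that later clears them. A toy userspace
model of the admit-once behavior (open addressing stands in for the
kernel's fault hash; amdgpu_vm_add_fault/amdgpu_vm_clear_fault are the
real counterparts):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SLOTS 64

    static uint64_t table[SLOTS];  /* 0 = empty; keys assumed nonzero */

    static bool add_fault(uint64_t key)
    {
            unsigned int i = (unsigned int)(key % SLOTS);

            for (unsigned int n = 0; n < SLOTS; n++, i = (i + 1) % SLOTS) {
                    if (table[i] == key)
                            return false;  /* already pending: drop the IV */
                    if (!table[i]) {
                            table[i] = key;
                            return true;   /* first fault: process it */
                    }
            }
            return false;                  /* table full: drop */
    }

    int main(void)
    {
            uint64_t key = ((uint64_t)0x42 << 48) | 0x1000; /* (pasid, page) */

            printf("%d %d\n", add_fault(key), add_fault(key)); /* 1 0 */
            return 0;
    }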
@@ -659,37 +718,46 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
}
}
-static int gmc_v9_0_late_init(void *handle)
+static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /*
- * The latest engine allocation on gfx9 is:
- * Engine 0, 1: idle
- * Engine 2, 3: firmware
- * Engine 4~13: amdgpu ring, subject to change when ring number changes
- * Engine 14~15: idle
- * Engine 16: kfd tlb invalidation
- * Engine 17: Gart flushes
- */
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
+ struct amdgpu_ring *ring;
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] =
+ {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP};
unsigned i;
- int r;
+ unsigned vmhub, inv_eng;
- if (!gmc_v9_0_keep_stolen_memory(adev))
- amdgpu_bo_late_init(adev);
+ for (i = 0; i < adev->num_rings; ++i) {
+ ring = adev->rings[i];
+ vmhub = ring->funcs->vmhub;
- for(i = 0; i < adev->num_rings; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- unsigned vmhub = ring->funcs->vmhub;
+ inv_eng = ffs(vm_inv_engs[vmhub]);
+ if (!inv_eng) {
+ dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ ring->name);
+ return -EINVAL;
+ }
+
+ ring->vm_inv_eng = inv_eng - 1;
+ change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub]));
- ring->vm_inv_eng = vm_inv_eng[vmhub]++;
dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
}
- /* Engine 16 is used for KFD and 17 for GART flushes */
- for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 16);
+ return 0;
+}
+
+static int gmc_v9_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ if (!gmc_v9_0_keep_stolen_memory(adev))
+ amdgpu_bo_late_init(adev);
+
+ r = gmc_v9_0_allocate_vm_inv_eng(adev);
+ if (r)
+ return r;
if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
r = gmc_v9_0_ecc_available(adev);
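
The fixed 4,4 starting engines are replaced above by a free-engine
bitmap per hub, handed out with ffs(). A compilable sketch of the
allocator; the 0x1FFF3 mask marks engines 0, 1 and 4..16 free (2 and 3
are reserved for firmware, 17 for GART flushes):

    #include <stdio.h>
    #include <strings.h>  /* ffs() */

    static int alloc_inv_eng(unsigned int *free_mask)
    {
            int eng = ffs(*free_mask);  /* 1-based index of lowest set bit */

            if (!eng)
                    return -1;          /* no engine left for this hub */
            *free_mask &= ~(1u << (eng - 1));
            return eng - 1;
    }

    int main(void)
    {
            unsigned int mask = 0x1FFF3;

            for (int i = 0; i < 4; i++)
                    printf("ring %d -> engine %d\n", i, alloc_inv_eng(&mask));
            return 0;  /* engines 0, 1, 4, 5 */
    }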
@@ -902,6 +970,9 @@ static int gmc_v9_0_sw_init(void *handle)
/* This interrupt is VMC page fault.*/
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
+ if (r)
+ return r;
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
@@ -934,7 +1005,7 @@ static int gmc_v9_0_sw_init(void *handle)
}
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
- if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
index b030ca5ea107..5c8deac65580 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
@@ -24,6 +24,16 @@
#ifndef __GMC_V9_0_H__
#define __GMC_V9_0_H__
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+#define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3
+
extern const struct amd_ip_funcs gmc_v9_0_ip_funcs;
extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index cf0fc61aebe6..a3984d10b604 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -208,34 +208,6 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * iceland_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* iceland_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -440,7 +412,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
- .prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 8cbb4655896a..b11a1c17a7f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
return r;
}
/* Retrieve checksum from mailbox2 */
- if (req == IDH_REQ_GPU_INIT_ACCESS) {
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 6f9c54978cc1..accdedd63c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -32,6 +32,7 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
#define smnPCIE_CONFIG_CNTL 0x11180044
+#define smnPCIE_CI_CNTL 0x11180080
static u32 nbio_v6_1_get_rev_id(struct amdgpu_device *adev)
{
@@ -270,6 +271,12 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
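
Both NBIO versions apply the same def/data read-modify-write idiom when
setting CI_SLV_ORDERING_DIS: read once, modify locally, and write back
only if the value changed. A sketch of the idiom (the register model
and bit position are stand-ins, not the real smnPCIE_CI_CNTL layout):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_reg;

    static uint32_t rreg(void) { return fake_reg; }
    static void wreg(uint32_t v) { fake_reg = v; printf("write %#x\n", v); }

    static void init_registers(void)
    {
            uint32_t def, data;

            def = data = rreg();
            data |= 1u << 2;     /* placeholder for CI_SLV_ORDERING_DIS */
            if (def != data)     /* skip the bus write when nothing changed */
                    wreg(data);
    }

    int main(void) { init_registers(); init_registers(); return 0; }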
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index f8cee95d61cc..4cd31a276dcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -31,6 +31,7 @@
#define smnCPM_CONTROL 0x11180460
#define smnPCIE_CNTL2 0x11180070
+#define smnPCIE_CI_CNTL 0x11180080
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
@@ -222,7 +223,13 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
+ uint32_t def, data;
+
+ def = data = RREG32_PCIE(smnPCIE_CI_CNTL);
+ data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1);
+ if (def != data)
+ WREG32_PCIE(smnPCIE_CI_CNTL, data);
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 882bd83a28c4..0de00fbe9233 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,8 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+ GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to PSP to update the VF's write pointer */
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
};
@@ -89,7 +91,8 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */
GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */
GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */
-
+ GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */
+ GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index e5dd052d9e06..0c6e7f9b143f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -34,6 +34,7 @@
#include "nbio/nbio_7_4_offset.h"
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
/* address block */
@@ -100,6 +101,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_0 *asd_hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
@@ -132,14 +134,30 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
le32_to_cpu(sos_hdr->sos_offset_bytes);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out1;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out1;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err)
- goto out;
+ goto out2;
err = amdgpu_ucode_validate(adev->psp.ta_fw);
if (err)
- goto out;
+ goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
@@ -148,14 +166,18 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
return 0;
+
+out2:
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
+out1:
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
out:
- if (err) {
- dev_err(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
- }
+ dev_err(adev->dev,
+ "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
return err;
}
@@ -171,8 +193,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
* are already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ DRM_INFO("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -288,6 +313,13 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
return 0;
}
+static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
+{
+ if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+ return true;
+ return false;
+}
+
static int psp_v11_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -296,26 +328,47 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be a handshake issue with hardware, which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be a handshake issue with hardware, which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
return ret;
}
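
When psp_v11_0_support_vmr_ring() reports true (an SR-IOV VF running SOS firmware newer than 0x80045), the driver drives the PSP through the C2PMSG_101/102/103 mailbox instead of the bare-metal C2PMSG_64/67/69/70/71 set; note the VMR path never programs a ring size register. The same if/else reappears in ring_stop and cmd_submit below, so one hedged way to shrink the duplication would be a small selector helper (hypothetical, not part of this patch):

	static uint32_t psp_v11_0_cmd_reg_offset(struct psp_context *psp)
	{
		struct amdgpu_device *adev = psp->adev;	/* SOC15_REG_OFFSET expects adev in scope */

		return psp_v11_0_support_vmr_ring(psp) ?
			SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101) :
			SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
	}
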
@@ -326,15 +379,24 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
int ret = 0;
struct amdgpu_device *adev = psp->adev;
- /* Write the ring destroy command to C2PMSG_64 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
+ /* Write the ring destroy command */
+ if (psp_v11_0_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
/* there might be a handshake issue with hardware, which needs a delay */
mdelay(20);
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ /* Wait for response flag (bit 31) */
+ if (psp_v11_0_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
return ret;
}
@@ -373,7 +435,10 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ if (psp_v11_0_support_vmr_ring(psp))
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
/* write_frame ptr increments by size of rb_frame in bytes */
@@ -402,7 +467,11 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+ if (psp_v11_0_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
}
@@ -547,7 +616,7 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
/* send the mode 1 reset command */
WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
@@ -640,7 +709,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}
-static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -653,12 +722,14 @@ static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
/* Invoke xgmi ta to get hive id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
- return 0;
- else
- return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+ return ret;
+
+ *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+ return 0;
}
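
Returning the hive id directly forced the old signature to use 0 both as "the TA failed" and as a possibly legitimate id; with the out parameter, the TA's error code propagates and the id is written only on success. A caller (reached through the psp_funcs table in practice) now reads roughly:

	uint64_t hive_id;
	int r;

	r = psp_v11_0_xgmi_get_hive_id(psp, &hive_id);
	if (r)
		return r;	/* TA invocation failed; no bogus hive id of 0 */
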
-static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -671,9 +742,11 @@ static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
/* Invoke xgmi ta to get the node id */
ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
- return 0;
- else
- return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+ return ret;
+
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+ return 0;
}
static const struct psp_funcs psp_v11_0_funcs = {
@@ -692,6 +765,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
+ .support_vmr_ring = psp_v11_0_support_vmr_ring,
};
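
With the callback in the psp_funcs table, generic PSP code can query VMR-ring support without knowing the ASIC. The generic layer would plausibly wrap it like the other optional callbacks; this sketch (the wrapper name is an assumption, not shown in this diff) also covers ASICs such as psp v3.1 that never set the hook:

	static inline bool psp_support_vmr_ring(struct psp_context *psp)
	{
		/* ASICs without the callback report false */
		return psp->funcs->support_vmr_ring ?
			psp->funcs->support_vmr_ring(psp) : false;
	}
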
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 7efb823dd3b1..79694ff16969 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -240,8 +240,11 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
* have already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for the bootloader to signal readiness, i.e. bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -592,7 +595,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
/* send the mode 1 reset command */
WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4b6d3e5c821f..6811a5d05b27 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -78,7 +78,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -96,6 +95,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};
@@ -103,6 +103,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
};
@@ -1458,8 +1459,7 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
/*return fw_version >= 31;*/
return false;
case CHIP_VEGA20:
- /*return fw_version >= 115;*/
- return false;
+ return fw_version >= 123;
default:
return false;
}
@@ -1706,13 +1706,15 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
break;
case 1:
- /* XXX compute */
+ if (adev->asic_type == CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
case 2:
/* XXX compute */
break;
case 3:
- amdgpu_fence_process(&adev->sdma.instance[instance].page);
+ if (adev->asic_type != CHIP_VEGA20)
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index b3d7d9f83202..2938fb9f17cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -118,19 +118,6 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
-/**
- * si_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- /* Process all interrupts */
- return true;
-}
-
static void si_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
@@ -301,7 +288,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
static const struct amdgpu_ih_funcs si_ih_funcs = {
.get_wptr = si_ih_get_wptr,
- .prescreen_iv = si_ih_prescreen_iv,
.decode_iv = si_ih_decode_iv,
.set_rptr = si_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 83624e150ca7..8849b74078d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -507,6 +507,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
}
+ if (adev->asic_type == CHIP_VEGA20)
+ adev->gmc.xgmi.supported = true;
+
if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = &nbio_v7_0_funcs;
else if (adev->asic_type == CHIP_VEGA20)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 958b10a57073..49c262540940 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -49,14 +49,19 @@
#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \
do { \
+ uint32_t old_ = 0; \
uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
uint32_t loop = adev->usec_timeout; \
while ((tmp_ & (mask)) != (expected_value)) { \
- udelay(2); \
+ if (old_ != tmp_) { \
+ loop = adev->usec_timeout; \
+ old_ = tmp_; \
+ } else \
+ udelay(1); \
tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \
loop--; \
if (!loop) { \
- DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
+ DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \
inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \
ret = -ETIMEDOUT; \
break; \
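
The reworked macro treats any movement of the polled value as forward progress and re-arms the timeout, so multi-step hardware state machines no longer trip the (now warning-level) message as long as they keep changing state. Call sites are untouched; `ret` is an lvalue the macro assigns, and `adev` must be in scope, as in the UVD power-status wait in the vcn_v1_0.c hunk below:

	int ret = 0;

	/* spins until (status & mask) == expected, re-arming the timeout
	 * whenever the register value changes at all
	 */
	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
			   UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret);
	if (ret == -ETIMEDOUT)
		DRM_WARN("UVD never reported tiles off\n");
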
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index dcdbb4d72472..15da06ddeb75 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,34 +219,6 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * tonga_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* tonga_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -506,7 +478,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
- .prescreen_iv = tonga_ih_prescreen_iv,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 089645e78f98..aef924026a28 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -435,7 +435,7 @@ static int uvd_v7_0_sw_init(void *handle)
continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
- sprintf(ring->name, "uvd<%d>", j);
+ sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
return r;
@@ -443,7 +443,7 @@ static int uvd_v7_0_sw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
- sprintf(ring->name, "uvd_enc%d<%d>", i, j);
+ sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
if (amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c1a03505f956..89bb2fef90eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
+static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
/**
* vcn_v1_0_early_init - set function pointers
@@ -213,8 +214,9 @@ static int vcn_v1_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
- if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
- vcn_v1_0_stop(adev);
+ if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+ vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
ring->sched.ready = false;
@@ -1086,7 +1088,8 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0,
~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);
- /* initialize wptr */
+ /* initialize JPEG wptr */
+ ring = &adev->vcn.ring_jpeg;
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
/* copy patch commands to the jpeg ring */
@@ -1158,21 +1161,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
{
int ret_code = 0;
+ uint32_t tmp;
/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- if (!ret_code) {
- int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
- /* wait for read ptr to be equal to write ptr */
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ /* wait for read ptr to be equal to write ptr */
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
- UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
- }
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+
+ SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* disable dynamic power gating mode */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index d84b687240d1..2c250b01a903 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -220,90 +220,6 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u32 dw0, dw3, dw4, dw5;
- u16 pasid;
- u64 addr, key;
- struct amdgpu_vm *vm;
- int r;
-
- dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
- dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
-
- /* Filter retry page faults, let only the first one pass. If
- * there are too many outstanding faults, ignore them until
- * some faults get cleared.
- */
- switch (dw0 & 0xff) {
- case SOC15_IH_CLIENTID_VMC:
- case SOC15_IH_CLIENTID_UTCL2:
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- pasid = dw3 & 0xffff;
- /* No PASID, can't identify faulting process */
- if (!pasid)
- return true;
-
- /* Not a retry fault, check fault credit */
- if (!(dw5 & 0x80)) {
- if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
- goto ignore_iv;
- return true;
- }
-
- /* Track retry faults in per-VM fault FIFO. */
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
- key = AMDGPU_VM_FAULT(pasid, addr);
- if (!vm) {
- /* VM not found, process it normally */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- } else {
- r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
- /* Hash table is full or the fault is already being processed,
- * ignore further page faults
- */
- if (r != 0) {
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
- }
- /* No locking required with single writer and single reader */
- r = kfifo_put(&vm->faults, key);
- if (!r) {
- /* FIFO is full. Ignore it until there is space */
- amdgpu_vm_clear_fault(vm->fault_hash, key);
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
-
- spin_unlock(&adev->vm_manager.pasid_lock);
- /* It's the first fault for this address, process it normally */
- return true;
-
-ignore_iv:
- adev->irq.ih.rptr += 32;
- return false;
-}
-
-/**
* vega10_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -487,7 +403,6 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
- .prescreen_iv = vega10_ih_prescreen_iv,
.decode_iv = vega10_ih_decode_iv,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index ff2906c215fa..77e367459101 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -87,9 +87,9 @@ static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- r = RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ r = RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
return r;
}
@@ -99,10 +99,10 @@ static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->pcie_idx_lock, flags);
- WREG32(mmPCIE_INDEX, reg);
- (void)RREG32(mmPCIE_INDEX);
- WREG32(mmPCIE_DATA, v);
- (void)RREG32(mmPCIE_DATA);
+ WREG32_NO_KIQ(mmPCIE_INDEX, reg);
+ (void)RREG32_NO_KIQ(mmPCIE_INDEX);
+ WREG32_NO_KIQ(mmPCIE_DATA, v);
+ (void)RREG32_NO_KIQ(mmPCIE_DATA);
spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
@@ -123,8 +123,8 @@ static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
unsigned long flags;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_11, (reg));
- WREG32(mmSMC_IND_DATA_11, (v));
+ WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
+ WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5f4062b41add..083bd8114db1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -33,6 +33,7 @@
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/dma-buf.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
@@ -157,8 +158,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
}
if ((args->ring_base_address) &&
- (!access_ok(VERIFY_WRITE,
- (const void __user *) args->ring_base_address,
+ (!access_ok((const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("Can't access ring base address\n");
return -EFAULT;
@@ -169,31 +169,27 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return -EINVAL;
}
- if (!access_ok(VERIFY_WRITE,
- (const void __user *) args->read_pointer_address,
+ if (!access_ok((const void __user *) args->read_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access read pointer\n");
return -EFAULT;
}
- if (!access_ok(VERIFY_WRITE,
- (const void __user *) args->write_pointer_address,
+ if (!access_ok((const void __user *) args->write_pointer_address,
sizeof(uint32_t))) {
pr_err("Can't access write pointer\n");
return -EFAULT;
}
if (args->eop_buffer_address &&
- !access_ok(VERIFY_WRITE,
- (const void __user *) args->eop_buffer_address,
+ !access_ok((const void __user *) args->eop_buffer_address,
sizeof(uint32_t))) {
pr_debug("Can't access eop buffer");
return -EFAULT;
}
if (args->ctx_save_restore_address &&
- !access_ok(VERIFY_WRITE,
- (const void __user *) args->ctx_save_restore_address,
+ !access_ok((const void __user *) args->ctx_save_restore_address,
sizeof(uint32_t))) {
pr_debug("Can't access ctx save restore buffer");
return -EFAULT;
@@ -364,8 +360,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
}
if ((args->ring_base_address) &&
- (!access_ok(VERIFY_WRITE,
- (const void __user *) args->ring_base_address,
+ (!access_ok((const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("Can't access ring base address\n");
return -EFAULT;
@@ -1273,6 +1268,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return -EINVAL;
}
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+ if (args->size != kfd_doorbell_process_slice(dev))
+ return -EINVAL;
+ offset = kfd_get_process_doorbells(dev, p);
+ }
+
mutex_lock(&p->mutex);
pdd = kfd_bind_process_to_device(dev, p);
@@ -1550,6 +1551,115 @@ copy_from_user_failed:
return err;
}
+static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_get_dmabuf_info_args *args = data;
+ struct kfd_dev *dev = NULL;
+ struct kgd_dev *dma_buf_kgd;
+ void *metadata_buffer = NULL;
+ uint32_t flags;
+ unsigned int i;
+ int r;
+
+ /* Find a KFD GPU device that supports the get_dmabuf_info query */
+ for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
+ if (dev)
+ break;
+ if (!dev)
+ return -EINVAL;
+
+ if (args->metadata_ptr) {
+ metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
+ if (!metadata_buffer)
+ return -ENOMEM;
+ }
+
+ /* Get dmabuf info from KGD */
+ r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
+ &dma_buf_kgd, &args->size,
+ metadata_buffer, args->metadata_size,
+ &args->metadata_size, &flags);
+ if (r)
+ goto exit;
+
+ /* Reverse-lookup gpu_id from kgd pointer */
+ dev = kfd_device_by_kgd(dma_buf_kgd);
+ if (!dev) {
+ r = -EINVAL;
+ goto exit;
+ }
+ args->gpu_id = dev->id;
+ args->flags = flags;
+
+ /* Copy metadata buffer to user mode */
+ if (metadata_buffer) {
+ r = copy_to_user((void __user *)args->metadata_ptr,
+ metadata_buffer, args->metadata_size);
+ if (r != 0)
+ r = -EFAULT;
+ }
+
+exit:
+ kfree(metadata_buffer);
+
+ return r;
+}
+
+static int kfd_ioctl_import_dmabuf(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_import_dmabuf_args *args = data;
+ struct kfd_process_device *pdd;
+ struct dma_buf *dmabuf;
+ struct kfd_dev *dev;
+ int idr_handle;
+ uint64_t size;
+ void *mem;
+ int r;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ dmabuf = dma_buf_get(args->dmabuf_fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ r = PTR_ERR(pdd);
+ goto err_unlock;
+ }
+
+ r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
+ args->va_addr, pdd->vm,
+ (struct kgd_mem **)&mem, &size,
+ NULL);
+ if (r)
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
+ if (idr_handle < 0) {
+ r = -EFAULT;
+ goto err_free;
+ }
+
+ mutex_unlock(&p->mutex);
+
+ args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
+
+ return 0;
+
+err_free:
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+err_unlock:
+ mutex_unlock(&p->mutex);
+ return r;
+}
+
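
From user space the two new ioctls pair up: query the dma-buf fd first, then import it at a chosen virtual address on the GPU that exported it. A rough sketch, assuming the argument structs and ioctl numbers from the matching kfd_ioctl.h uapi update (not part of this diff):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>

	static int kfd_import_dmabuf_at(int kfd_fd, int dmabuf_fd, uint64_t va)
	{
		struct kfd_ioctl_get_dmabuf_info_args info = { .dmabuf_fd = dmabuf_fd };
		struct kfd_ioctl_import_dmabuf_args imp = { 0 };

		/* fills in size, gpu_id and flags; fails for non-amdgpu buffers */
		if (ioctl(kfd_fd, AMDKFD_IOC_GET_DMABUF_INFO, &info))
			return -1;

		imp.va_addr = va;
		imp.gpu_id = info.gpu_id;	/* the GPU that exported the buffer */
		imp.dmabuf_fd = dmabuf_fd;
		if (ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &imp))
			return -1;

		/* imp.handle encodes gpu_id plus the per-process idr handle */
		return 0;
	}
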
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
.cmd_drv = 0, .name = #ioctl}
@@ -1635,7 +1745,13 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
kfd_ioctl_set_cu_mask, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
- kfd_ioctl_get_queue_wave_state, 0)
+ kfd_ioctl_get_queue_wave_state, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+ kfd_ioctl_get_dmabuf_info, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+ kfd_ioctl_import_dmabuf, 0),
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index c02adbbeef2a..b7bc7d7d048f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -853,7 +853,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
*/
pgdat = NODE_DATA(numa_node_id);
for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
- mem_in_bytes += pgdat->node_zones[zone_type].managed_pages;
+ mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
mem_in_bytes <<= PAGE_SHIFT;
sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 9ed14a11afa2..8be9677c0c07 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -378,7 +378,13 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6864, &vega10_device_info }, /* Vega10 */
{ 0x6867, &vega10_device_info }, /* Vega10 */
{ 0x6868, &vega10_device_info }, /* Vega10 */
+ { 0x6869, &vega10_device_info }, /* Vega10 */
+ { 0x686A, &vega10_device_info }, /* Vega10 */
+ { 0x686B, &vega10_device_info }, /* Vega10 */
{ 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
+ { 0x686D, &vega10_device_info }, /* Vega10 */
+ { 0x686E, &vega10_device_info }, /* Vega10 */
+ { 0x686F, &vega10_device_info }, /* Vega10 */
{ 0x687F, &vega10_device_info }, /* Vega10 */
{ 0x69A0, &vega12_device_info }, /* Vega12 */
{ 0x69A1, &vega12_device_info }, /* Vega12 */
@@ -389,6 +395,7 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x66a1, &vega20_device_info }, /* Vega20 */
{ 0x66a2, &vega20_device_info }, /* Vega20 */
{ 0x66a3, &vega20_device_info }, /* Vega20 */
+ { 0x66a4, &vega20_device_info }, /* Vega20 */
{ 0x66a7, &vega20_device_info }, /* Vega20 */
{ 0x66af, &vega20_device_info } /* Vega20 */
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index dec8e64f36bd..0689d4ccbbc0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -793,6 +793,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index aa793fcbbdcc..5f5b2acedbac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -101,7 +101,25 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu->pdev == pdev) {
+ if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+}
+
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
+{
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu && top_dev->gpu->kgd == kgd) {
device = top_dev->gpu;
break;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 32e791d9b9a8..34f35e9a3c46 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -23,6 +23,9 @@
*
*/
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
@@ -54,6 +57,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
@@ -72,6 +76,7 @@
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
+#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -129,6 +134,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
+static void handle_cursor_update(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state);
@@ -324,12 +331,29 @@ static void dm_crtc_high_irq(void *interrupt_params)
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
+ struct dm_crtc_state *acrtc_state;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (acrtc) {
drm_crtc_handle_vblank(&acrtc->base);
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+ if (acrtc_state->stream &&
+ acrtc_state->vrr_params.supported &&
+ acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
+ }
}
}
@@ -398,6 +422,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
+ mutex_init(&adev->dm.dc_lock);
+
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
@@ -512,6 +538,9 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
+
+ mutex_destroy(&adev->dm.dc_lock);
+
return;
}
@@ -670,22 +699,36 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ struct drm_dp_mst_topology_mgr *mgr;
+ int ret;
+ bool need_hotplug = false;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- aconnector = to_amdgpu_dm_connector(connector);
- if (aconnector->dc_link->type == dc_connection_mst_branch &&
- !aconnector->mst_port) {
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_port)
+ continue;
- if (suspend)
- drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
- else
- drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
- }
+ mgr = &aconnector->mst_mgr;
+
+ if (suspend) {
+ drm_dp_mst_topology_mgr_suspend(mgr);
+ } else {
+ ret = drm_dp_mst_topology_mgr_resume(mgr);
+ if (ret < 0) {
+ drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ need_hotplug = true;
+ }
+ }
}
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ if (need_hotplug)
+ drm_kms_helper_hotplug_event(dev);
}
/**
@@ -869,7 +912,6 @@ static int dm_resume(void *handle)
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
enum dc_connection_type new_connection_type = dc_connection_none;
- int ret;
int i;
/* power on hardware */
@@ -942,13 +984,13 @@ static int dm_resume(void *handle)
}
}
- ret = drm_atomic_helper_resume(ddev, dm->cached_state);
+ drm_atomic_helper_resume(ddev, dm->cached_state);
dm->cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);
- return ret;
+ return 0;
}
/**
@@ -2698,9 +2740,9 @@ static void fill_audio_info(struct audio_info *audio_info,
cea_revision = drm_connector->display_info.cea_rev;
- strncpy(audio_info->display_name,
+ strscpy(audio_info->display_name,
edid_caps->display_name,
- AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
if (cea_revision >= 3) {
audio_info->mode_count = edid_caps->audio_mode_count;
@@ -2930,6 +2972,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (dm_state && dm_state->freesync_capable)
stream->ignore_msa_timing_param = true;
+
finish:
if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
dc_sink_release(sink);
@@ -2996,11 +3039,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
dc_stream_retain(state->stream);
}
- state->adjust = cur->adjust;
+ state->vrr_params = cur->vrr_params;
state->vrr_infopacket = cur->vrr_infopacket;
state->abm_level = cur->abm_level;
state->vrr_supported = cur->vrr_supported;
state->freesync_config = cur->freesync_config;
+ state->crc_enabled = cur->crc_enabled;
/* TODO: Duplicate dc_stream after the stream object is flattened */
@@ -3094,10 +3138,8 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
rmx_type = RMX_FULL;
break;
case DRM_MODE_SCALE_NONE:
- rmx_type = RMX_OFF;
- break;
default:
- rmx_type = RMX_ASPECT;
+ rmx_type = RMX_OFF;
break;
}
@@ -3210,10 +3252,11 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state) {
- state->scaling = RMX_ASPECT;
+ state->scaling = RMX_OFF;
state->underscan_enable = false;
state->underscan_hborder = 0;
state->underscan_vborder = 0;
+ state->max_bpc = 8;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -3235,6 +3278,11 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
new_state->freesync_capable = state->freesync_capable;
new_state->abm_level = state->abm_level;
+ new_state->scaling = state->scaling;
+ new_state->underscan_enable = state->underscan_enable;
+ new_state->underscan_hborder = state->underscan_hborder;
+ new_state->underscan_vborder = state->underscan_vborder;
+ new_state->max_bpc = state->max_bpc;
return &new_state->base;
}
@@ -3607,10 +3655,53 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+static int dm_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane_state *old_plane_state =
+ drm_atomic_get_old_plane_state(new_plane_state->state, plane);
+
+ /* Only support async updates on cursor planes. */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ return -EINVAL;
+
+ /*
+ * DRM calls prepare_fb and cleanup_fb on new_plane_state for
+ * async commits, so don't allow fb changes.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void dm_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(new_state->state, plane);
+
+ if (plane->state->fb != new_state->fb)
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
+
+ handle_cursor_update(plane, old_state);
+}
+
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.prepare_fb = dm_plane_helper_prepare_fb,
.cleanup_fb = dm_plane_helper_cleanup_fb,
.atomic_check = dm_plane_atomic_check,
+ .atomic_async_check = dm_plane_atomic_async_check,
+ .atomic_async_update = dm_plane_atomic_async_update
};
/*
@@ -3824,7 +3915,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
mode->hdisplay = hdisplay;
mode->vdisplay = vdisplay;
mode->type &= ~DRM_MODE_TYPE_PREFERRED;
- strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
return mode;
@@ -4299,6 +4390,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state)
{
+ struct amdgpu_device *adev = plane->dev->dev_private;
struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
@@ -4323,9 +4415,12 @@ static void handle_cursor_update(struct drm_plane *plane,
if (!position.enable) {
/* turn off cursor */
- if (crtc_state && crtc_state->stream)
+ if (crtc_state && crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
dc_stream_set_cursor_position(crtc_state->stream,
&position);
+ mutex_unlock(&adev->dm.dc_lock);
+ }
return;
}
@@ -4343,6 +4438,7 @@ static void handle_cursor_update(struct drm_plane *plane,
attributes.pitch = attributes.width;
if (crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
DRM_ERROR("DC failed to set cursor attributes\n");
@@ -4350,6 +4446,7 @@ static void handle_cursor_update(struct drm_plane *plane,
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
DRM_ERROR("DC failed to set cursor position\n");
+ mutex_unlock(&adev->dm.dc_lock);
}
}
@@ -4388,9 +4485,11 @@ struct dc_stream_status *dc_state_get_stream_status(
static void update_freesync_state_on_stream(
struct amdgpu_display_manager *dm,
struct dm_crtc_state *new_crtc_state,
- struct dc_stream_state *new_stream)
+ struct dc_stream_state *new_stream,
+ struct dc_plane_state *surface,
+ u32 flip_timestamp_in_us)
{
- struct mod_vrr_params vrr = {0};
+ struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
struct dc_info_packet vrr_infopacket = {0};
struct mod_freesync_config config = new_crtc_state->freesync_config;
@@ -4417,43 +4516,52 @@ static void update_freesync_state_on_stream(
mod_freesync_build_vrr_params(dm->freesync_module,
new_stream,
- &config, &vrr);
+ &config, &vrr_params);
+
+ if (surface) {
+ mod_freesync_handle_preflip(
+ dm->freesync_module,
+ surface,
+ new_stream,
+ flip_timestamp_in_us,
+ &vrr_params);
+ }
mod_freesync_build_vrr_infopacket(
dm->freesync_module,
new_stream,
- &vrr,
- packet_type_vrr,
- transfer_func_unknown,
+ &vrr_params,
+ PACKET_TYPE_VRR,
+ TRANSFER_FUNC_UNKNOWN,
&vrr_infopacket);
new_crtc_state->freesync_timing_changed =
- (memcmp(&new_crtc_state->adjust,
- &vrr.adjust,
- sizeof(vrr.adjust)) != 0);
+ (memcmp(&new_crtc_state->vrr_params.adjust,
+ &vrr_params.adjust,
+ sizeof(vrr_params.adjust)) != 0);
new_crtc_state->freesync_vrr_info_changed =
(memcmp(&new_crtc_state->vrr_infopacket,
&vrr_infopacket,
sizeof(vrr_infopacket)) != 0);
- new_crtc_state->adjust = vrr.adjust;
+ new_crtc_state->vrr_params = vrr_params;
new_crtc_state->vrr_infopacket = vrr_infopacket;
- new_stream->adjust = new_crtc_state->adjust;
+ new_stream->adjust = new_crtc_state->vrr_params.adjust;
new_stream->vrr_infopacket = vrr_infopacket;
if (new_crtc_state->freesync_vrr_info_changed)
DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
new_crtc_state->base.crtc->base.id,
(int)new_crtc_state->base.vrr_enabled,
- (int)vrr.state);
+ (int)vrr_params.state);
if (new_crtc_state->freesync_timing_changed)
DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
new_crtc_state->base.crtc->base.id,
- vrr.adjust.v_total_min,
- vrr.adjust.v_total_max);
+ vrr_params.adjust.v_total_min,
+ vrr_params.adjust.v_total_max);
}
/*
@@ -4467,6 +4575,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
struct dc_state *state)
{
unsigned long flags;
+ uint64_t timestamp_ns;
uint32_t target_vblank;
int r, vpos, hpos;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -4480,6 +4589,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
struct dc_stream_update stream_update = {0};
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
struct dc_stream_status *stream_status;
+ struct dc_plane_state *surface;
/* Prepare wait for target vblank early - before the fence-waits */
@@ -4529,6 +4639,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
addr.address.grph.addr.high_part = upper_32_bits(afb->address);
addr.flip_immediate = async_flip;
+ timestamp_ns = ktime_get_ns();
+ addr.flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+
if (acrtc->base.state->event)
prepare_flip_isr(acrtc);
@@ -4542,8 +4655,10 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
return;
}
- surface_updates->surface = stream_status->plane_states[0];
- if (!surface_updates->surface) {
+ surface = stream_status->plane_states[0];
+ surface_updates->surface = surface;
+
+ if (!surface) {
DRM_ERROR("No surface for CRTC: id=%d\n",
acrtc->crtc_id);
return;
@@ -4554,7 +4669,9 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
update_freesync_state_on_stream(
&adev->dm,
acrtc_state,
- acrtc_state->stream);
+ acrtc_state->stream,
+ surface,
+ addr.flip_timestamp_in_us);
if (acrtc_state->freesync_timing_changed)
stream_update.adjust =
@@ -4565,6 +4682,16 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
&acrtc_state->stream->vrr_infopacket;
}
+ /* Update surface timing information. */
+ surface->time.time_elapsed_in_us[surface->time.index] =
+ addr.flip_timestamp_in_us - surface->time.prev_update_time_in_us;
+ surface->time.prev_update_time_in_us = addr.flip_timestamp_in_us;
+ surface->time.index++;
+ if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
+ surface->time.index = 0;
+
+ mutex_lock(&adev->dm.dc_lock);
+
dc_commit_updates_for_stream(adev->dm.dc,
surface_updates,
1,
@@ -4572,6 +4699,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
&stream_update,
&surface_updates->surface,
state);
+ mutex_unlock(&adev->dm.dc_lock);
DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
__func__,
@@ -4586,6 +4714,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
* with a dc_plane_state and follow the atomic model a bit more closely here.
*/
static bool commit_planes_to_stream(
+ struct amdgpu_display_manager *dm,
struct dc *dc,
struct dc_plane_state **plane_states,
uint8_t new_plane_count,
@@ -4662,11 +4791,13 @@ static bool commit_planes_to_stream(
updates[i].scaling_info = &scaling_info[i];
}
+ mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(
dc,
updates,
new_plane_count,
dc_stream, stream_update, plane_states, state);
+ mutex_unlock(&dm->dc_lock);
kfree(flip_addr);
kfree(plane_info);
@@ -4772,7 +4903,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_stream_attach->abm_level = acrtc_state->abm_level;
- if (false == commit_planes_to_stream(dm->dc,
+ if (!commit_planes_to_stream(dm,
+ dm->dc,
plane_states_constructed,
planes_count,
acrtc_state,
@@ -4942,7 +5074,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (dc_state) {
dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ mutex_unlock(&dm->dc_lock);
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -5004,6 +5138,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* TODO: How does this work with MPO? */
if (!commit_planes_to_stream(
+ dm,
dm->dc,
status->plane_states,
status->plane_count,
@@ -5248,6 +5383,7 @@ static void get_freesync_config_for_crtc(
config.max_refresh_in_uhz =
aconnector->max_vfreq * 1000000;
config.vsif_supported = true;
+ config.btr = true;
}
new_crtc_state->freesync_config = config;
@@ -5258,8 +5394,8 @@ static void reset_freesync_config_for_crtc(
{
new_crtc_state->vrr_supported = false;
- memset(&new_crtc_state->adjust, 0,
- sizeof(new_crtc_state->adjust));
+ memset(&new_crtc_state->vrr_params, 0,
+ sizeof(new_crtc_state->vrr_params));
memset(&new_crtc_state->vrr_infopacket, 0,
sizeof(new_crtc_state->vrr_infopacket));
}
@@ -5896,6 +6032,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = -EINVAL;
goto fail;
}
+ } else if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper; check whether it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update = !drm_atomic_helper_async_check(dev, state);
}
/* Must be success */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 4326dc256491..fbd161ddc3f4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -135,6 +135,14 @@ struct amdgpu_display_manager {
struct drm_modeset_lock atomic_obj_lock;
/**
+ * @dc_lock:
+ *
+ * Guards access to DC functions that can issue register write
+ * sequences.
+ */
+ struct mutex dc_lock;
+
+ /**
* @irq_handler_list_low_tab:
*
* Low priority IRQ handler table.
@@ -260,7 +268,7 @@ struct dm_crtc_state {
bool vrr_supported;
struct mod_freesync_config freesync_config;
- struct dc_crtc_timing_adjust adjust;
+ struct mod_vrr_params vrr_params;
struct dc_info_packet vrr_infopacket;
int abm_level;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d02c32a1039c..1b0d209d8367 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -342,10 +342,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
master->connector_id);
aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
+ drm_connector_attach_encoder(&aconnector->base,
+ &aconnector->mst_encoder->base);
- /*
- * TODO: understand why this one is needed
- */
drm_object_attach_property(
&connector->base,
dev->mode_config.path_property,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
new file mode 100644
index 000000000000..d898981684d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amdgpu_dm
+
+#if !defined(_AMDGPU_DM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMDGPU_DM_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(amdgpu_dc_rreg,
+ TP_PROTO(unsigned long *read_count, uint32_t reg, uint32_t value),
+ TP_ARGS(read_count, reg, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->value = value;
+ *read_count = *read_count + 1;
+ ),
+ TP_printk("reg=0x%08lx, value=0x%08lx",
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_dc_wreg,
+ TP_PROTO(unsigned long *write_count, uint32_t reg, uint32_t value),
+ TP_ARGS(write_count, reg, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->value = value;
+ *write_count = *write_count + 1;
+ ),
+ TP_printk("reg=0x%08lx, value=0x%08lx",
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+
+TRACE_EVENT(amdgpu_dc_performance,
+ TP_PROTO(unsigned long read_count, unsigned long write_count,
+ unsigned long *last_read, unsigned long *last_write,
+ const char *func, unsigned int line),
+ TP_ARGS(read_count, write_count, last_read, last_write, func, line),
+ TP_STRUCT__entry(
+ __field(uint32_t, reads)
+ __field(uint32_t, writes)
+ __field(uint32_t, read_delta)
+ __field(uint32_t, write_delta)
+ __string(func, func)
+ __field(uint32_t, line)
+ ),
+ TP_fast_assign(
+ __entry->reads = read_count;
+ __entry->writes = write_count;
+ __entry->read_delta = read_count - *last_read;
+ __entry->write_delta = write_count - *last_write;
+ __assign_str(func, func);
+ __entry->line = line;
+ *last_read = read_count;
+ *last_write = write_count;
+ ),
+ TP_printk("%s:%d reads=%08ld (%08ld total), writes=%08ld (%08ld total)",
+ __get_str(func), __entry->line,
+ (unsigned long)__entry->read_delta,
+ (unsigned long)__entry->reads,
+ (unsigned long)__entry->write_delta,
+ (unsigned long)__entry->writes)
+);
+#endif /* _AMDGPU_DM_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE amdgpu_dm_trace
+#include <trace/define_trace.h>
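
As with any tracepoint header, this file is included twice: normally wherever the trace_*() calls are made, and once more with CREATE_TRACE_POINTS defined (which amdgpu_dm.c now does at its very top) so that <trace/define_trace.h> emits the event bodies. A register-read helper would then invoke it roughly like this; the dc_perf_trace counter field name is an assumption:

	/* ctx is the dc_context; perf_trace is created in dc.c below */
	uint32_t value = cgs_read_register(ctx->cgs_device, address);

	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
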
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 751bb614fc0e..c513ab6f3843 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -638,6 +638,7 @@ static enum bp_result get_ss_info_v4_1(
{
enum bp_result result = BP_RESULT_OK;
struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+ struct atom_smu_info_v3_3 *smu_info = NULL;
if (!ss_info)
return BP_RESULT_BADINPUT;
@@ -650,6 +651,7 @@ static enum bp_result get_ss_info_v4_1(
if (!disp_cntl_tbl)
return BP_RESULT_BADBIOSTABLE;
+
ss_info->type.STEP_AND_DELAY_INFO = false;
ss_info->spread_percentage_divider = 1000;
/* BIOS no longer uses target clock. Always enable for now */
@@ -688,6 +690,19 @@ static enum bp_result get_ss_info_v4_1(
*/
result = BP_RESULT_UNSUPPORTED;
break;
+ case AS_SIGNAL_TYPE_XGMI:
+ smu_info = GET_IMAGE(struct atom_smu_info_v3_3,
+ DATA_TABLES(smu_info));
+ if (!smu_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ ss_info->spread_spectrum_percentage =
+ smu_info->waflclk_ss_percentage;
+ ss_info->spread_spectrum_range =
+ smu_info->gpuclk_ss_rate_10hz * 10;
+ if (smu_info->waflclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
default:
result = BP_RESULT_UNSUPPORTED;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 65b006ad372e..8196f3bb10c7 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -67,6 +67,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
return true;
#endif
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index dba6b57830c7..5fd52094d459 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -62,6 +62,55 @@
const static char DC_BUILD_ID[] = "production-build";
+/**
+ * DOC: Overview
+ *
+ * DC is the OS-agnostic component of the amdgpu DC driver.
+ *
+ * DC maintains and validates a set of structs representing the state of the
+ * driver and writes that state to AMD hardware.
+ *
+ * Main DC HW structs:
+ *
+ * struct dc - The central struct. One per driver. Created on driver load,
+ * destroyed on driver unload.
+ *
+ * struct dc_context - One per driver.
+ * Used as a backpointer by most other structs in dc.
+ *
+ * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
+ * plugpoints). Created on driver load, destroyed on driver unload.
+ *
+ * struct dc_sink - One per display. Created on boot or hotplug.
+ * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
+ * (the display directly attached). It may also have one or more remote
+ * sinks (in the Multi-Stream Transport case)
+ *
+ * struct resource_pool - One per driver. Represents the hw blocks not in the
+ * main pipeline. Not directly accessible by dm.
+ *
+ * Main dc state structs:
+ *
+ * These structs can be created and destroyed as needed. There is a full set of
+ * these structs in dc->current_state representing the currently programmed state.
+ *
+ * struct dc_state - The global DC state to track global state information,
+ * such as bandwidth values.
+ *
+ * struct dc_stream_state - Represents the hw configuration for the pipeline from
+ * a framebuffer to a display. Maps one-to-one with dc_sink.
+ *
+ * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
+ * and may have more in the Multi-Plane Overlay case.
+ *
+ * struct resource_context - Represents the programmable state of everything in
+ * the resource_pool. Not directly accessible by dm.
+ *
+ * struct pipe_ctx - A member of struct resource_context. Represents the
+ * internal hardware pipeline components. Each dc_plane_state has either
+ * one or two (in the pipe-split case).
+ */
+
/*******************************************************************************
* Private functions
******************************************************************************/
@@ -102,10 +151,6 @@ static bool create_links(
return false;
}
- if (connectors_num == 0 && num_virtual_links == 0) {
- dm_error("DC: Number of connectors is zero!\n");
- }
-
dm_output_to_console(
"DC: %s: connectors_num: physical:%d, virtual:%d\n",
__func__,
@@ -175,6 +220,17 @@ failed_alloc:
return false;
}
+static struct dc_perf_trace *dc_perf_trace_create(void)
+{
+ return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
+}
+
+static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
+{
+ kfree(*perf_trace);
+ *perf_trace = NULL;
+}
+
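A usage sketch of the destroy-by-reference pattern above (hypothetical caller, not part of the patch): taking the pointer's address lets the helper both free and NULL the slot, and since kfree(NULL) is a no-op, a repeated destroy is harmless.

static void sketch_perf_trace_lifetime(void)
{
	struct dc_perf_trace *pt = dc_perf_trace_create();

	if (!pt)
		return;

	dc_perf_trace_destroy(&pt);	/* frees the trace; pt is now NULL */
	dc_perf_trace_destroy(&pt);	/* harmless: kfree(NULL) is a no-op */
}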
/**
*****************************************************************************
* Function: dc_stream_adjust_vmin_vmax
@@ -240,7 +296,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
}
/**
- * dc_stream_configure_crc: Configure CRC capture for the given stream.
+ * dc_stream_configure_crc() - Configure CRC capture for the given stream.
* @dc: DC Object
* @stream: The stream to configure CRC on.
* @enable: Enable CRC if true, disable otherwise.
@@ -292,7 +348,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
}
/**
- * dc_stream_get_crc: Get CRC values for the given stream.
+ * dc_stream_get_crc() - Get CRC values for the given stream.
* @dc: DC object
* @stream: The DC stream state of the stream to get CRCs from.
* @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
@@ -536,6 +592,8 @@ static void destruct(struct dc *dc)
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
+
kfree(dc->ctx);
dc->ctx = NULL;
@@ -659,6 +717,12 @@ static bool construct(struct dc *dc,
goto fail;
}
+ dc_ctx->perf_trace = dc_perf_trace_create();
+ if (!dc_ctx->perf_trace) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
dc_version,
@@ -1329,6 +1393,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
return overall_type;
}
+/**
+ * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
+ */
enum surface_update_type dc_check_update_surfaces_for_stream(
struct dc *dc,
struct dc_surface_update *updates,
@@ -1398,7 +1467,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
stream_update->vrr_infopacket ||
- stream_update->vsc_infopacket) {
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
}
@@ -1409,6 +1479,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->output_csc_transform)
dc_stream_program_csc_matrix(dc, stream);
+ if (stream_update->dither_option) {
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ }
+
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -1492,9 +1570,6 @@ static void commit_planes_for_stream(struct dc *dc,
}
}
- if (update_type == UPDATE_TYPE_FULL)
- context_timing_trace(dc, &context->res_ctx);
-
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
/* Lock the top pipe while updating plane addrs, since freesync requires
@@ -1631,6 +1706,9 @@ enum dc_irq_source dc_interrupt_to_irq_source(
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
+/**
+ * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
+ */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
@@ -1724,6 +1802,11 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
return true;
}
+/**
+ * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
+ *
+ * EDID length is in bytes
+ */
struct dc_sink *dc_link_add_remote_sink(
struct dc_link *link,
const uint8_t *edid,
@@ -1782,6 +1865,12 @@ fail_add_sink:
return NULL;
}
+/**
+ * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
+ *
+ * Note that this just removes the struct dc_sink - it doesn't
+ * program hardware or alter other members of dc_link
+ */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 948596a02392..52deacf39841 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -198,6 +198,13 @@ static bool program_hpd_filter(
return result;
}
+/**
+ * dc_link_detect_sink() - Determine if there is a sink connected
+ *
+ * @type: Returned connection type
+ * Does not detect downstream devices, such as MST sinks
+ * or displays connected through active dongles.
+ */
bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
{
uint32_t is_hpd_high = 0;
@@ -208,6 +215,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
return true;
}
+ if (link->connector_signal == SIGNAL_TYPE_EDP)
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+
/* todo: may need to lock gpio access */
hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
if (hpd_pin == NULL)
@@ -324,15 +334,15 @@ static enum signal_type get_basic_signal_type(
return SIGNAL_TYPE_NONE;
}
-/*
- * @brief
- * Check whether there is a dongle on DP connector
+/**
+ * dc_link_is_dp_sink_present() - Check if there is a native DP
+ * or passive DP-HDMI dongle connected
*/
bool dc_link_is_dp_sink_present(struct dc_link *link)
{
enum gpio_result gpio_result;
uint32_t clock_pin = 0;
-
+ uint8_t retry = 0;
struct ddc *ddc;
enum connector_id connector_id =
@@ -361,11 +371,22 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
return present;
}
- /* Read GPIO: DP sink is present if both clock and data pins are zero */
- /* [anaumov] in DAL2, there was no check for GPIO failure */
-
- gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
- ASSERT(gpio_result == GPIO_RESULT_OK);
+ /*
+ * Read GPIO: DP sink is present if both clock and data pins are zero
+ *
+ * [W/A] When plugging or unplugging a DP cable, some customer boards
+ * emit a short pulse on clk_pin (1V, < 1ms). The link would then be
+ * misconfigured as HDMI/DVI and the monitor would fail to light up,
+ * so retry up to 3 times. A real passive dongle needs an extra 3ms
+ * to be detected.
+ */
+ do {
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+ ASSERT(gpio_result == GPIO_RESULT_OK);
+ if (clock_pin)
+ udelay(1000);
+ else
+ break;
+ } while (retry++ < 3);
present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
@@ -593,6 +614,14 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
+/**
+ * dc_link_detect() - Detect if a sink is attached to a given link
+ *
+ * link->local_sink is created or destroyed as needed.
+ *
+ * This does not create remote sinks but will trigger DM
+ * to start MST detection if a branch is detected.
+ */
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
@@ -688,12 +717,26 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
same_dpcd = false;
}
- /* Active dongle downstream unplug */
+ /* Active dongle plugged in without a display, or downstream unplug */
if (link->type == dc_connection_active_dongle
&& link->dpcd_caps.sink_count.
bits.SINK_COUNT == 0) {
- if (prev_sink != NULL)
+ if (prev_sink != NULL) {
+ /* Downstream unplug */
dc_sink_release(prev_sink);
+ } else {
+ /* Empty dongle plugged in */
+ for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+ int fail_count = 0;
+
+ dp_verify_link_cap(link,
+ &link->reported_link_cap,
+ &fail_count);
+
+ if (fail_count == 0)
+ break;
+ }
+ }
return true;
}
@@ -2607,11 +2650,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ core_dc->hwss.blank_stream(pipe_ctx);
+
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
- core_dc->hwss.blank_stream(pipe_ctx);
-
core_dc->hwss.disable_stream(pipe_ctx, option);
disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 4d1f8ac069c1..0caacb60b02f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1089,6 +1089,121 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
+static enum dc_status read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ enum dc_status retval;
+
+ /* The HW reads 16 bytes from 200h on HPD,
+ * but if we get an AUX_DEFER, the HW cannot retry
+ * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+ * fail, so we now explicitly read 6 bytes, which is
+ * the requirement of the above-mentioned test cases.
+ *
+ * For DP 1.4 we need to read those from the 2002h range.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ irq_data->raw,
+ sizeof(union hpd_irq_data));
+ else {
+ /* Read 14 bytes in a single read and then copy only the required
+ * fields. This is more efficient than doing two separate AUX reads.
+ */
+
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT_ESI,
+ tmp,
+ sizeof(tmp));
+
+ if (retval != DC_OK)
+ return retval;
+
+ irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+ }
+
+ return retval;
+}
+
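Worked check of the offset arithmetic above, assuming the standard DPCD addresses from drm_dp_helper.h (DP_SINK_COUNT_ESI = 0x2002, DP_LANE0_1_STATUS_ESI = 0x200c, DP_SINK_STATUS_ESI = 0x200f); the assertion is a sketch, not part of the patch:

/* tmp[] spans 0x200f - 0x2002 + 1 = 14 bytes, matching the "Read 14
 * bytes" comment; lane01_status, for example, is recovered from
 * tmp[0x200c - 0x2002] == tmp[10].
 */
_Static_assert(DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1 == 14,
	       "ESI read buffer covers 14 bytes");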
+static bool hpd_rx_irq_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ uint8_t irq_reg_rx_power_state = 0;
+ enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+ union lane_status lane_status;
+ uint32_t lane;
+ bool sink_status_changed;
+ bool return_code;
+
+ sink_status_changed = false;
+ return_code = false;
+
+ if (link->cur_link_settings.lane_count == 0)
+ return return_code;
+
+ /*1. Check that Link Status changed, before re-training.*/
+
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /* check status of lanes 0,1
+ * changed DpcdAddress_Lane01Status (0x202)
+ */
+ lane_status.raw = get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* if one of the channel equalization, clock
+ * recovery or symbol lock is dropped
+ * consider it as (link has been
+ * dropped) dp sink status has changed
+ */
+ sink_status_changed = true;
+ break;
+ }
+ }
+
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+
+ DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
+
+ return_code = true;
+
+ /*2. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+
+ if (dpcd_result != DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
+ __func__);
+ } else {
+ if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+ return_code = false;
+ }
+ }
+
+ return return_code;
+}
+
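The lane parsing above depends on get_nibble_at_index(); a plausible stand-in is sketched below, assuming the usual DPCD layout of two lanes' status per byte, low nibble first (an assumption of this note, not code from the patch):

static inline uint8_t sketch_nibble_at_index(const uint8_t *buf, uint32_t index)
{
	uint8_t nibble = buf[index / 2];

	/* even lanes sit in the low nibble, odd lanes in the high nibble */
	return (index % 2) ? (nibble >> 4) : (nibble & 0x0F);
}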
bool dp_verify_link_cap(
struct dc_link *link,
struct dc_link_settings *known_limit_link_setting,
@@ -1104,12 +1219,14 @@ bool dp_verify_link_cap(
struct clock_source *dp_cs;
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
+ union hpd_irq_data irq_data;
if (link->dc->debug.skip_detection_link_training) {
link->verified_link_cap = *known_limit_link_setting;
return true;
}
+ memset(&irq_data, 0, sizeof(irq_data));
success = false;
skip_link_training = false;
@@ -1168,9 +1285,15 @@ bool dp_verify_link_cap(
(*fail_count)++;
}
- if (success)
+ if (success) {
link->verified_link_cap = *cur;
-
+ udelay(1000);
+ if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK)
+ if (hpd_rx_irq_check_link_loss_status(
+ link,
+ &irq_data))
+ (*fail_count)++;
+ }
/* always disable the link before trying another
* setting or before returning we'll enable it later
* based on the actual mode we're driving
@@ -1572,122 +1695,6 @@ void decide_link_settings(struct dc_stream_state *stream,
}
/*************************Short Pulse IRQ***************************/
-
-static bool hpd_rx_irq_check_link_loss_status(
- struct dc_link *link,
- union hpd_irq_data *hpd_irq_dpcd_data)
-{
- uint8_t irq_reg_rx_power_state = 0;
- enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
- union lane_status lane_status;
- uint32_t lane;
- bool sink_status_changed;
- bool return_code;
-
- sink_status_changed = false;
- return_code = false;
-
- if (link->cur_link_settings.lane_count == 0)
- return return_code;
-
- /*1. Check that Link Status changed, before re-training.*/
-
- /*parse lane status*/
- for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
- /* check status of lanes 0,1
- * changed DpcdAddress_Lane01Status (0x202)
- */
- lane_status.raw = get_nibble_at_index(
- &hpd_irq_dpcd_data->bytes.lane01_status.raw,
- lane);
-
- if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
- !lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
- /* if one of the channel equalization, clock
- * recovery or symbol lock is dropped
- * consider it as (link has been
- * dropped) dp sink status has changed
- */
- sink_status_changed = true;
- break;
- }
- }
-
- /* Check interlane align.*/
- if (sink_status_changed ||
- !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
-
- DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
-
- return_code = true;
-
- /*2. Check that we can handle interrupt: Not in FS DOS,
- * Not in "Display Timeout" state, Link is trained.
- */
- dpcd_result = core_link_read_dpcd(link,
- DP_SET_POWER,
- &irq_reg_rx_power_state,
- sizeof(irq_reg_rx_power_state));
-
- if (dpcd_result != DC_OK) {
- DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
- __func__);
- } else {
- if (irq_reg_rx_power_state != DP_SET_POWER_D0)
- return_code = false;
- }
- }
-
- return return_code;
-}
-
-static enum dc_status read_hpd_rx_irq_data(
- struct dc_link *link,
- union hpd_irq_data *irq_data)
-{
- static enum dc_status retval;
-
- /* The HW reads 16 bytes from 200h on HPD,
- * but if we get an AUX_DEFER, the HW cannot retry
- * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
- * fail, so we now explicitly read 6 bytes which is
- * the req from the above mentioned test cases.
- *
- * For DP 1.4 we need to read those from 2002h range.
- */
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT,
- irq_data->raw,
- sizeof(union hpd_irq_data));
- else {
- /* Read 14 bytes in a single read and then copy only the required fields.
- * This is more efficient than doing it in two separate AUX reads. */
-
- uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
-
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT_ESI,
- tmp,
- sizeof(tmp));
-
- if (retval != DC_OK)
- return retval;
-
- irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
- }
-
- return retval;
-}
-
static bool allow_hpd_rx_irq(const struct dc_link *link)
{
/*
@@ -2196,7 +2203,7 @@ static void get_active_converter_info(
}
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
- uint8_t det_caps[4];
+ uint8_t det_caps[16]; /* CTS 4.2.2.7 expects the source to read Detailed Capabilities Info: 00080h-0008Fh. */
union dwnstream_port_caps_byte0 *port_caps =
(union dwnstream_port_caps_byte0 *)det_caps;
core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
@@ -2240,7 +2247,8 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
- link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 82cd1d6e6e59..0065ec7d5330 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -96,6 +96,7 @@ void dp_enable_link_phy(
link_settings,
clock_source);
}
+ link->cur_link_settings = *link_settings;
dp_receiver_power_ctrl(link, true);
}
@@ -307,6 +308,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
link->link_enc,
link_setting,
pipes[i].clock_source->id);
+ link->cur_link_settings = *link_setting;
dp_receiver_power_ctrl(link, true);
@@ -316,7 +318,6 @@ void dp_retrain_link_dp_test(struct dc_link *link,
skip_video_pattern,
LINK_TRAINING_ATTEMPTS);
- link->cur_link_settings = *link_setting;
link->dc->hwss.enable_stream(&pipes[i]);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 0bb844a7b990..76137df74a53 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -83,7 +83,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCE_VERSION_11_22;
break;
case FAMILY_AI:
- dc_version = DCE_VERSION_12_0;
+ if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_12_1;
+ else
+ dc_version = DCE_VERSION_12_0;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case FAMILY_RV:
@@ -136,6 +139,7 @@ struct resource_pool *dc_create_resource_pool(
num_virtual_links, dc);
break;
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
res_pool = dce120_create_resource_pool(
num_virtual_links, dc);
break;
@@ -1447,6 +1451,14 @@ static bool are_stream_backends_same(
return true;
}
+/**
+ * dc_is_stream_unchanged() - Compare two stream states for equivalence.
+ *
+ * Checks whether there is a difference between the two states
+ * that would require a mode change.
+ *
+ * Does not compare cursor position or attributes.
+ */
bool dc_is_stream_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
@@ -1457,6 +1469,9 @@ bool dc_is_stream_unchanged(
return true;
}
+/**
+ * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
+ */
bool dc_is_stream_scaling_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
@@ -1616,6 +1631,9 @@ bool resource_is_stream_unchanged(
return false;
}
+/**
+ * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
+ */
enum dc_status dc_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
@@ -1640,6 +1658,9 @@ enum dc_status dc_add_stream_to_ctx(
return res;
}
+/**
+ * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
+ */
enum dc_status dc_remove_stream_from_ctx(
struct dc *dc,
struct dc_state *new_ctx,
@@ -1860,6 +1881,12 @@ enum dc_status resource_map_pool_resources(
return DC_ERROR_UNEXPECTED;
}
+/**
+ * dc_resource_state_copy_construct_current() - Create a dc_state from the current state
+ * @dc: the dc whose current_state is copied out
+ * @dst_ctx: the destination state to copy into
+ *
+ * This is a shallow copy; refcounts on the existing streams and planes
+ * are incremented.
+ */
void dc_resource_state_copy_construct_current(
const struct dc *dc,
struct dc_state *dst_ctx)
@@ -1875,6 +1902,14 @@ void dc_resource_state_construct(
dst_ctx->dccg = dc->res_pool->clk_mgr;
}
+/**
+ * dc_validate_global_state() - Determine if the HW can support a given state
+ * @dc: dc struct for this driver
+ * @new_ctx: state to be validated
+ *
+ * Checks HW resource availability and bandwidth requirements.
+ *
+ * Return: DC_OK if the state can be programmed; otherwise, an error code.
+ */
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx)
@@ -2202,113 +2237,15 @@ static void set_vendor_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
- uint32_t length = 0;
- bool hdmi_vic_mode = false;
- uint8_t checksum = 0;
- uint32_t i = 0;
- enum dc_timing_3d_format format;
- // Can be different depending on packet content /*todo*/
- // unsigned int length = pPathMode->dolbyVision ? 24 : 5;
-
- info_packet->valid = false;
-
- format = stream->timing.timing_3d_format;
- if (stream->view_format == VIEW_3D_FORMAT_NONE)
- format = TIMING_3D_FORMAT_NONE;
-
- /* Can be different depending on packet content */
- length = 5;
-
- if (stream->timing.hdmi_vic != 0
- && stream->timing.h_total >= 3840
- && stream->timing.v_total >= 2160)
- hdmi_vic_mode = true;
-
- /* According to HDMI 1.4a CTS, VSIF should be sent
- * for both 3D stereo and HDMI VIC modes.
- * For all other modes, there is no VSIF sent. */
+ /* The vendor-specific (VSP) infopacket is now built by DM */
- if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+ /* Check that the stream carries a valid vendor-specific infopacket;
+ * if not, return without populating the packet
+ */
+ if (!stream->vsp_infopacket.valid)
return;
- /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
- info_packet->sb[1] = 0x03;
- info_packet->sb[2] = 0x0C;
- info_packet->sb[3] = 0x00;
-
- /*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format.
- * The value for HDMI_Video_Format are:
- * 0x0 (0b000) - No additional HDMI video format is presented in this
- * packet
- * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
- * parameter follows
- * 0x2 (0b010) - 3D format indication present. 3D_Structure and
- * potentially 3D_Ext_Data follows
- * 0x3..0x7 (0b011..0b111) - reserved for future use */
- if (format != TIMING_3D_FORMAT_NONE)
- info_packet->sb[4] = (2 << 5);
- else if (hdmi_vic_mode)
- info_packet->sb[4] = (1 << 5);
-
- /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
- * 4 lower bites = 0 (reserved). 4 higher bits = 3D_Structure.
- * The value for 3D_Structure are:
- * 0x0 - Frame Packing
- * 0x1 - Field Alternative
- * 0x2 - Line Alternative
- * 0x3 - Side-by-Side (full)
- * 0x4 - L + depth
- * 0x5 - L + depth + graphics + graphics-depth
- * 0x6 - Top-and-Bottom
- * 0x7 - Reserved for future use
- * 0x8 - Side-by-Side (Half)
- * 0x9..0xE - Reserved for future use
- * 0xF - Not used */
- switch (format) {
- case TIMING_3D_FORMAT_HW_FRAME_PACKING:
- case TIMING_3D_FORMAT_SW_FRAME_PACKING:
- info_packet->sb[5] = (0x0 << 4);
- break;
-
- case TIMING_3D_FORMAT_SIDE_BY_SIDE:
- case TIMING_3D_FORMAT_SBS_SW_PACKED:
- info_packet->sb[5] = (0x8 << 4);
- length = 6;
- break;
-
- case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
- case TIMING_3D_FORMAT_TB_SW_PACKED:
- info_packet->sb[5] = (0x6 << 4);
- break;
-
- default:
- break;
- }
-
- /*PB5: If PB4 is set to 0x1 (extended resolution format)
- * fill PB5 with the correct HDMI VIC code */
- if (hdmi_vic_mode)
- info_packet->sb[5] = stream->timing.hdmi_vic;
-
- /* Header */
- info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
- info_packet->hb1 = 0x01; /* Version */
-
- /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
- info_packet->hb2 = (uint8_t) (length);
-
- /* Calculate checksum */
- checksum = 0;
- checksum += info_packet->hb0;
- checksum += info_packet->hb1;
- checksum += info_packet->hb2;
-
- for (i = 1; i <= length; i++)
- checksum += info_packet->sb[i];
-
- info_packet->sb[0] = (uint8_t) (0x100 - checksum);
-
- info_packet->valid = true;
+ *info_packet = stream->vsp_infopacket;
}
static void set_spd_info_packet(
@@ -2364,10 +2301,6 @@ void dc_resource_state_destruct(struct dc_state *context)
}
}
-/*
- * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
- * by the src_ctx
- */
void dc_resource_state_copy_construct(
const struct dc_state *src_ctx,
struct dc_state *dst_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 780838a05f44..66e5c4623a49 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -170,7 +170,7 @@ struct dc_stream_status *dc_stream_get_status(
}
/**
- * Update the cursor attributes and set cursor surface address
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index dea8bc39c688..4b5bbb13ce7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.06"
+#define DC_VER "3.2.08"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index fcfd50b5dba0..4842d2378bbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -234,14 +234,14 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
if (field_value == condition_value) {
if (i * delay_between_poll_us > 1000 &&
!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
- dm_output_to_console("REG_WAIT taking a while: %dms in %s line:%d\n",
+ DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
delay_between_poll_us * i / 1000,
func_name, line);
return reg_val;
}
}
- dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
+ DC_LOG_WARNING("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
delay_between_poll_us, time_out_num_tries,
func_name, line);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 7825e4b5e97c..e72fce4eca65 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -192,7 +192,6 @@ enum surface_pixel_format {
/*swaped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
- SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
@@ -200,6 +199,7 @@ enum surface_pixel_format {
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
SURFACE_PIXEL_FORMAT_SUBSAMPLE_END,
+ SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_INVALID
/*grow 444 video here if necessary */
@@ -358,15 +358,16 @@ union dc_tiling_info {
} gfx8;
struct {
+ enum swizzle_mode_values swizzle;
unsigned int num_pipes;
- unsigned int num_banks;
+ unsigned int max_compressed_frags;
unsigned int pipe_interleave;
+
+ unsigned int num_banks;
unsigned int num_shader_engines;
unsigned int num_rb_per_se;
- unsigned int max_compressed_frags;
bool shaderEnable;
- enum swizzle_mode_values swizzle;
bool meta_linear;
bool rb_aligned;
bool pipe_aligned;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8738f27a8708..29f19d57ff7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -128,8 +128,10 @@ struct dc_link {
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
-/*
- * Return an enumerated dc_link. dc_link order is constant and determined at
+/**
+ * dc_get_link_at_index() - Return an enumerated dc_link.
+ *
+ * dc_link order is constant and determined at
* boot time. They cannot be created or destroyed.
* Use dc_get_caps() to get number of links.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 771d9f17e26e..be34d638e15d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -56,6 +56,7 @@ struct dc_stream_state {
struct dc_crtc_timing_adjust adjust;
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
+ struct dc_info_packet vsp_infopacket;
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -129,11 +130,13 @@ struct dc_stream_update {
struct dc_crtc_timing_adjust *adjust;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
+ struct dc_info_packet *vsp_infopacket;
bool *dpms_off;
struct colorspace_transform *gamut_remap;
enum dc_color_space *output_color_space;
+ enum dc_dither_option *dither_option;
struct dc_csc_transform *output_csc_transform;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 6e12d640d020..0b20ae23f169 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -73,10 +73,18 @@ struct hw_asic_id {
void *atombios_base_address;
};
+struct dc_perf_trace {
+ unsigned long read_count;
+ unsigned long write_count;
+ unsigned long last_entry_read;
+ unsigned long last_entry_write;
+};
+
struct dc_context {
struct dc *dc;
void *driver_context; /* e.g. amdgpu_device */
+ struct dc_perf_trace *perf_trace;
void *cgs_device;
enum dce_environment dce_environment;
@@ -191,7 +199,6 @@ union display_content_support {
};
struct dc_panel_patch {
- unsigned int disconnect_delay;
unsigned int dppowerup_delay;
unsigned int extra_t12_ms;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index bd22f51813bf..afd287f08bc9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -676,6 +676,11 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
+ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+ /* TODO: W/A for dal3 Linux; investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
/* get max clock state from PPLIB */
@@ -690,6 +695,8 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+ context->bw.dce.dispclk_khz = unpatched_disp_clk;
}
static void dce12_update_clocks(struct clk_mgr *clk_mgr,
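A minimal sketch of the save/patch/restore workaround above (hypothetical helper; only the 115/100 factor comes from the patch): a computed dispclk of 400,000 kHz, for example, is requested from PPLIB as 460,000 kHz while the context keeps its original value for later use.

static int sketch_patched_dispclk_khz(int dispclk_khz, bool dfs_bypass_active)
{
	/* e.g. 400000 * 115 / 100 = 460000 when DFS bypass is inactive */
	return dfs_bypass_active ? dispclk_khz : dispclk_khz * 115 / 100;
}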
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index bc50a8e25f4f..87771676acac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -117,6 +117,18 @@ void dce100_prepare_bandwidth(
false);
}
+void dce100_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ true);
+}
+
/**************************************************************************/
void dce100_hw_sequencer_construct(struct dc *dc)
@@ -125,6 +137,6 @@ void dce100_hw_sequencer_construct(struct dc *dc)
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
- dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 1f7f25013217..52d50e24a995 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -64,65 +64,37 @@ static const struct dce110_compressor_reg_offsets reg_offsets[] = {
static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
-enum fbc_idle_force {
- /* Bit 0 - Display registers updated */
- FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
-
- /* Bit 2 - FBC_GRPH_COMP_EN register updated */
- FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
- /* Bit 3 - FBC_SRC_SEL register updated */
- FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
- /* Bit 4 - FBC_MIN_COMPRESSION register updated */
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
- /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
- FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
- /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
- /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
-
- /* Bit 24 - Memory write to region 0 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
- /* Bit 25 - Memory write to region 1 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
- /* Bit 26 - Memory write to region 2 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
- /* Bit 27 - Memory write to region 3 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
-
- /* Bit 28 - Memory write from any client other than MCIF */
- FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
- /* Bit 29 - CG statics screen signal is inactive */
- FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
-};
-
-
static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
{
return 256 * ((pixels + 255) / 256);
}
-static void reset_lb_on_vblank(struct dc_context *ctx)
+static void reset_lb_on_vblank(struct compressor *compressor, uint32_t crtc_inst)
{
- uint32_t value, frame_count;
+ uint32_t value;
+ uint32_t frame_count;
+ uint32_t status_pos;
uint32_t retry = 0;
- uint32_t status_pos =
- dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ cp110->offsets = reg_offsets[crtc_inst];
+
+ status_pos = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION));
/* Only if CRTC is enabled and counter is moving we wait for one frame. */
- if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
+ if (status_pos != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION))) {
/* Resetting LB on VBlank */
- value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
- dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+ dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
- frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+ frame_count = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT));
for (retry = 10000; retry > 0; retry--) {
- if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+ if (frame_count != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT)))
break;
udelay(10);
}
@@ -130,13 +102,11 @@ static void reset_lb_on_vblank(struct dc_context *ctx)
dm_error("Frame count did not increase for 100ms.\n");
/* Resetting LB on VBlank */
- value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
- dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
-
+ dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
}
-
}
static void wait_for_fbc_state_changed(
@@ -226,10 +196,10 @@ void dce110_compressor_enable_fbc(
uint32_t addr;
uint32_t value, misc_value;
-
addr = mmFBC_CNTL;
value = dm_read_reg(compressor->ctx, addr);
set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ /* params->inst is a valid HW CRTC instance, starting from 0 */
set_reg_field_value(
value,
params->inst,
@@ -238,8 +208,10 @@ void dce110_compressor_enable_fbc(
/* Keep track of enum controller_id FBC is attached to */
compressor->is_enabled = true;
- compressor->attached_inst = params->inst;
- cp110->offsets = reg_offsets[params->inst];
+ /* attached_inst is the SW CRTC instance, starting from 1;
+ * 0 (CONTROLLER_ID_UNDEFINED) means no CRTC is attached
+ */
+ compressor->attached_inst = params->inst + CONTROLLER_ID_D0;
/* Toggle it as there is bug in HW */
set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
@@ -268,9 +240,10 @@ void dce110_compressor_enable_fbc(
void dce110_compressor_disable_fbc(struct compressor *compressor)
{
struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+ uint32_t crtc_inst = 0;
if (compressor->options.bits.FBC_SUPPORT) {
- if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ if (dce110_compressor_is_fbc_enabled_in_hw(compressor, &crtc_inst)) {
uint32_t reg_data;
/* Turn off compression */
reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
@@ -284,8 +257,10 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
wait_for_fbc_state_changed(cp110, false);
}
- /* Sync line buffer - dce100/110 only*/
- reset_lb_on_vblank(compressor->ctx);
+ /* Sync the line buffer that FBC was attached to (dce100/110 only) */
+ if (crtc_inst > CONTROLLER_ID_UNDEFINED && crtc_inst < CONTROLLER_ID_D3)
+ reset_lb_on_vblank(compressor,
+ crtc_inst - CONTROLLER_ID_D0);
}
}
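A numbering sketch for the HW/SW CRTC instance mapping used above, assuming the usual controller_id layout (CONTROLLER_ID_UNDEFINED == 0, CONTROLLER_ID_D0 == 1); the helper is hypothetical, not part of the patch:

/* attached_inst 0 means FBC is not attached; HW CRTC 0 is stored as
 * CONTROLLER_ID_D0 (1), HW CRTC 1 as 2, and so on.
 */
static uint32_t sketch_hw_crtc_inst(uint32_t attached_inst)
{
	return attached_inst - CONTROLLER_ID_D0;
}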
@@ -328,6 +303,8 @@ void dce110_compressor_program_compressed_surface_address_and_pitch(
uint32_t compressed_surf_address_low_part =
compressor->compr_surface_address.addr.low_part;
+ cp110->offsets = reg_offsets[params->inst];
+
/* Clear content first. */
dm_write_reg(
compressor->ctx,
@@ -410,13 +387,7 @@ void dce110_compressor_set_fbc_invalidation_triggers(
value = dm_read_reg(compressor->ctx, addr);
set_reg_field_value(
value,
- fbc_trigger |
- FBC_IDLE_FORCE_GRPH_COMP_EN |
- FBC_IDLE_FORCE_SRC_SEL_CHANGE |
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
- FBC_IDLE_FORCE_ALPHA_COMP_EN |
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ fbc_trigger,
FBC_IDLE_FORCE_CLEAR_MASK,
FBC_IDLE_FORCE_CLEAR_MASK);
dm_write_reg(compressor->ctx, addr, value);
@@ -549,7 +520,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
compressor->base.channel_interleave_size = 0;
compressor->base.dram_channels_num = 0;
compressor->base.lpt_channels_num = 0;
- compressor->base.attached_inst = 0;
+ compressor->base.attached_inst = CONTROLLER_ID_UNDEFINED;
compressor->base.is_enabled = false;
compressor->base.funcs = &dce110_compressor_funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 2f062bacd78a..4bf24758217f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1267,10 +1267,19 @@ static void program_scaler(const struct dc *dc,
pipe_ctx->plane_res.scl_data.lb_params.depth,
&pipe_ctx->stream->bit_depth_params);
- if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
+ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
+ /*
+ * With 4:2:0 packing, two channels carry the Y component and one
+ * channel alternates between Cb and Cr, so the Cr channel must also
+ * be programmed with the pixel value for Y
+ */
+ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ color.color_r_cr = color.color_g_y;
+
pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
pipe_ctx->stream_res.tg,
&color);
+ }
pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
&pipe_ctx->plane_res.scl_data);
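An illustrative fragment of the fixup above, with assumed limited-range 10-bit black levels (Y = 0x040, Cb = Cr = 0x200; the values are this note's assumption, only the final assignment is from the patch):

color.color_g_y  = 0x040;		/* Y  */
color.color_b_cb = 0x200;		/* Cb */
color.color_r_cr = color.color_g_y;	/* Cr channel also carries Y samples */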
@@ -1766,12 +1775,13 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
* Check if FBC can be enabled
*/
static bool should_enable_fbc(struct dc *dc,
- struct dc_state *context,
- uint32_t *pipe_idx)
+ struct dc_state *context,
+ uint32_t *pipe_idx)
{
uint32_t i;
struct pipe_ctx *pipe_ctx = NULL;
struct resource_context *res_ctx = &context->res_ctx;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
ASSERT(dc->fbc_compressor);
@@ -1786,14 +1796,28 @@ static bool should_enable_fbc(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (res_ctx->pipe_ctx[i].stream) {
+
pipe_ctx = &res_ctx->pipe_ctx[i];
- *pipe_idx = i;
- break;
+
+ if (!pipe_ctx)
+ continue;
+
+ /* fbc not applicable on underlay pipe */
+ if (pipe_ctx->pipe_idx != underlay_idx) {
+ *pipe_idx = i;
+ break;
+ }
}
}
- /* Pipe context should be found */
- ASSERT(pipe_ctx);
+ if (i == dc->res_pool->pipe_count)
+ return false;
+
+ if (!pipe_ctx->stream->sink)
+ return false;
+
+ if (!pipe_ctx->stream->sink->link)
+ return false;
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
@@ -1817,8 +1841,9 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(struct dc *dc,
- struct dc_state *context)
+static void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context)
{
uint32_t pipe_idx = 0;
@@ -1828,10 +1853,9 @@ static void enable_fbc(struct dc *dc,
struct compressor *compr = dc->fbc_compressor;
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
-
params.source_view_width = pipe_ctx->stream->timing.h_addressable;
params.source_view_height = pipe_ctx->stream->timing.v_addressable;
-
+ params.inst = pipe_ctx->stream_res.tg->inst;
compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
compr->funcs->surface_address_and_pitch(compr, &params);
@@ -2046,10 +2070,10 @@ enum dc_status dce110_apply_ctx_to_hw(
return status;
}
- dcb->funcs->set_scratch_critical_state(dcb, false);
-
if (dc->fbc_compressor)
- enable_fbc(dc, context);
+ enable_fbc(dc, dc->current_state);
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
return DC_OK;
}
@@ -2408,7 +2432,6 @@ static void dce110_program_front_end_for_pipe(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
- unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
unsigned int i;
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2449,15 +2472,6 @@ static void dce110_program_front_end_for_pipe(
program_scaler(dc, pipe_ctx);
- /* fbc not applicable on Underlay pipe */
- if (dc->fbc_compressor && old_pipe->stream &&
- pipe_ctx->pipe_idx != underlay_idx) {
- if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
- dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
- else
- enable_fbc(dc, dc->current_state);
- }
-
mi->funcs->mem_input_program_surface_config(
mi,
plane_state->format,
@@ -2534,6 +2548,9 @@ static void dce110_apply_ctx_for_surface(
if (num_planes == 0)
return;
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2576,6 +2593,9 @@ static void dce110_apply_ctx_for_surface(
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
+
+ if (dc->fbc_compressor)
+ enable_fbc(dc, dc->current_state);
}
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
index f9d7d2c26cc2..54abedbf1b43 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -328,12 +328,10 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
*smu_req_cur = smu_req;
}
-
static const struct clk_mgr_funcs dcn1_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.update_clocks = dcn1_update_clocks
};
-
struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
{
struct dc_debug_options *debug = &ctx->dc->debug;
@@ -373,3 +371,5 @@ struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
return &clk_mgr_dce->base;
}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
index 9dbaf6578006..a995eda443a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -28,6 +28,12 @@
#include "../dce/dce_clk_mgr.h"
+struct clk_bypass {
+ uint32_t dcfclk_bypass;
+ uint32_t dispclk_bypass;
+ uint32_t dprefclk_bypass;
+};
+
void dcn1_pplib_apply_display_requirements(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 3eea44092a04..7469333a2c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -324,7 +324,7 @@ bool cm_helper_translate_curve_to_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE();
+ PERF_TRACE_CTX(output_tf->ctx);
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
@@ -513,7 +513,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE();
+ PERF_TRACE_CTX(output_tf->ctx);
corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 0bd33a713836..91e015e14355 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2159,6 +2159,15 @@ static void dcn10_blank_pixel_data(
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
+ /*
+ * With 4:2:0 packing, two channels carry the Y component and one
+ * channel alternates between Cb and Cr, so the Cr channel must also
+ * be programmed with the pixel value for Y
+ */
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ black_color.color_r_cr = black_color.color_g_y;
+
if (stream_res->tg->funcs->set_blank_color)
stream_res->tg->funcs->set_blank_color(
stream_res->tg,
@@ -2348,7 +2357,8 @@ static void dcn10_apply_ctx_for_surface(
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
/* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream
+ || !pipe_ctx->plane_state)
continue;
pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
@@ -2362,7 +2372,8 @@ static void dcn10_apply_ctx_for_surface(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream
+ || !pipe_ctx->plane_state)
continue;
dcn10_pipe_control_lock(dc, pipe_ctx, false);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 211bb240a720..cd469014baa3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -44,6 +44,7 @@
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
+#include "dcn10_clk_mgr.h"
static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{
@@ -463,19 +464,22 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
unsigned int chars_printed = 0;
+ unsigned int remaining_buffer = bufSize;
- chars_printed = snprintf_count(pBuf, bufSize, "dcfclk_khz,dcfclk_deep_sleep_khz,dispclk_khz,"
- "dppclk_khz,max_supported_dppclk_khz,fclk_khz,socclk_khz\n"
- "%d,%d,%d,%d,%d,%d,%d\n",
+ chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
+ "dppclk,fclk,socclk\n"
+ "%d,%d,%d,%d,%d,%d\n",
dc->current_state->bw.dcn.clk.dcfclk_khz,
dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw.dcn.clk.dispclk_khz,
dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
dc->current_state->bw.dcn.clk.fclk_khz,
dc->current_state->bw.dcn.clk.socclk_khz);
- return chars_printed;
+ remaining_buffer -= chars_printed;
+ pBuf += chars_printed;
+
+ return bufSize - remaining_buffer;
}
static void dcn10_clear_otpc_underflow(struct dc *dc)
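The clock-state dump above follows a simple accumulate-and-advance pattern; a standalone sketch in plain C (hypothetical names and values) is:

#include <stdio.h>

/* Each section reports how many characters it wrote; the caller advances
 * the buffer and shrinks the remaining size before the next section.
 */
static unsigned int sketch_dump_clocks(char *buf, unsigned int size)
{
	unsigned int remaining = size;
	int n;

	n = snprintf(buf, remaining, "dcfclk,dispclk\n");
	if (n > 0 && (unsigned int)n < remaining) {
		buf += n;
		remaining -= n;
	}

	n = snprintf(buf, remaining, "%d,%d\n", 600000, 1200000);
	if (n > 0 && (unsigned int)n < remaining)
		remaining -= n;

	return size - remaining;	/* total characters written */
}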
@@ -538,16 +542,16 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
* Bit 0 - 15: Hardware block mask
* Bit 15: 1 = Invariant Only, 0 = All
*/
- const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
- const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
- const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
- const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
- const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
- const unsigned int DC_HW_STATE_MASK_CM = 0x20;
- const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
- const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
- const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
- const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
+ const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
+ const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
+ const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
+ const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
+ const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
+ const unsigned int DC_HW_STATE_MASK_CM = 0x20;
+ const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
+ const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
+ const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
+ const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
unsigned int chars_printed = 0;
unsigned int remaining_buf_size = bufSize;
@@ -603,6 +607,9 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
remaining_buf_size -= chars_printed;
}
- if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0)
+ if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) {
chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size);
+ pBuf += chars_printed;
+ remaining_buf_size -= chars_printed;
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 47dbe4bb294a..5d4772dec0ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -202,7 +202,6 @@ enum dcn10_clk_src_array_id {
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
-
/* macros to expend register list macro defined in HW object header file
* end *********************/
@@ -436,7 +435,6 @@ static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
-
static const struct bios_registers bios_regs = {
NBIO_SR(BIOS_SCRATCH_0),
NBIO_SR(BIOS_SCRATCH_3),
@@ -497,7 +495,6 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
-
static const struct resource_caps res_cap = {
.num_timing_generator = 4,
.num_opp = 4,
@@ -1277,7 +1274,6 @@ static bool construct(
goto fail;
}
}
-
pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
index 34a701ca879e..65663f4d93e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
@@ -33,6 +33,7 @@
#define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data)
#define EVENT_LOG_AUX_REP(ddc, type, replyStatus, len, data)
+#define EVENT_LOG_CUST_MSG(tag, a, ...)
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index beb08fd12b1d..0029a39efb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -102,7 +102,7 @@ struct pp_smu_funcs_rv {
*/
void (*set_display_count)(struct pp_smu *pp, int count);
- /* which SMU message? are reader and writer WM separate SMU msg? */
+ /* Reader and writer WMs are sent together as part of one table */
/*
* PPSMC_MSG_SetDriverDramAddrHigh
* PPSMC_MSG_SetDriverDramAddrLow
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 28128c02de00..1961cc6d9143 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -31,6 +31,8 @@
#define __DM_SERVICES_H__
+#include "amdgpu_dm_trace.h"
+
/* TODO: remove when DC is complete. */
#include "dm_services_types.h"
#include "logger_interface.h"
@@ -70,6 +72,7 @@ static inline uint32_t dm_read_reg_func(
}
#endif
value = cgs_read_register(ctx->cgs_device, address);
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
return value;
}
@@ -90,6 +93,7 @@ static inline void dm_write_reg_func(
}
#endif
cgs_write_register(ctx->cgs_device, address, value);
+ trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
static inline uint32_t dm_read_index_reg(
@@ -351,8 +355,12 @@ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
/*
* performance tracing
*/
-void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
-#define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__)
+#define PERF_TRACE() trace_amdgpu_dc_performance(CTX->perf_trace->read_count,\
+ CTX->perf_trace->write_count, &CTX->perf_trace->last_entry_read,\
+ &CTX->perf_trace->last_entry_write, __func__, __LINE__)
+#define PERF_TRACE_CTX(__CTX) trace_amdgpu_dc_performance(__CTX->perf_trace->read_count,\
+ __CTX->perf_trace->write_count, &__CTX->perf_trace->last_entry_read,\
+ &__CTX->perf_trace->last_entry_write, __func__, __LINE__)
/*
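A hypothetical call site for the new macros (illustrative, not part of the patch): PERF_TRACE() assumes a local CTX is in scope, while PERF_TRACE_CTX() takes the context explicitly, as the dcn10_cm_common.c hunks above now do.

static void sketch_program_block(struct dc_context *ctx)
{
	PERF_TRACE_CTX(ctx);	/* sample the reg read/write counters */
	/* ... register programming would happen here ... */
	PERF_TRACE_CTX(ctx);	/* second sample brackets the work */
}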
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index f20161c5706d..dada04296025 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -56,7 +56,6 @@ struct gpio_service *dal_gpio_service_create(
struct dc_context *ctx)
{
struct gpio_service *service;
-
uint32_t index_of_id;
service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
@@ -78,44 +77,33 @@ struct gpio_service *dal_gpio_service_create(
goto failure_1;
}
- /* allocate and initialize business storage */
+ /* allocate and initialize busyness storage */
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
index_of_id = 0;
service->ctx = ctx;
do {
uint32_t number_of_bits =
service->factory.number_of_pins[index_of_id];
+ uint32_t i = 0;
- uint32_t number_of_uints =
- (number_of_bits + bits_per_uint - 1) /
- bits_per_uint;
-
- uint32_t *slot;
-
- if (number_of_bits) {
- uint32_t index_of_uint = 0;
+ if (number_of_bits) {
+ service->busyness[index_of_id] =
+ kcalloc(number_of_bits, sizeof(char),
+ GFP_KERNEL);
- slot = kcalloc(number_of_uints,
- sizeof(uint32_t),
- GFP_KERNEL);
-
- if (!slot) {
+ if (!service->busyness[index_of_id]) {
BREAK_TO_DEBUGGER();
goto failure_2;
}
do {
- slot[index_of_uint] = 0;
-
- ++index_of_uint;
- } while (index_of_uint < number_of_uints);
- } else
- slot = NULL;
-
- service->busyness[index_of_id] = slot;
+ service->busyness[index_of_id][i] = 0;
+ ++i;
+ } while (i < number_of_bits);
+ } else {
+ service->busyness[index_of_id] = NULL;
+ }
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
@@ -125,13 +113,8 @@ struct gpio_service *dal_gpio_service_create(
failure_2:
while (index_of_id) {
- uint32_t *slot;
-
--index_of_id;
-
- slot = service->busyness[index_of_id];
-
- kfree(slot);
+ kfree(service->busyness[index_of_id]);
}
failure_1:
@@ -169,9 +152,7 @@ void dal_gpio_service_destroy(
uint32_t index_of_id = 0;
do {
- uint32_t *slot = (*ptr)->busyness[index_of_id];
-
- kfree(slot);
+ kfree((*ptr)->busyness[index_of_id]);
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
@@ -192,11 +173,7 @@ static bool is_pin_busy(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
-
- return 0 != (*slot & (1 << (en % bits_per_uint)));
+ return service->busyness[id][en];
}
static void set_pin_busy(
@@ -204,10 +181,7 @@ static void set_pin_busy(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- service->busyness[id][en / bits_per_uint] |=
- (1 << (en % bits_per_uint));
+ service->busyness[id][en] = true;
}
static void set_pin_free(
@@ -215,10 +189,7 @@ static void set_pin_free(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- service->busyness[id][en / bits_per_uint] &=
- ~(1 << (en % bits_per_uint));
+ service->busyness[id][en] = false;
}
enum gpio_result dal_gpio_service_open(
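The representation change above swaps a packed uint32_t bitmask for one byte per pin, so is_pin_busy() collapses into a plain array lookup and the divide/modulo arithmetic disappears. A standalone sketch contrasting the two schemes; all names here are illustrative, not taken from the driver:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Old scheme: one bit per pin, packed into uint32_t slots. */
static bool bit_is_busy(const uint32_t *slots, unsigned int en)
{
	return slots[en / 32] & (UINT32_C(1) << (en % 32));
}

/* New scheme: one byte per pin; 'en' indexes the array directly. */
static bool byte_is_busy(const char *busyness, unsigned int en)
{
	return busyness[en];
}

int main(void)
{
	uint32_t slots[2] = { 0 };
	char bytes[64] = { 0 };

	slots[33 / 32] |= UINT32_C(1) << (33 % 32);	/* mark pin 33 busy, old way */
	bytes[33] = 1;					/* mark pin 33 busy, new way */

	printf("%d %d\n", bit_is_busy(slots, 33), byte_is_busy(bytes, 33));
	return 0;
}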
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
index c7f3081f59cc..1d501a43d13b 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -36,10 +36,9 @@ struct gpio_service {
/*
* @brief
* Business storage.
- * For each member of 'enum gpio_id',
- * store array of bits (packed into uint32_t slots),
- * index individual bit by 'en' value */
- uint32_t *busyness[GPIO_ID_COUNT];
+ * one byte for each member of 'enum gpio_id'
+ */
+ char *busyness[GPIO_ID_COUNT];
};
enum gpio_result dal_gpio_service_open(
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index a683f4102e65..c2028c4744a6 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -79,6 +79,7 @@ bool dal_hw_factory_init(
dal_hw_factory_dce110_init(factory);
return true;
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
dal_hw_factory_dce120_init(factory);
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 096f45628630..236ca28784a9 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -76,6 +76,7 @@ bool dal_hw_translate_init(
dal_hw_translate_dce110_init(translate);
return true;
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
dal_hw_translate_dce120_init(translate);
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index e56093f26eed..1ad6e49102ff 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -90,6 +90,7 @@ struct i2caux *dal_i2caux_create(
case DCE_VERSION_10_0:
return dal_i2caux_dce100_create(ctx);
case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
return dal_i2caux_dce120_create(ctx);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
index bcb18f5e1e60..7a147a9762a0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/compressor.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
@@ -77,6 +77,7 @@ struct compressor_funcs {
};
struct compressor {
struct dc_context *ctx;
+ /* CONTROLLER_ID_D0 + instance, CONTROLLER_ID_UNDEFINED = 0 */
uint32_t attached_inst;
bool is_enabled;
const struct compressor_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index e3ee96afa60e..b168a5e9dd9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -272,6 +272,17 @@ union bw_context {
struct dce_bw_output dce;
};
+/**
+ * struct dc_state - The full description of a state requested by a user
+ *
+ * @streams: Stream properties
+ * @stream_status: The planes on a given stream
+ * @res_ctx: Persistent state of resources
+ * @bw: The output from bandwidth and watermark calculations
+ * @pp_display_cfg: PowerPlay clocks and settings
+ * @dcn_bw_vars: Non-stack memory to support bandwidth calculations
+ *
+ */
struct dc_state {
struct dc_stream_state *streams[MAX_PIPES];
struct dc_stream_status stream_status[MAX_PIPES];
@@ -279,7 +290,6 @@ struct dc_state {
struct resource_context res_ctx;
- /* The output from BW and WM calculations. */
union bw_context bw;
/* Note: these are big structures, do *not* put on stack! */
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index f8dbfa5b89f2..7fd78a696800 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -41,6 +41,7 @@ enum as_signal_type {
AS_SIGNAL_TYPE_LVDS,
AS_SIGNAL_TYPE_DISPLAY_PORT,
AS_SIGNAL_TYPE_GPU_PLL,
+ AS_SIGNAL_TYPE_XGMI,
AS_SIGNAL_TYPE_UNKNOWN
};
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index 89627133e188..f5bd869d4320 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -42,6 +42,7 @@ enum dce_version {
DCE_VERSION_11_2,
DCE_VERSION_11_22,
DCE_VERSION_12_0,
+ DCE_VERSION_12_1,
DCE_VERSION_MAX,
DCN_VERSION_1_0,
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index bbecbaefb741..479b77c2e89e 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1761,7 +1761,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
struct pwl_float_data *rgb_user = NULL;
struct pwl_float_data_ex *curve = NULL;
- struct gamma_pixel *axix_x = NULL;
+ struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
bool ret = false;
@@ -1787,10 +1787,10 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
GFP_KERNEL);
if (!curve)
goto curve_alloc_fail;
- axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x),
+ axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x),
GFP_KERNEL);
- if (!axix_x)
- goto axix_x_alloc_fail;
+ if (!axis_x)
+ goto axis_x_alloc_fail;
coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
GFP_KERNEL);
if (!coeff)
@@ -1803,7 +1803,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf = input_tf->tf;
build_evenly_distributed_points(
- axix_x,
+ axis_x,
ramp->num_entries,
dividers);
@@ -1828,7 +1828,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf_pts->x_point_at_y1_blue = 1;
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axix_x, curve,
+ coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
mapUserRamp && ramp->type != GAMMA_CUSTOM);
if (ramp->type == GAMMA_CUSTOM)
@@ -1838,8 +1838,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
kvfree(coeff);
coeff_alloc_fail:
- kvfree(axix_x);
-axix_x_alloc_fail:
+ kvfree(axis_x);
+axis_x_alloc_fail:
kvfree(curve);
curve_alloc_fail:
kvfree(rgb_user);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 620a171620ee..1544ed3f1747 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -608,12 +608,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
- if (app_tf != transfer_func_unknown) {
+ if (app_tf != TRANSFER_FUNC_UNKNOWN) {
infopacket->valid = true;
infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
- if (app_tf == transfer_func_gamma_22) {
+ if (app_tf == TRANSFER_FUNC_GAMMA_22) {
infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
}
}
@@ -688,11 +688,11 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
return;
switch (packet_type) {
- case packet_type_fs2:
+ case PACKET_TYPE_FS2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
break;
- case packet_type_vrr:
- case packet_type_fs1:
+ case PACKET_TYPE_VRR:
+ case PACKET_TYPE_FS1:
default:
build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 786b34380f85..5b1c9a4c7643 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -26,15 +26,13 @@
#ifndef MOD_INFO_PACKET_H_
#define MOD_INFO_PACKET_H_
-struct info_packet_inputs {
- const struct dc_stream_state *pStream;
-};
+#include "mod_shared.h"
-struct info_packets {
- struct dc_info_packet *pVscInfoPacket;
-};
+// Forward declarations
+struct dc_stream_state;
+struct dc_info_packet;
-void mod_build_infopackets(struct info_packet_inputs *inputs,
- struct info_packets *info_packets);
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet);
#endif
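With the info_packet_inputs/info_packets wrapper structs removed, callers now hand the stream and the packet to fill straight to mod_build_vsc_infopacket(). A hedged standalone sketch of the new calling shape; the stream/info_packet types here are stand-ins, since the real dc_stream_state and dc_info_packet definitions live elsewhere:

#include <stdio.h>

struct stream { int id; };
struct info_packet { int valid; };

/* Direct two-argument signature, mirroring the header change above. */
static void build_vsc_infopacket(const struct stream *s, struct info_packet *p)
{
	p->valid = 1;
	printf("built VSC packet for stream %d\n", s->id);
}

int main(void)
{
	struct stream s = { 0 };
	struct info_packet p = { 0 };

	/* No wrapper structs any more; call the builder directly. */
	build_vsc_infopacket(&s, &p);
	return p.valid ? 0 : 1;
}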
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index 238c431ae483..1bd02c0ac30c 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -23,27 +23,26 @@
*
*/
-
#ifndef MOD_SHARED_H_
#define MOD_SHARED_H_
enum color_transfer_func {
- transfer_func_unknown,
- transfer_func_srgb,
- transfer_func_bt709,
- transfer_func_pq2084,
- transfer_func_pq2084_interim,
- transfer_func_linear_0_1,
- transfer_func_linear_0_125,
- transfer_func_dolbyvision,
- transfer_func_gamma_22,
- transfer_func_gamma_26
+ TRANSFER_FUNC_UNKNOWN,
+ TRANSFER_FUNC_SRGB,
+ TRANSFER_FUNC_BT709,
+ TRANSFER_FUNC_PQ2084,
+ TRANSFER_FUNC_PQ2084_INTERIM,
+ TRANSFER_FUNC_LINEAR_0_1,
+ TRANSFER_FUNC_LINEAR_0_125,
+ TRANSFER_FUNC_GAMMA_22,
+ TRANSFER_FUNC_GAMMA_26
};
enum vrr_packet_type {
- packet_type_vrr,
- packet_type_fs1,
- packet_type_fs2
+ PACKET_TYPE_VRR,
+ PACKET_TYPE_FS1,
+ PACKET_TYPE_FS2
};
+
#endif /* MOD_SHARED_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index ff8bfb9b43b0..db06fab2ad5c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -25,6 +25,10 @@
#include "mod_info_packet.h"
#include "core_types.h"
+#include "dc_types.h"
+#include "mod_shared.h"
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
enum ColorimetryRGBDP {
ColorimetryRGB_DP_sRGB = 0,
@@ -41,7 +45,7 @@ enum ColorimetryYCCDP {
ColorimetryYCC_DP_ITU2020YCbCr = 7,
};
-static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet)
{
unsigned int vscPacketRevision = 0;
@@ -159,7 +163,7 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
* DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4
* (and higher). When MISC1. bit 6. is Set to 1, a Source device uses a VSC SDP to indicate
* the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
- * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become “don’t care”).)
+ * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)
*/
if (vscPacketRevision == 0x5) {
/* Secondary-data Packet ID = 0 */
@@ -320,10 +324,3 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
-void mod_build_infopackets(struct info_packet_inputs *inputs,
- struct info_packets *info_packets)
-{
- if (info_packets->pVscInfoPacket != NULL)
- mod_build_vsc_infopacket(inputs->pStream, info_packets->pVscInfoPacket);
-}
-
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
new file mode 100644
index 000000000000..8f515875a34d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_OFFSET_HEADER
+#define _mmhub_9_4_0_OFFSET_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
new file mode 100644
index 000000000000..0a6b072d191e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_SH_MASK_HEADER
+#define _mmhub_9_4_0_SH_MASK_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 58ac0b90c310..8154d67388cc 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -188,8 +188,8 @@ struct tile_config {
*/
#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define ALLOC_MEM_FLAGS_GTT (1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) /* TODO */
-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) /* TODO */
+#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
+#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
/*
* Allocation flags attributes/access options.
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 980e696989b1..789c4f288485 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -127,12 +127,13 @@ enum amd_pp_task {
};
enum PP_SMC_POWER_PROFILE {
- PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x0,
- PP_SMC_POWER_PROFILE_POWERSAVING = 0x1,
- PP_SMC_POWER_PROFILE_VIDEO = 0x2,
- PP_SMC_POWER_PROFILE_VR = 0x3,
- PP_SMC_POWER_PROFILE_COMPUTE = 0x4,
- PP_SMC_POWER_PROFILE_CUSTOM = 0x5,
+ PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT = 0x0,
+ PP_SMC_POWER_PROFILE_FULLSCREEN3D = 0x1,
+ PP_SMC_POWER_PROFILE_POWERSAVING = 0x2,
+ PP_SMC_POWER_PROFILE_VIDEO = 0x3,
+ PP_SMC_POWER_PROFILE_VR = 0x4,
+ PP_SMC_POWER_PROFILE_COMPUTE = 0x5,
+ PP_SMC_POWER_PROFILE_CUSTOM = 0x6,
};
enum {
@@ -276,6 +277,10 @@ struct amd_pm_funcs {
struct amd_pp_simple_clock_info *clocks);
int (*notify_smu_enable_pwe)(void *handle);
int (*enable_mgpu_fan_boost)(void *handle);
+ int (*set_active_display_count)(void *handle, uint32_t count);
+ int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
+ int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
+ int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
};
#endif
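The four new amd_pm_funcs callbacks follow the usual optional-callback convention: a caller checks the pointer before dispatching, since not every power backend implements them (the pp_* wrappers added later in this patch do the same NULL check against hwmgr_func). A standalone sketch of that dispatch pattern; amd_pm_funcs_sketch, demo_set_fclk, and request_min_fclk are invented names standing in for the real wiring:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct amd_pm_funcs_sketch {
	int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
};

static int demo_set_fclk(void *handle, uint32_t clock)
{
	(void)handle;
	printf("hard min fclk -> %u (10 kHz units)\n", (unsigned)clock);
	return 0;
}

static int request_min_fclk(const struct amd_pm_funcs_sketch *f,
			    void *handle, uint32_t clock)
{
	if (!f || !f->set_hard_min_fclk_by_freq)
		return -EINVAL;		/* backend may not implement it */
	return f->set_hard_min_fclk_by_freq(handle, clock);
}

int main(void)
{
	struct amd_pm_funcs_sketch f = { demo_set_fclk };

	return request_min_fclk(&f, NULL, 30000);
}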
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index b68c2e0fef01..9bc27f468d5b 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -725,7 +725,7 @@ static int pp_dpm_force_clock_level(void *handle,
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
+ pr_debug("force clock level is for dpm manual mode only.\n");
return -EINVAL;
}
@@ -899,7 +899,7 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("power profile setting is for manual dpm mode only.\n");
+ pr_debug("power profile setting is for manual dpm mode only.\n");
return ret;
}
@@ -1072,7 +1072,7 @@ static int pp_get_current_clocks(void *handle,
&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
if (ret) {
- pr_info("Error in phm_get_clock_info \n");
+ pr_debug("Error in phm_get_clock_info \n");
mutex_unlock(&hwmgr->smu_lock);
return -EINVAL;
}
@@ -1332,6 +1332,78 @@ static int pp_enable_mgpu_fan_boost(void *handle)
return 0;
}
+static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_active_display_count(void *handle, uint32_t count)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_set_active_display_count(hwmgr, count);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1378,4 +1450,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
+ .set_active_display_count = pp_set_active_display_count,
+ .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
+ .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
+ .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 85119c2bdcc8..1f92a9f4c9e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
adev = hwmgr->adev;
- if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
+ /* Skip for suspend/resume case */
+ if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
+ && adev->in_suspend) {
pr_info("dpm has been enabled\n");
return 0;
}
@@ -286,8 +288,8 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
if (display_config == NULL)
return -EINVAL;
- if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
- hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
+ if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+ hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0)
@@ -478,3 +480,44 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}
+
+int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_active_display_count)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
+}
+
+int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+}
+
+int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+}
+
+int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 47ac92369739..310b102a9292 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -64,17 +64,19 @@ static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
{
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 2;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 0;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 1;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 3;
- hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 4;
-
- hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_POWERSAVING;
- hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_VIDEO;
- hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
- hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VR;
- hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_COMPUTE;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
+ hwmgr->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+
+ hwmgr->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ hwmgr->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ hwmgr->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
+ hwmgr->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
+ hwmgr->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
}
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
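Inserting PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT at 0x0 shifts every later profile value up by one, which is why smu7_profiling, the profile_name arrays, and Workload_Policy_Max all grow elsewhere in this patch. A compile-time guard one could add (not in this patch) to catch that kind of drift; the enum and array names below are made up:

#include <stddef.h>

enum pp_smc_power_profile_sketch {
	PROFILE_BOOTUP_DEFAULT,
	PROFILE_FULLSCREEN3D,
	PROFILE_POWERSAVING,
	PROFILE_VIDEO,
	PROFILE_VR,
	PROFILE_COMPUTE,
	PROFILE_CUSTOM,
};

static int workload_priority[PROFILE_CUSTOM + 1];

/* Fails the build if someone extends the enum without resizing the table. */
_Static_assert(sizeof(workload_priority) / sizeof(workload_priority[0]) == 7,
	       "per-profile tables must track the enum");

int main(void)
{
	return workload_priority[PROFILE_BOOTUP_DEFAULT];
}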
@@ -352,6 +354,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = phm_pre_display_configuration_changed(hwmgr);
+ if (ret)
+ return ret;
ret = phm_set_cpu_power_state(hwmgr);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 91ffb7bc4ee7..56437866d120 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
if (skip)
return 0;
- phm_pre_display_configuration_changed(hwmgr);
-
phm_display_configuration_changed(hwmgr);
if (hwmgr->ps)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index dd18cb710391..f95c5f50eb0f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -216,12 +216,12 @@ static inline uint32_t convert_10k_to_mhz(uint32_t clock)
return (clock + 99) / 100;
}
-static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->need_min_deep_sleep_dcefclk &&
- smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
+ smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -230,6 +230,34 @@ static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
return 0;
}
+static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->dcf_actual_hard_min_freq &&
+ smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+ smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinDcefclkByFreq,
+ smu10_data->dcf_actual_hard_min_freq);
+ }
+ return 0;
+}
+
+static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->f_actual_hard_min_freq &&
+ smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+ smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ smu10_data->f_actual_hard_min_freq);
+ }
+ return 0;
+}
+
static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
@@ -1206,7 +1234,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.get_max_high_clocks = smu10_get_max_high_clocks,
.read_sensor = smu10_read_sensor,
.set_active_display_count = smu10_set_active_display_count,
- .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
+ .set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
.dynamic_state_management_enable = smu10_enable_dpm_tasks,
.power_off_asic = smu10_power_off_asic,
.asic_setup = smu10_setup_asic_task,
@@ -1217,6 +1245,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.display_clock_voltage_request = smu10_display_clock_voltage_request,
.powergate_gfx = smu10_gfx_off_control,
.powergate_sdma = smu10_powergate_sdma,
+ .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
+ .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
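Both new smu10 helpers follow the same cache-and-compare pattern: convert the request from 10 kHz units to MHz with convert_10k_to_mhz() (rounding up, so 30000 -> 300 and 30001 -> 301), then only message the SMU when the cached value actually changes. A runnable sketch of that pattern under assumed names; the printf stands in for smum_send_msg_to_smc_with_parameter():

#include <stdio.h>
#include <stdint.h>

static uint32_t convert_10k_to_mhz(uint32_t clock)
{
	return (clock + 99) / 100;	/* 10 kHz units -> MHz, rounded up */
}

static uint32_t cached_min_mhz = 60;

static void set_hard_min(uint32_t clock_10khz)
{
	uint32_t mhz = convert_10k_to_mhz(clock_10khz);

	/* Skip the SMU round-trip when nothing would change. */
	if (cached_min_mhz && cached_min_mhz != mhz) {
		cached_min_mhz = mhz;
		printf("send SMU msg: hard min = %u MHz\n", (unsigned)mhz);
	}
}

int main(void)
{
	set_hard_min(30000);	/* 300 MHz: value changed, message sent */
	set_hard_min(30000);	/* unchanged: no message */
	return 0;
}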
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 5dcd21d29dbf..c8f5c00dd1e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -77,8 +77,9 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static const struct profile_mode_setting smu7_profiling[6] =
- {{1, 0, 100, 30, 1, 0, 100, 10},
+static const struct profile_mode_setting smu7_profiling[7] =
+ {{0, 0, 0, 0, 0, 0, 0, 0},
+ {1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 10, 16, 31},
{1, 0, 11, 50, 1, 0, 100, 10},
@@ -2859,7 +2860,10 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+ if (hwmgr->is_kicker)
+ switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+ else
+ switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
break;
case CHIP_VEGAM:
switch_limit_us = 30;
@@ -3589,8 +3593,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= sclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- sclk_table->dpm_levels[i-1].value = sclk;
+ if (sclk > sclk_table->dpm_levels[i-1].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ sclk_table->dpm_levels[i-1].value = sclk;
+ }
} else {
/* TODO: Check SCLK in DAL's minimum clocks
* in case DeepSleep divider update is required.
@@ -3607,8 +3613,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= mclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- mclk_table->dpm_levels[i-1].value = mclk;
+ if (mclk > mclk_table->dpm_levels[i-1].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
@@ -4219,9 +4227,17 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
if (tmp & (1 << 23)) {
data->mem_latency_high = MEM_LATENCY_HIGH;
data->mem_latency_low = MEM_LATENCY_LOW;
+ if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+ (hwmgr->chip_id == CHIP_POLARIS11) ||
+ (hwmgr->chip_id == CHIP_POLARIS12))
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
} else {
data->mem_latency_high = 330;
data->mem_latency_low = 330;
+ if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+ (hwmgr->chip_id == CHIP_POLARIS11) ||
+ (hwmgr->chip_id == CHIP_POLARIS12))
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
}
return 0;
@@ -4874,7 +4890,8 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
uint32_t i, size = 0;
uint32_t len;
- static const char *profile_name[6] = {"3D_FULL_SCREEN",
+ static const char *profile_name[7] = {"BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
"VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index e2bc6e0c229f..91e3bbe6d61d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -804,9 +804,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->backend = data;
- hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
- hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
- hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
vega10_set_default_registry_data(hwmgr);
data->disable_dpm_mask = 0xff;
@@ -3266,8 +3266,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= sclk_table->count) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- sclk_table->dpm_levels[i-1].value = sclk;
+ if (sclk > sclk_table->dpm_levels[i-1].value) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ sclk_table->dpm_levels[i-1].value = sclk;
+ }
}
for (i = 0; i < mclk_table->count; i++) {
@@ -3276,8 +3278,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= mclk_table->count) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- mclk_table->dpm_levels[i-1].value = mclk;
+ if (mclk > mclk_table->dpm_levels[i-1].value) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
@@ -4664,13 +4668,15 @@ static int vega10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
struct vega10_hwmgr *data = hwmgr->backend;
uint32_t i, size = 0;
- static const uint8_t profile_mode_setting[5][4] = {{70, 60, 1, 3,},
+ static const uint8_t profile_mode_setting[6][4] = {{70, 60, 0, 0,},
+ {70, 60, 1, 3,},
{90, 60, 0, 0,},
{70, 60, 0, 0,},
{70, 90, 0, 0,},
{30, 60, 0, 6,},
};
- static const char *profile_name[6] = {"3D_FULL_SCREEN",
+ static const char *profile_name[7] = {"BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
"VR",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 2679d1240fa1..82935a3bd950 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -49,6 +49,10 @@
#include "soc15_common.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
@@ -130,7 +134,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
- data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
+ data->registry_data.fclk_gfxclk_ratio = 0;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
@@ -386,9 +390,9 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->backend = data;
- hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_VIDEO];
- hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
- hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_VIDEO;
+ hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
vega20_set_default_registry_data(hwmgr);
@@ -976,6 +980,9 @@ static int vega20_od8_set_feature_capabilities(
pp_table->FanZeroRpmEnable)
od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;
+ if (!od_settings->overdrive8_capabilities)
+ hwmgr->od_enabled = false;
+
return 0;
}
@@ -1660,14 +1667,15 @@ static uint32_t vega20_find_highest_dpm_level(
return i;
}
-static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t min_freq;
int ret = 0;
- if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1676,23 +1684,18 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
"Failed to set soft min memclk !",
return ret);
-
- min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
- PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
- hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
- "Failed to set hard min memclk !",
- return ret);
}
- if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ if (data->smu_features[GNLD_DPM_UVD].enabled &&
+ (feature_mask & FEATURE_DPM_UVD_MASK)) {
min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1710,7 +1713,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ if (data->smu_features[GNLD_DPM_VCE].enabled &&
+ (feature_mask & FEATURE_DPM_VCE_MASK)) {
min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1720,7 +1724,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1733,14 +1738,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret;
}
-static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t max_freq;
int ret = 0;
- if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1750,7 +1756,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1760,7 +1767,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ if (data->smu_features[GNLD_DPM_UVD].enabled &&
+ (feature_mask & FEATURE_DPM_UVD_MASK)) {
max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1777,7 +1785,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ if (data->smu_features[GNLD_DPM_VCE].enabled &&
+ (feature_mask & FEATURE_DPM_VCE_MASK)) {
max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1787,7 +1796,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -2126,12 +2136,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2158,12 +2168,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2176,12 +2186,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Max Levels!",
return ret);
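vega20_upload_dpm_min_level() and vega20_upload_dpm_max_level() now take a feature_mask so a caller can reprogram one clock domain without touching the rest; the force/unforce paths pass 0xFFFFFFFF to keep the old touch-everything behaviour, while the per-clock paths below pass a single FEATURE_DPM_*_MASK. A small standalone sketch of the gating, with two made-up feature bits:

#include <stdio.h>
#include <stdint.h>

#define FEATURE_A_MASK (UINT32_C(1) << 0)
#define FEATURE_B_MASK (UINT32_C(1) << 1)

/* Each domain is only reprogrammed when its bit is set, so per-domain
 * updates skip the rest. */
static void upload_min_levels(uint32_t feature_mask)
{
	if (feature_mask & FEATURE_A_MASK)
		printf("set soft min for domain A\n");
	if (feature_mask & FEATURE_B_MASK)
		printf("set soft min for domain B\n");
}

int main(void)
{
	upload_min_levels(0xFFFFFFFF);		/* unforce: touch everything */
	upload_min_levels(FEATURE_A_MASK);	/* forcing domain A only */
	return 0;
}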
@@ -2234,17 +2244,24 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
+ if (soft_max_level >= data->dpm_table.gfx_table.count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level,
+ data->dpm_table.gfx_table.count - 1);
+ return -EINVAL;
+ }
+
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2254,17 +2271,24 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
+ if (soft_max_level >= data->dpm_table.mem_table.count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level,
+ data->dpm_table.mem_table.count - 1);
+ return -EINVAL;
+ }
+
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2272,6 +2296,18 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
break;
case PP_PCIE:
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+ if (soft_min_level >= NUM_LINK_LEVELS ||
+ soft_max_level >= NUM_LINK_LEVELS)
+ return -EINVAL;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ PP_ASSERT_WITH_CODE(!ret,
+ "Failed to set min link dpm level!",
+ return ret);
+
break;
default:
@@ -2748,9 +2784,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
data->od8_settings.od8_settings_array;
OverDriveTable_t *od_table =
&(data->smc_state_table.overdrive_table);
+ struct phm_ppt_v3_information *pptable_information =
+ (struct phm_ppt_v3_information *)hwmgr->pptable;
+ PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
+ struct amdgpu_device *adev = hwmgr->adev;
struct pp_clock_levels_with_latency clocks;
int i, now, size = 0;
int ret = 0;
+ uint32_t gen_speed, lane_width;
switch (type) {
case PP_SCLK:
@@ -2788,6 +2829,28 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case PP_PCIE:
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMHz %s\n", i,
+ (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
+ (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
+ (pptable->PcieLaneCount[i] == 1) ? "x1" :
+ (pptable->PcieLaneCount[i] == 2) ? "x2" :
+ (pptable->PcieLaneCount[i] == 3) ? "x4" :
+ (pptable->PcieLaneCount[i] == 4) ? "x8" :
+ (pptable->PcieLaneCount[i] == 5) ? "x12" :
+ (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == pptable->PcieGenSpeed[i]) &&
+ (lane_width == pptable->PcieLaneCount[i]) ?
+ "*" : "");
break;
case OD_SCLK:
@@ -3208,6 +3271,9 @@ static int conv_power_profile_to_pplib_workload(int power_profile)
int pplib_workload = 0;
switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
+ pplib_workload = WORKLOAD_DEFAULT_BIT;
+ break;
case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
break;
@@ -3237,6 +3303,7 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
uint32_t i, size = 0;
uint16_t workload_type = 0;
static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
"3D_FULL_SCREEN",
"POWER_SAVING",
"VIDEO",
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 54fd0125d9cf..f4dab979a3a1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -463,5 +463,8 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
+
+extern int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count);
+
#endif /* _HARDWARE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index fb0f96f7cdbc..8cb831b6a016 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -309,7 +309,7 @@ struct pp_hwmgr_func {
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
- int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_min_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
uint32_t virtual_addr_low,
@@ -332,6 +332,8 @@ struct pp_hwmgr_func {
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate);
int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
+ int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
};
struct pp_table_func {
@@ -703,7 +705,7 @@ enum PP_TABLE_VERSION {
/**
* The main hardware manager structure.
*/
-#define Workload_Policy_Max 5
+#define Workload_Policy_Max 6
struct pp_hwmgr {
void *adev;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
index 62f36ba2435b..6e19f4c7cf8f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
+
#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
@@ -395,6 +397,9 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+#define PPSMC_MSG_EnableFFC ((uint16_t) 0x307)
+#define PPSMC_MSG_DisableFFC ((uint16_t) 0x308)
+
#define PPSMC_MSG_EnableDpmDidt ((uint16_t) 0x309)
#define PPSMC_MSG_DisableDpmDidt ((uint16_t) 0x30A)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 2b2c26616902..52abca065764 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1528,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
efuse = efuse >> 24;
if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
+ if (hwmgr->is_kicker) {
+ min = 1200;
+ max = 2500;
+ } else {
+ min = 1000;
+ max = 2300;
+ }
+ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ if (hwmgr->is_kicker) {
+ min = 900;
+ max = 2100;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
} else {
min = 1100;
max = 2100;
@@ -1626,6 +1639,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
int result = 0;
@@ -1646,6 +1660,59 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
if (0 == result) {
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ if ((adev->pdev->device == 0x67ef && adev->pdev->revision == 0xe5) ||
+ (adev->pdev->device == 0x67ff && adev->pdev->revision == 0xef)) {
+ if ((avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 == 0xEA522DD3) &&
+ (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 == 0x5645A) &&
+ (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 == 0x33F9E) &&
+ (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 == 0xFFFFC5CC) &&
+ (avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 == 0x1B1A) &&
+ (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b == 0xFFFFFCED)) {
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF718F1D4;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x323FD;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x1E455;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x23;
+ }
+ }
+ } else if (hwmgr->chip_id == CHIP_POLARIS12 && !hwmgr->is_kicker) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF6B024DD;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x3005E;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x18A5F;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0x315;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFED1;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x3B;
+ } else if (((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe3) ||
+ (adev->pdev->revision == 0xe4) ||
+ (adev->pdev->revision == 0xe5) ||
+ (adev->pdev->revision == 0xe7) ||
+ (adev->pdev->revision == 0xef))) ||
+ ((adev->pdev->device == 0x6fdf) &&
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF843B66B;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x59CB5;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0xFFFF287F;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFF23;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x58;
+ }
+ }
+
+ if (0 == result) {
table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
@@ -1984,6 +2051,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ /* Apply avfs cks-off voltages to avoid the overshoot
+ * when switching to the highest sclk frequency
+ */
+ if (data->apply_avfs_cks_off_voltage)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
return 0;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 892c1d9304bb..642d0e70d0f8 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -334,7 +334,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ptr = (char __user *)(uintptr_t)args->ptr;
- if (!access_ok(VERIFY_READ, ptr, args->size))
+ if (!access_ok(ptr, args->size))
return -EFAULT;
ret = fault_in_pages_readable(ptr, args->size);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 0cd827e11fa2..de26df0c6044 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
{
struct ast_framebuffer *afb = &afbdev->afb;
+ drm_crtc_force_disable_all(dev);
drm_fb_helper_unregister_fbi(&afbdev->helper);
if (afb->obj) {
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index dac355812adc..373700c05a00 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -583,7 +583,8 @@ void ast_driver_unload(struct drm_device *dev)
drm_mode_config_cleanup(dev);
ast_mm_fini(ast);
- pci_iounmap(dev->pdev, ast->ioregs);
+ if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
+ pci_iounmap(dev->pdev, ast->ioregs);
pci_iounmap(dev->pdev, ast->regs);
kfree(ast);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7c6ac3cadb6b..8bb355d5d43d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -973,9 +973,21 @@ static int get_clock(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = i2c->dev->dev_private;
- uint32_t val;
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
return val & 1 ? 1 : 0;
}
@@ -983,9 +995,21 @@ static int get_data(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = i2c->dev->dev_private;
- uint32_t val;
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
return val & 1 ? 1 : 0;
}
@@ -998,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((clock & 0x01) ? 0 : 1);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
if (ujcrb7 == jtemp)
break;
@@ -1014,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
if (ujcrb7 == jtemp)
break;
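
The new get_clock()/get_data() bodies debounce the I2C lines: a sample only counts once five consecutive reads agree, bounded by 0x10000 attempts. A standalone sketch of the same algorithm (read_line() is a stand-in for ast_get_index_reg_mask()):

	#include <stdint.h>

	static int debounced_read(uint32_t (*read_line)(void))
	{
		uint32_t val = read_line() & 1;
		unsigned int pass = 0, count = 0;

		do {
			uint32_t val2 = read_line() & 1;

			if (val == val2) {
				pass++;		/* one more agreeing sample */
			} else {
				pass = 0;	/* glitch: restart from a fresh sample */
				val = read_line() & 1;
			}
		} while (pass < 5 && count++ < 0x10000);

		return val;
	}
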
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8e28e738cb52..e6403b9549f1 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -98,6 +98,8 @@
#define DP0_STARTVAL 0x064c
#define DP0_ACTIVEVAL 0x0650
#define DP0_SYNCVAL 0x0654
+#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
+#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
#define DP0_MISC 0x0658
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
#define BPC_6 (0 << 5)
@@ -142,6 +144,8 @@
#define DP0_LTLOOPCTRL 0x06d8
#define DP0_SNKLTCTRL 0x06e4
+#define DP1_SRCCTRL 0x07a0
+
/* PHY */
#define DP_PHY_CTRL 0x0800
#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
@@ -150,6 +154,7 @@
#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
+#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
@@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
unsigned long rate;
u32 value;
int ret;
+ u32 dp_phy_ctrl;
rate = clk_get_rate(tc->refclk);
switch (rate) {
@@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
+ if (tc->link.base.num_lanes == 2)
+ dp_phy_ctrl |= PHY_2LANE;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
/*
* Initially PLLs are in bypass. Force PLL parameter update,
@@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
+ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
+ ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
+ ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
@@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
if (!tc->mode)
return -EINVAL;
- /* from excel file - DP0_SrcCtrl */
- tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
- DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
- DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
- /* from excel file - DP1_SrcCtrl */
- tc_write(0x07a0, 0x00003083);
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
+ /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
+ tc_write(DP1_SRCCTRL,
+ (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
+ ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
rate = clk_get_rate(tc->refclk);
switch (rate) {
@@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
}
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
+
/* Setup Main Link */
- dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
+ if (tc->link.base.num_lanes == 2)
+ dp_phy_ctrl |= PHY_2LANE;
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
msleep(100);
@@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct tc_data *tc = connector_to_tc(connector);
+ u32 req, avail;
+ u32 bits_per_pixel = 24;
+
/* DPI interface clock limitation: up to 154 MHz */
if (mode->clock > 154000)
return MODE_CLOCK_HIGH;
+ req = mode->clock * bits_per_pixel / 8;
+ avail = tc->link.base.num_lanes * tc->link.base.rate;
+
+ if (req > avail)
+ return MODE_BAD;
+
return MODE_OK;
}
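
A quick worked example of the check above, assuming a 1920x1080@60 CEA mode (pixel clock 148500 kHz) on a 2-lane link at 2.7 Gb/s (rate 270000): req = 148500 * 24 / 8 = 445500 and avail = 2 * 270000 = 540000, so the mode passes; over a single 1.62 Gb/s lane (avail = 162000) it would be rejected as MODE_BAD.
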
@@ -1186,7 +1209,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
/* Create eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
+ tc->panel ? DRM_MODE_CONNECTOR_eDP :
+ DRM_MODE_CONNECTOR_DisplayPort);
if (ret)
return ret;
@@ -1195,6 +1219,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
drm_display_info_set_bus_formats(&tc->connector.display_info,
&bus_format, 1);
+ tc->connector.display_info.bus_flags =
+ DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_NEGEDGE;
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 680566d97adc..10243965ee7c 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -54,7 +54,7 @@
#define SN_AUX_ADDR_7_0_REG 0x76
#define SN_AUX_LENGTH_REG 0x77
#define SN_AUX_CMD_REG 0x78
-#define AUX_CMD_SEND BIT(1)
+#define AUX_CMD_SEND BIT(0)
#define AUX_CMD_REQ(x) ((x) << 4)
#define SN_AUX_RDATA_REG(x) (0x79 + (x))
#define SN_SSC_CONFIG_REG 0x93
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 60bd7d708e35..4985384e51f6 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -241,6 +241,7 @@ void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
state->fence = NULL;
state->commit = NULL;
+ state->fb_damage_clips = NULL;
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
@@ -285,6 +286,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
if (state->commit)
drm_crtc_commit_put(state->commit);
+
+ drm_property_blob_put(state->fb_damage_clips);
}
EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index d9c0f7573905..1669c42c40ed 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -142,6 +142,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
lockdep_assert_held_once(&dev->master_mutex);
+ WARN_ON(fpriv->is_master);
old_master = fpriv->master;
fpriv->master = drm_master_create(dev);
if (!fpriv->master) {
@@ -170,6 +171,7 @@ out_err:
/* drop references and restore old master on failure */
drm_master_put(&fpriv->master);
fpriv->master = old_master;
+ fpriv->is_master = 0;
return ret;
}
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 05c8e7267165..31032407254d 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -178,7 +178,7 @@ int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
state = drm_atomic_state_alloc(fb->dev);
if (!state) {
ret = -ENOMEM;
- goto out;
+ goto out_drop_locks;
}
state->acquire_ctx = &ctx;
@@ -238,6 +238,7 @@ out:
kfree(rects);
drm_atomic_state_put(state);
+out_drop_locks:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@@ -250,7 +251,7 @@ EXPORT_SYMBOL(drm_atomic_helper_dirtyfb);
* drm_atomic_helper_damage_iter_init - Initialize the damage iterator.
* @iter: The iterator to initialize.
* @old_state: Old plane state for validation.
- * @new_state: Plane state from which to iterate the damage clips.
+ * @state: Plane state from which to iterate the damage clips.
*
* Initialize an iterator, which clips plane damage
* &drm_plane_state.fb_damage_clips to plane &drm_plane_state.src. This iterator
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5e9ca6f96379..d3af098b0922 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
static bool drm_leak_fbdev_smem = false;
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
-MODULE_PARM_DESC(fbdev_emulation,
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
#endif
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index ffa8dc35515f..46f48f245eb5 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -525,7 +525,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
struct drm_device *dev = file_priv->minor->dev;
ssize_t ret;
- if (!access_ok(VERIFY_WRITE, buffer, count))
+ if (!access_ok(buffer, count))
return -EFAULT;
ret = mutex_lock_interruptible(&file_priv->event_read_lock);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index c7a7d7ce5d1c..d9caf205e0b3 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -99,6 +99,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
int drm_sysfs_connector_add(struct drm_connector *connector);
void drm_sysfs_connector_remove(struct drm_connector *connector);
+void drm_sysfs_lease_event(struct drm_device *dev);
+
/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 94bd872d56c4..7e6746b2d704 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -37,6 +37,7 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/nospec.h>
/**
* DOC: getunique and setversion story
@@ -800,13 +801,17 @@ long drm_ioctl(struct file *filp,
if (is_driver_ioctl) {
/* driver ioctl */
- if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+ unsigned int index = nr - DRM_COMMAND_BASE;
+
+ if (index >= dev->driver->num_ioctls)
goto err_i1;
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ index = array_index_nospec(index, dev->driver->num_ioctls);
+ ioctl = &dev->driver->ioctls[index];
} else {
/* core ioctl */
if (nr >= DRM_CORE_IOCTL_COUNT)
goto err_i1;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
ioctl = &drm_ioctls[nr];
}
@@ -888,6 +893,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
if (nr >= DRM_CORE_IOCTL_COUNT)
return false;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
*flags = drm_ioctls[nr].flags;
return true;
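
Both lookups are now hardened against Spectre v1: the bounds check alone can be bypassed speculatively, so the index is clamped with array_index_nospec() before it is used. A kernel-context sketch of the pattern (the lookup() helper is hypothetical; struct drm_ioctl_desc is the real table element type):

	#include <linux/nospec.h>

	static const struct drm_ioctl_desc *lookup(const struct drm_ioctl_desc *table,
						   unsigned int nr, unsigned int count)
	{
		if (nr >= count)
			return NULL;
		nr = array_index_nospec(nr, count);	/* clamp under speculation too */
		return &table[nr];
	}
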
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 3650d3c46718..99cba8ea5d82 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -292,7 +292,7 @@ void drm_lease_destroy(struct drm_master *master)
if (master->lessor) {
/* Tell the master to check the lessee list */
- drm_sysfs_hotplug_event(dev);
+ drm_sysfs_lease_event(dev);
drm_master_put(&master->lessor);
}
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index b3c1daad1169..ecb7b33002bb 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
connector->kdev = NULL;
}
+void drm_sysfs_lease_event(struct drm_device *dev)
+{
+ char *event_string = "LEASE=1";
+ char *envp[] = { event_string, NULL };
+
+ DRM_DEBUG("generating lease event\n");
+
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+
/**
* drm_sysfs_hotplug_event - generate a DRM uevent
* @dev: DRM device
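
drm_sysfs_lease_event() gives lease teardown its own uevent so userspace no longer mistakes it for a connector hotplug. A hypothetical userspace consumer, sketched with libudev (a real client would poll() the monitor fd rather than loop; error handling omitted):

	#include <libudev.h>
	#include <stdio.h>

	int main(void)
	{
		struct udev *udev = udev_new();
		struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");

		udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
		udev_monitor_enable_receiving(mon);

		for (;;) {
			struct udev_device *dev = udev_monitor_receive_device(mon);

			/* the new event carries LEASE=1 instead of HOTPLUG=1 */
			if (dev && udev_device_get_property_value(dev, "LEASE"))
				printf("lease list changed on %s\n",
				       udev_device_get_syspath(dev));
			if (dev)
				udev_device_unref(dev);
		}
	}
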
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 7fea74861a87..160ce3c060a5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -439,6 +439,4 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
if (drm_debug & DRM_UT_DRIVER)
etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
-
- gpu->lastctx = cmdbuf->ctx;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 52802e6049e0..18c27f795cf6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -72,14 +72,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
- if (gpu) {
- mutex_lock(&gpu->lock);
- if (gpu->lastctx == ctx)
- gpu->lastctx = NULL;
- mutex_unlock(&gpu->lock);
-
+ if (gpu)
drm_sched_entity_destroy(&ctx->sched_entity[i]);
- }
}
kfree(ctx);
@@ -345,7 +339,6 @@ static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_etnaviv_gem_userptr *args = data;
- int access;
if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
args->flags == 0)
@@ -357,12 +350,7 @@ static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
args->user_ptr & ~PAGE_MASK)
return -EINVAL;
- if (args->flags & ETNA_USERPTR_WRITE)
- access = VERIFY_WRITE;
- else
- access = VERIFY_READ;
-
- if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
+ if (!access_ok((void __user *)(unsigned long)args->user_ptr,
args->user_size))
return -EFAULT;
@@ -523,7 +511,7 @@ static int etnaviv_bind(struct device *dev)
if (!priv) {
dev_err(dev, "failed to allocate private data\n");
ret = -ENOMEM;
- goto out_unref;
+ goto out_put;
}
drm->dev_private = priv;
@@ -549,7 +537,7 @@ out_register:
component_unbind_all(dev, drm);
out_bind:
kfree(priv);
-out_unref:
+out_put:
drm_dev_put(drm);
return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 8d02d1b7dcf5..4bf698de5996 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -107,17 +107,6 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base)
return base + nelem * elem_size;
}
-/* returns true if fence a comes after fence b */
-static inline bool fence_after(u32 a, u32 b)
-{
- return (s32)(a - b) > 0;
-}
-
-static inline bool fence_after_eq(u32 a, u32 b)
-{
- return (s32)(a - b) >= 0;
-}
-
/*
* Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies.
* We need to calculate the timeout in terms of number of jiffies
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 9146e30e24a6..3fbb4855396c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -118,6 +118,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ unsigned long flags;
/* Only catch the first event, or when manually re-armed */
if (!etnaviv_dump_core)
@@ -134,13 +135,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- spin_lock(&gpu->sched.job_list_lock);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
file_size += submit->cmdbuf.size;
n_obj++;
}
- spin_unlock(&gpu->sched.job_list_lock);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -182,14 +183,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
- spin_lock(&gpu->sched.job_list_lock);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
}
- spin_unlock(&gpu->sched.job_list_lock);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index f225fbc6edd2..6904535475de 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -3,10 +3,12 @@
* Copyright (C) 2015-2018 Etnaviv Project
*/
+#include <linux/clk.h>
#include <linux/component.h>
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
#include "etnaviv_cmdbuf.h"
@@ -976,7 +978,6 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
- unsigned long flags;
unsigned int i = 0;
dev_err(gpu->dev, "recover hung GPU!\n");
@@ -989,15 +990,13 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
etnaviv_hw_reset(gpu);
/* complete all events, the GPU won't do it after the reset */
- spin_lock_irqsave(&gpu->event_spinlock, flags);
+ spin_lock(&gpu->event_spinlock);
for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
complete(&gpu->event_free);
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
- spin_unlock_irqrestore(&gpu->event_spinlock, flags);
- gpu->completed_fence = gpu->active_fence;
+ spin_unlock(&gpu->event_spinlock);
etnaviv_gpu_hw_init(gpu);
- gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@@ -1032,7 +1031,7 @@ static bool etnaviv_fence_signaled(struct dma_fence *fence)
{
struct etnaviv_fence *f = to_etnaviv_fence(fence);
- return fence_completed(f->gpu, f->base.seqno);
+ return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0;
}
static void etnaviv_fence_release(struct dma_fence *fence)
@@ -1071,6 +1070,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
return &f->base;
}
+/* returns true if fence a comes after fence b */
+static inline bool fence_after(u32 a, u32 b)
+{
+ return (s32)(a - b) > 0;
+}
+
/*
* event management:
*/
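
fence_after() moves here from the header; its cast-to-signed subtraction orders sequence numbers correctly across a 32-bit wrap, provided the two values are less than 2^31 apart. A standalone demonstration:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* wrap-safe ordering: the signed difference carries the answer */
	static bool fence_after(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

	int main(void)
	{
		assert(fence_after(2, 1));			/* ordinary case */
		assert(fence_after(0x00000001, 0xffffffff));	/* across the wrap */
		assert(!fence_after(1, 1));
		return 0;
	}
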
@@ -1078,7 +1083,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
unsigned int *events)
{
- unsigned long flags, timeout = msecs_to_jiffies(10 * 10000);
+ unsigned long timeout = msecs_to_jiffies(10 * 10000);
unsigned i, acquired = 0;
for (i = 0; i < nr_events; i++) {
@@ -1095,7 +1100,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
timeout = ret;
}
- spin_lock_irqsave(&gpu->event_spinlock, flags);
+ spin_lock(&gpu->event_spinlock);
for (i = 0; i < nr_events; i++) {
int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS);
@@ -1105,7 +1110,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
set_bit(event, gpu->event_bitmap);
}
- spin_unlock_irqrestore(&gpu->event_spinlock, flags);
+ spin_unlock(&gpu->event_spinlock);
return 0;
@@ -1118,18 +1123,11 @@ out:
static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
{
- unsigned long flags;
-
- spin_lock_irqsave(&gpu->event_spinlock, flags);
-
if (!test_bit(event, gpu->event_bitmap)) {
dev_warn(gpu->dev, "event %u is already marked as free",
event);
- spin_unlock_irqrestore(&gpu->event_spinlock, flags);
} else {
clear_bit(event, gpu->event_bitmap);
- spin_unlock_irqrestore(&gpu->event_spinlock, flags);
-
complete(&gpu->event_free);
}
}
@@ -1306,8 +1304,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
goto out_unlock;
}
- gpu->active_fence = gpu_fence->seqno;
-
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
kref_get(&submit->refcount);
@@ -1549,7 +1545,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
- gpu->lastctx = NULL;
gpu->exec_state = -1;
mutex_unlock(&gpu->lock);
@@ -1806,8 +1801,8 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
u32 idle, mask;
- /* If we have outstanding fences, we're not idle */
- if (gpu->completed_fence != gpu->active_fence)
+ /* If there are any jobs in the HW queue, we're not idle */
+ if (atomic_read(&gpu->sched.hw_rq_count))
return -EBUSY;
/* Check whether the hardware (except FE) is idle */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 9a75a6937268..9bcf151f706b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -6,9 +6,6 @@
#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__
-#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
-
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
@@ -88,6 +85,8 @@ struct etnaviv_event {
struct etnaviv_cmdbuf_suballoc;
struct etnaviv_cmdbuf;
+struct regulator;
+struct clk;
#define ETNA_NR_EVENTS 30
@@ -98,7 +97,6 @@ struct etnaviv_gpu {
struct mutex lock;
struct etnaviv_chip_identity identity;
enum etnaviv_sec_mode sec_mode;
- struct etnaviv_file_private *lastctx;
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
@@ -121,7 +119,6 @@ struct etnaviv_gpu {
struct mutex fence_lock;
struct idr fence_idr;
u32 next_fence;
- u32 active_fence;
u32 completed_fence;
wait_queue_head_t fence_event;
u64 fence_context;
@@ -161,11 +158,6 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
return readl(gpu->mmio + reg);
}
-static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
-{
- return fence_after_eq(gpu->completed_fence, fence);
-}
-
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e3d6a8584715..786a8ee6f10f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -228,6 +228,21 @@ static const uint32_t fimd_formats[] = {
DRM_FORMAT_ARGB8888,
};
+static const unsigned int capabilities[WINDOWS_NR] = {
+ 0,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
+static inline void fimd_set_bits(struct fimd_context *ctx, u32 reg, u32 mask,
+ u32 val)
+{
+ val = (val & mask) | (readl(ctx->regs + reg) & ~mask);
+ writel(val, ctx->regs + reg);
+}
+
static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
@@ -551,13 +566,88 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
writel(val, ctx->regs + VIDCON0);
}
+static void fimd_win_set_bldeq(struct fimd_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 mask = BLENDEQ_A_FUNC_F(0xf) | BLENDEQ_B_FUNC_F(0xf);
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ case DRM_MODE_BLEND_COVERAGE:
+ val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA_A);
+ val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA0);
+ val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+ } else {
+ val |= BLENDEQ_A_FUNC_F(BLENDEQ_ONE);
+ val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A);
+ }
+ break;
+ }
+ fimd_set_bits(ctx, BLENDEQx(win), mask, val);
+}
-static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
- uint32_t pixel_format, int width)
+static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
{
- unsigned long val;
+ u32 win_alpha_l = (alpha >> 8) & 0xf;
+ u32 win_alpha_h = alpha >> 12;
+ u32 val = 0;
- val = WINCONx_ENWIN;
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ val |= WINCON1_ALPHA_SEL;
+ val |= WINCON1_BLD_PIX;
+ val |= WINCON1_ALPHA_MUL;
+ break;
+ }
+ fimd_set_bits(ctx, WINCON(win), WINCONx_BLEND_MODE_MASK, val);
+
+ /* OSD alpha */
+ val = VIDISD14C_ALPHA0_R(win_alpha_h) |
+ VIDISD14C_ALPHA0_G(win_alpha_h) |
+ VIDISD14C_ALPHA0_B(win_alpha_h) |
+ VIDISD14C_ALPHA1_R(0x0) |
+ VIDISD14C_ALPHA1_G(0x0) |
+ VIDISD14C_ALPHA1_B(0x0);
+ writel(val, ctx->regs + VIDOSD_C(win));
+
+ val = VIDW_ALPHA_R(win_alpha_l) | VIDW_ALPHA_G(win_alpha_l) |
+ VIDW_ALPHA_B(win_alpha_l);
+ writel(val, ctx->regs + VIDWnALPHA0(win));
+
+ val = VIDW_ALPHA_R(0x0) | VIDW_ALPHA_G(0x0) |
+ VIDW_ALPHA_B(0x0);
+ writel(val, ctx->regs + VIDWnALPHA1(win));
+
+ fimd_set_bits(ctx, BLENDCON, BLENDCON_NEW_MASK,
+ BLENDCON_NEW_8BIT_ALPHA_VALUE);
+}
+
+static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
+ struct drm_framebuffer *fb, int width)
+{
+ struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane.base.state);
+ uint32_t pixel_format = fb->format->format;
+ unsigned int alpha = state->base.alpha;
+ u32 val = WINCONx_ENWIN;
+ unsigned int pixel_alpha;
+
+ if (fb->format->has_alpha)
+ pixel_alpha = state->base.pixel_blend_mode;
+ else
+ pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
/*
* In case of s3c64xx, window 0 doesn't support alpha channel.
@@ -591,8 +681,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
break;
case DRM_FORMAT_ARGB8888:
default:
- val |= WINCON1_BPPMODE_25BPP_A1888
- | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
+ val |= WINCON1_BPPMODE_25BPP_A1888;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
@@ -610,25 +699,12 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_4WORD;
}
-
- writel(val, ctx->regs + WINCON(win));
+ fimd_set_bits(ctx, WINCON(win), ~WINCONx_BLEND_MODE_MASK, val);
/* hardware window 0 doesn't support alpha channel. */
if (win != 0) {
- /* OSD alpha */
- val = VIDISD14C_ALPHA0_R(0xf) |
- VIDISD14C_ALPHA0_G(0xf) |
- VIDISD14C_ALPHA0_B(0xf) |
- VIDISD14C_ALPHA1_R(0xf) |
- VIDISD14C_ALPHA1_G(0xf) |
- VIDISD14C_ALPHA1_B(0xf);
-
- writel(val, ctx->regs + VIDOSD_C(win));
-
- val = VIDW_ALPHA_R(0xf) | VIDW_ALPHA_G(0xf) |
- VIDW_ALPHA_G(0xf);
- writel(val, ctx->regs + VIDWnALPHA0(win));
- writel(val, ctx->regs + VIDWnALPHA1(win));
+ fimd_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+ fimd_win_set_bldeq(ctx, win, alpha, pixel_alpha);
}
}
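
fimd_win_set_pixfmt() and the new blend helpers funnel their register updates through fimd_set_bits(), a read-modify-write that replaces only the bits selected by the mask. A minimal standalone model of that helper (reg stands in for the memory-mapped register):

	#include <stdint.h>

	static void set_bits(volatile uint32_t *reg, uint32_t mask, uint32_t val)
	{
		/* keep everything outside mask, take val inside it */
		*reg = (val & mask) | (*reg & ~mask);
	}
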
@@ -785,7 +861,7 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
}
- fimd_win_set_pixfmt(ctx, win, fb->format->format, state->src.w);
+ fimd_win_set_pixfmt(ctx, win, fb, state->src.w);
/* hardware window 0 doesn't support color key. */
if (win != 0)
@@ -987,6 +1063,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats);
ctx->configs[i].zpos = i;
ctx->configs[i].type = fimd_win_types[i];
+ ctx->configs[i].capabilities = capabilities[i];
ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
&ctx->configs[i]);
if (ret)
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 33a458b7f1fc..148be8e1a090 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -131,5 +131,5 @@ config DRM_I915_GVT_KVMGT
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
-source drivers/gpu/drm/i915/Kconfig.debug
+source "drivers/gpu/drm/i915/Kconfig.debug"
endmenu
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index fe754022e356..359d37d5c958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,10 +61,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&dev_priv->drm.struct_mutex);
+ mmio_hw_access_pre(dev_priv);
ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
+ mmio_hw_access_post(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 77edbfcb0f75..77ae634eb11c 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1900,11 +1900,11 @@ static struct cmd_info cmd_info[] = {
{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
- {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
+ {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
D_BDW_PLUS, 0, 8, NULL},
- {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
- ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+ {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
+ D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 481896fb712a..85e6736f0a32 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->bpp = skl_pixel_formats[fmt].bpp;
plane->drm_format = skl_pixel_formats[fmt].drm_format;
} else {
- plane->tiled = !!(val & DISPPLANE_TILED);
+ plane->tiled = val & DISPPLANE_TILED;
fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
plane->bpp = bdw_pixel_formats[fmt].bpp;
plane->drm_format = bdw_pixel_formats[fmt].drm_format;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 58e166effa45..c7103dd2d8d5 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2447,10 +2447,11 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
- struct intel_gvt_partial_pte *pos;
+ struct intel_gvt_partial_pte *pos, *next;
- list_for_each_entry(pos,
- &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ list_for_each_entry_safe(pos, next,
+ &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
+ list) {
gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
pos->offset, pos->data);
kfree(pos);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 6ef5a7fc70df..733a2a0d0c30 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -437,7 +437,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
ret = intel_gvt_debugfs_init(gvt);
if (ret)
- gvt_err("debugfs registeration failed, go on.\n");
+ gvt_err("debugfs registration failed, go on.\n");
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 31f6cdbe5c42..b4ab1dad0143 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -159,6 +159,10 @@ struct intel_vgpu_submission {
struct kmem_cache *workloads;
atomic_t running_workload_num;
struct i915_gem_context *shadow_ctx;
+ union {
+ u64 i915_context_pml4;
+ u64 i915_context_pdps[GEN8_3LVL_PDPES];
+ };
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
void *ring_scan_buffer[I915_NUM_ENGINES];
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index aa280bb07125..b5475c91e2ef 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -475,6 +475,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
_MMIO(0x7704),
_MMIO(0x7708),
_MMIO(0x770c),
+ _MMIO(0x83a8),
_MMIO(0xb110),
GEN8_L3SQCREG4,//_MMIO(0xb118)
_MMIO(0xe100),
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 5daa23ae566b..6b9d1354ff29 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -126,7 +126,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
[FDI_RX_INTERRUPTS_TRANSCODER_C] = "FDI RX Interrupts Combined C",
[AUDIO_CP_CHANGE_TRANSCODER_C] = "Audio CP Change Transcoder C",
[AUDIO_CP_REQUEST_TRANSCODER_C] = "Audio CP Request Transcoder C",
- [ERR_AND_DBG] = "South Error and Debug Interupts Combined",
+ [ERR_AND_DBG] = "South Error and Debug Interrupts Combined",
[GMBUS] = "Gmbus",
[SDVO_B_HOTPLUG] = "SDVO B hotplug",
[CRT_HOTPLUG] = "CRT Hotplug",
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 36a5147cd01e..d6e02c15ef97 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -158,6 +158,8 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
int ring_id, i;
for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+ if (!HAS_ENGINE(dev_priv, ring_id))
+ continue;
offset.reg = regs[ring_id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] =
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b8fbe3fabea3..1ad8c5e1455d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1079,6 +1079,21 @@ err:
return ret;
}
+static void
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
+{
+ struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+ int i;
+
+ if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
+ else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++)
+ px_dma(i915_ppgtt->pdp.page_directory[i]) =
+ s->i915_context_pdps[i];
+ }
+}
+
/**
* intel_vgpu_clean_submission - free submission-related resource for vGPU
* @vgpu: a vGPU
@@ -1091,6 +1106,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
struct intel_vgpu_submission *s = &vgpu->submission;
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+ i915_context_ppgtt_root_restore(s);
i915_gem_context_put(s->shadow_ctx);
kmem_cache_destroy(s->workloads);
}
@@ -1116,6 +1132,21 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
s->ops->reset(vgpu, engine_mask);
}
+static void
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
+{
+ struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+ int i;
+
+ if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
+ else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++)
+ s->i915_context_pdps[i] =
+ px_dma(i915_ppgtt->pdp.page_directory[i]);
+ }
+}
+
/**
* intel_vgpu_setup_submission - setup submission-related resource for vGPU
* @vgpu: a vGPU
@@ -1138,6 +1169,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
if (IS_ERR(s->shadow_ctx))
return PTR_ERR(s->shadow_ctx);
+ i915_context_ppgtt_root_save(s);
+
bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d36a9755ad91..216f52b744a6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1282,8 +1282,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (args->size == 0)
return 0;
- if (!access_ok(VERIFY_WRITE,
- u64_to_user_ptr(args->data_ptr),
+ if (!access_ok(u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
@@ -1609,9 +1608,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (args->size == 0)
return 0;
- if (!access_ok(VERIFY_READ,
- u64_to_user_ptr(args->data_ptr),
- args->size))
+ if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
return -EFAULT;
obj = i915_gem_object_lookup(file, args->handle);
@@ -2559,7 +2556,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
* If there's no chance of allocating enough pages for the whole
* object, bail early.
*/
- if (page_count > totalram_pages)
+ if (page_count > totalram_pages())
return -ENOMEM;
st = kmalloc(sizeof(*st), GFP_KERNEL);
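
The totalram_pages change here (and in the selftests below) reflects the global counter becoming an accessor function. A kernel-context fragment of the new idiom, with limit as an assumed local:

	/* totalram_pages() now returns the page count; shift by
	 * PAGE_SHIFT to reason about it in bytes. */
	unsigned long limit = totalram_pages() << PAGE_SHIFT;
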
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 10a4afb4f235..485b259127c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,7 +26,7 @@
*
*/
-#include <linux/dma_remapping.h>
+#include <linux/intel-iommu.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
- len = 6;
+ len = 3;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
-
- /* And again for good measure (blb/pnv) */
- *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *batch++ = addr;
- *batch++ = target_offset;
}
goto out;
@@ -1452,7 +1447,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* to read. However, if the array is not writable the user loses
* the updated relocation values.
*/
- if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs))))
+ if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
return -EFAULT;
do {
@@ -1559,7 +1554,7 @@ static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
addr = u64_to_user_ptr(entry->relocs_ptr);
size *= sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(VERIFY_READ, addr, size))
+ if (!access_ok(addr, size))
return -EFAULT;
end = addr + size;
@@ -1610,6 +1605,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
(char __user *)urelocs + copied,
len)) {
end_user:
+ user_access_end();
kvfree(relocs);
err = -EFAULT;
goto err;
@@ -1628,7 +1624,9 @@ end_user:
* happened we would make the mistake of assuming that the
* relocations were valid.
*/
- user_access_begin();
+ if (!user_access_begin(urelocs, size))
+ goto end_user;
+
for (copied = 0; copied < nreloc; copied++)
unsafe_put_user(-1,
&urelocs[copied].presumed_offset,
@@ -2095,7 +2093,7 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
return ERR_PTR(-EINVAL);
user = u64_to_user_ptr(args->cliprects_ptr);
- if (!access_ok(VERIFY_READ, user, nfences * sizeof(*user)))
+ if (!access_ok(user, nfences * sizeof(*user)))
return ERR_PTR(-EFAULT);
fences = kvmalloc_array(nfences, sizeof(*fences),
@@ -2610,7 +2608,16 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
unsigned int i;
/* Copy the new buffer offsets back to the user's exec list. */
- user_access_begin();
+ /*
+ * Note: count * sizeof(*user_exec_list) does not overflow,
+ * because we checked 'count' in check_buffer_count().
+ *
+ * And this range already got effectively checked earlier
+ * when we did the "copy_from_user()" above.
+ */
+ if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
+ goto end_user;
+
for (i = 0; i < args->buffer_count; i++) {
if (!(exec2_list[i].offset & UPDATE))
continue;
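
user_access_begin() now takes the exact user range it opens, a failed open must not be followed by any unsafe_*_user() access, and every exit path pairs with user_access_end(). A kernel-context sketch of the pattern (write_back_offsets() and its parameters are assumed, not i915 code):

	static int write_back_offsets(u64 __user *uptr, const u64 *values,
				      unsigned int count)
	{
		unsigned int i;

		if (!user_access_begin(uptr, count * sizeof(*uptr)))
			return -EFAULT;
		for (i = 0; i < count; i++)
			unsafe_put_user(values[i], &uptr[i], efault);
		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}
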
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 2c9b284036d1..9558582c105e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -113,27 +113,25 @@ static void del_object(struct i915_mmu_object *mo)
}
static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
struct i915_mmu_object *mo;
struct interval_tree_node *it;
LIST_HEAD(cancelled);
+ unsigned long end;
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
return 0;
/* interval ranges are inclusive, but invalidate range is exclusive */
- end--;
+ end = range->end - 1;
spin_lock(&mn->lock);
- it = interval_tree_iter_first(&mn->objects, start, end);
+ it = interval_tree_iter_first(&mn->objects, range->start, end);
while (it) {
- if (!blockable) {
+ if (!range->blockable) {
spin_unlock(&mn->lock);
return -EAGAIN;
}
@@ -151,7 +149,7 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
queue_work(mn->wq, &mo->work);
list_add(&mo->link, &cancelled);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
}
list_for_each_entry(mo, &cancelled, link)
del_object(mo);
@@ -791,8 +789,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
if (offset_in_page(args->user_ptr | args->user_size))
return -EINVAL;
- if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
- (char __user *)(unsigned long)args->user_ptr, args->user_size))
+ if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
return -EFAULT;
if (args->flags & I915_USERPTR_READ_ONLY) {
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 0e5c580d117c..e869daf9c8a9 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -52,7 +52,7 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
+ if (!access_ok(request, sizeof(*request)) ||
__put_user(req32.param, &request->param) ||
__put_user((void __user *)(unsigned long)req32.value,
&request->value))
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 4529edfdcfc8..2b2eb57ca71f 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3052,7 +3052,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv,
if (!n_regs)
return NULL;
- if (!access_ok(VERIFY_READ, regs, n_regs * sizeof(u32) * 2))
+ if (!access_ok(regs, n_regs * sizeof(u32) * 2))
return ERR_PTR(-EFAULT);
/* No is_valid function means we're not allowing any register to be programmed. */
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 6fc4b8eeab42..fe56465cdfd6 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -46,7 +46,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
if (topo.flags != 0)
return -EINVAL;
- if (!access_ok(VERIFY_WRITE, u64_to_user_ptr(query_item->data_ptr),
+ if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
total_length))
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 07c861884c70..3da9c0f9e948 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,7 +46,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic_uapi.h>
-#include <linux/dma_remapping.h>
+#include <linux/intel-iommu.h>
#include <linux/reservation.h>
/* Primary plane formats for gen <= 3 */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d7fa301b5ec7..4be167dcd209 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -398,8 +398,13 @@ static u64 execlists_update_context(struct i915_request *rq)
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
+ *
+ * Furthermore, Braswell, at least, wants a full mb to be sure that
+ * the writes are coherent in memory (visible to the GPU) prior to
+ * execution, and not just visible to other CPUs (as is the result of
+ * wmb).
*/
- wmb();
+ mb();
return ce->lrc_desc;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c5eb26a7ee79..fbeaec3994e7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
+ unsigned int num_store_dw;
u32 cmd, *cs;
cmd = MI_FLUSH;
-
+ num_store_dw = 0;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
+ if (mode & EMIT_FLUSH)
+ num_store_dw = 4;
- cs = intel_ring_begin(rq, 2);
+ cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
- *cs++ = MI_NOOP;
+ while (num_store_dw--) {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = 0;
+ }
+ *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+
intel_ring_advance(rq, cs);
return 0;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 69fe86b30fbb..a9ed0ecc94e2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -170,7 +170,7 @@ static int igt_ppgtt_alloc(void *arg)
* This should ensure that we do not run into the oomkiller during
* the test and take down the machine wilfully.
*/
- limit = totalram_pages << PAGE_SHIFT;
+ limit = totalram_pages() << PAGE_SHIFT;
limit = min(ppgtt->vm.total, limit);
/* Check we can allocate the entire range */
@@ -1244,7 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915,
u64 hole_start, u64 hole_end,
unsigned long end_time))
{
- const u64 limit = totalram_pages << PAGE_SHIFT;
+ const u64 limit = totalram_pages() << PAGE_SHIFT;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 66df1b177959..27b507eb4a99 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
dsi->encoder.possible_crtcs = 1;
/* If there's a bridge, attach to it and let it create the connector */
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
- if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
-
+ if (dsi->bridge) {
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to attach bridge to drm\n");
+ goto err_encoder_cleanup;
+ }
+ } else {
/* Otherwise create our own connector and attach to a panel */
ret = mtk_dsi_create_connector(drm, dsi);
if (ret)
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index d78168f979db..75d97f1b2e8f 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -46,6 +46,7 @@ struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
+ bool enabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -81,8 +82,7 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+static void meson_crtc_enable(struct drm_crtc *crtc)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
@@ -106,6 +106,22 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
priv->io_base + _REG(VPP_MISC));
+ drm_crtc_vblank_on(crtc);
+
+ meson_crtc->enabled = true;
+}
+
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!meson_crtc->enabled)
+ meson_crtc_enable(crtc);
+
priv->viu.osd1_enabled = true;
}
@@ -117,6 +133,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("\n");
+ drm_crtc_vblank_off(crtc);
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
@@ -135,6 +153,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
crtc->state->event = NULL;
}
+
+ meson_crtc->enabled = false;
}
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -143,6 +163,9 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
unsigned long flags;
+ if (crtc->state->enable && !meson_crtc->enabled)
+ meson_crtc_enable(crtc);
+
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index d8c5cc34e22e..807111ebfdd9 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -696,6 +696,7 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
.reg_read = meson_dw_hdmi_reg_read,
.reg_write = meson_dw_hdmi_reg_write,
.max_register = 0x10000,
+ .fast_io = true,
};
static bool meson_hdmi_connector_is_available(struct device *dev)
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index e95e0e7a7fa1..0ba04f6813e6 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -71,6 +71,7 @@
*/
/* HHI Registers */
+#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
@@ -840,6 +841,7 @@ struct meson_hdmi_venc_vic_mode {
{ 5, &meson_hdmi_encp_mode_1080i60 },
{ 20, &meson_hdmi_encp_mode_1080i50 },
{ 32, &meson_hdmi_encp_mode_1080p24 },
+ { 33, &meson_hdmi_encp_mode_1080p50 },
{ 34, &meson_hdmi_encp_mode_1080p30 },
{ 31, &meson_hdmi_encp_mode_1080p50 },
{ 16, &meson_hdmi_encp_mode_1080p60 },
@@ -1659,10 +1661,12 @@ unsigned int meson_venci_get_field(struct meson_drm *priv)
void meson_venc_enable_vsync(struct meson_drm *priv)
{
writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
+ regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
}
void meson_venc_disable_vsync(struct meson_drm *priv)
{
+ regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), 0);
writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL));
}
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 0ba87ff95530..e46e05f50bad 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -184,18 +184,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
if (lut_sel == VIU_LUT_OSD_OETF) {
writel(0, priv->io_base + _REG(addr_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
@@ -211,18 +211,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
} else if (lut_sel == VIU_LUT_OSD_EOTF) {
writel(0, priv->io_base + _REG(addr_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
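
A worked note on the loop bounds above, assuming the driver headers define OSD_OETF_LUT_SIZE as 41 and OSD_EOTF_LUT_SIZE as 33: 41 / 2 = 20, so the OETF loops behave exactly as before, while 33 / 2 = 16 shortens the EOTF loops, which previously clocked 20 register pairs into a 33-entry table.
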
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 843a9d40c05e..cf549f1ed403 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,7 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
select QCOM_MDT_LOADER if ARCH_QCOM
@@ -11,7 +11,7 @@ config DRM_MSM
select DRM_PANEL
select SHMEM
select TMPFS
- select QCOM_SCM
+ select QCOM_SCM if ARCH_QCOM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 19ab521d4c3a..56a70c74af4e 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -6,6 +6,7 @@ ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
+ adreno/a2xx_gpu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \
@@ -14,6 +15,7 @@ msm-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
+ adreno/a6xx_gpu_state.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -68,11 +70,9 @@ msm-y := \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
disp/dpu1/dpu_io_util.o \
- disp/dpu1/dpu_irq.o \
disp/dpu1/dpu_kms.o \
disp/dpu1/dpu_mdss.o \
disp/dpu1/dpu_plane.o \
- disp/dpu1/dpu_power_handle.o \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
msm_atomic.o \
@@ -90,10 +90,11 @@ msm-y := \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
- msm_submitqueue.o
+ msm_submitqueue.o \
+ msm_gpu_tracepoints.o \
+ msm_gpummu.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
- disp/dpu1/dpu_dbg.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 12b0ba270b5e..14eb52f3e605 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -239,7 +239,63 @@ enum sq_tex_swiz {
enum sq_tex_filter {
SQ_TEX_FILTER_POINT = 0,
SQ_TEX_FILTER_BILINEAR = 1,
- SQ_TEX_FILTER_BICUBIC = 2,
+ SQ_TEX_FILTER_BASEMAP = 2,
+ SQ_TEX_FILTER_USE_FETCH_CONST = 3,
+};
+
+enum sq_tex_aniso_filter {
+ SQ_TEX_ANISO_FILTER_DISABLED = 0,
+ SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
+ SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
+ SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
+ SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
+ SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
+ SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
+};
+
+enum sq_tex_dimension {
+ SQ_TEX_DIMENSION_1D = 0,
+ SQ_TEX_DIMENSION_2D = 1,
+ SQ_TEX_DIMENSION_3D = 2,
+ SQ_TEX_DIMENSION_CUBE = 3,
+};
+
+enum sq_tex_border_color {
+ SQ_TEX_BORDER_COLOR_BLACK = 0,
+ SQ_TEX_BORDER_COLOR_WHITE = 1,
+ SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
+ SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
+};
+
+enum sq_tex_sign {
+ SQ_TEX_SIGN_UNISIGNED = 0,
+ SQ_TEX_SIGN_SIGNED = 1,
+ SQ_TEX_SIGN_UNISIGNED_BIASED = 2,
+ SQ_TEX_SIGN_GAMMA = 3,
+};
+
+enum sq_tex_endian {
+ SQ_TEX_ENDIAN_NONE = 0,
+ SQ_TEX_ENDIAN_8IN16 = 1,
+ SQ_TEX_ENDIAN_8IN32 = 2,
+ SQ_TEX_ENDIAN_16IN32 = 3,
+};
+
+enum sq_tex_clamp_policy {
+ SQ_TEX_CLAMP_POLICY_D3D = 0,
+ SQ_TEX_CLAMP_POLICY_OGL = 1,
+};
+
+enum sq_tex_num_format {
+ SQ_TEX_NUM_FORMAT_FRAC = 0,
+ SQ_TEX_NUM_FORMAT_INT = 1,
+};
+
+enum sq_tex_type {
+ SQ_TEX_TYPE_0 = 0,
+ SQ_TEX_TYPE_1 = 1,
+ SQ_TEX_TYPE_2 = 2,
+ SQ_TEX_TYPE_3 = 3,
};
#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
@@ -323,6 +379,18 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
}
#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
+}
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
+}
#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
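
The regenerated VA_RANGE helpers follow the usual rules-ng-ng shift-and-mask pattern, so a full register value can be composed from named fields. As a small illustration (not part of the patch), the MH_MMU_VA_RANGE value that a2xx_hw_init() later builds as "SZ_16M | A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff)" could equivalently be written with both helpers:

	/* Illustration only: VA_BASE() shifts the 4 KiB page-frame number
	 * into bits 12..31, so SZ_16M >> 12 reproduces the raw SZ_16M. */
	uint32_t va_range = A2XX_MH_MMU_VA_RANGE_VA_BASE(SZ_16M >> 12) |
		A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff);
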
@@ -331,6 +399,8 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 0x00000001
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002
#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
@@ -389,12 +459,19 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
+#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001
+#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002
+#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000
#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
#define REG_A2XX_RBBM_INT_ACK 0x000003b6
#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
+#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020
+#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000
+#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000
+#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000
#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
@@ -467,6 +544,19 @@ static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
+#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42
+#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001
+#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002
+#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004
+
+#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43
+
+#define REG_A2XX_MH_INTERRUPT_CLEAR 0x00000a44
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55
+
#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -648,6 +738,18 @@ static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val
#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
#define REG_A2XX_RB_SURFACE_INFO 0x00002000
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
+}
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14
+static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
+}
#define REG_A2XX_RB_COLOR_INFO 0x00002001
#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
@@ -679,7 +781,7 @@ static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
{
- return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+ return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
}
#define REG_A2XX_RB_DEPTH_INFO 0x00002002
@@ -693,7 +795,7 @@ static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
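
The two hunks above look like the one behavioral fix in this regenerated header: RB_COLOR_INFO.BASE and RB_DEPTH_INFO.DEPTH_BASE store a 4 KiB-aligned address in bits 12..31, so the helper must divide the byte address by 4096 before shifting it back up by 12, making helper(val) equal to val & 0xfffff000 for aligned addresses. The old "val >> 10" treated the field as 1 KiB units and encoded an address four times too large: for val = 0x10000 the old helper produced (0x10000 >> 10) << 12 = 0x40000, while the corrected one produces 0x10000.
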
@@ -1757,6 +1859,36 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
#define REG_A2XX_SQ_TEX_0 0x00000000
+#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
+#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
+{
+ return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c
+#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030
+#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0
+#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300
+#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
+}
#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
@@ -1775,14 +1907,46 @@ static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
{
return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
}
-#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
+#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000
#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
{
return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
}
+#define A2XX_SQ_TEX_0_TILED 0x80000000
#define REG_A2XX_SQ_TEX_1 0x00000001
+#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f
+#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
+{
+ return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0
+#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
+{
+ return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
+}
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
+}
+#define A2XX_SQ_TEX_1_STACKED 0x00000400
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 11
+static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
+{
+ return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
+}
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
+}
#define REG_A2XX_SQ_TEX_2 0x00000002
#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
@@ -1797,8 +1961,20 @@ static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
}
+#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000
+#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26
+static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
+}
#define REG_A2XX_SQ_TEX_3 0x00000003
+#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001
+#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
+{
+ return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
+}
#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
@@ -1823,6 +1999,12 @@ static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
{
return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
}
+#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000
+#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
+}
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
@@ -1835,6 +2017,104 @@ static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
{
return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
}
+#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000
+#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23
+static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000
+#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25
+static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000
+#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31
+static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_4 0x00000004
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400
+#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800
+#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000
+#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_5 0x00000005
+#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003
+#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
+{
+ return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
+}
+#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004
+#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018
+#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3
+static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
+}
+#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0
+#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5
+static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600
+#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9
+static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
+{
+ return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
+}
+#define A2XX_SQ_TEX_5_PACKED_MIPS 0x00000800
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+}
#endif /* A2XX_XML */
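
A note on the float-taking helper added above: A2XX_SQ_TEX_4_LOD_BIAS() packs its argument into the 10-bit field at bit 12 as fixed point with 5 fractional bits (hence the * 32.0), giving a bias range of roughly -16 to +15.97 in 1/32 steps. A worked example, assuming the hardware field is two's-complement as the generated cast implies:

	/* (int32_t)(-0.5 * 32.0) == -16 == 0x...fff0; after the << 12 and
	 * the 0x003ff000 mask, the register bits are 0x003f0000. */
	uint32_t bias = A2XX_SQ_TEX_4_LOD_BIAS(-0.5f);	/* == 0x003f0000 */
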
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
new file mode 100644
index 000000000000..1f83bc18d500
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "a2xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+extern bool hang_debug;
+
+static void a2xx_dump(struct msm_gpu *gpu);
+static bool a2xx_idle(struct msm_gpu *gpu);
+
+static bool a2xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 18);
+
+ /* All fields present (bits 9:0) */
+ OUT_RING(ring, 0x000003ff);
+ /* Disable/Enable Real-Time Stream processing (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+ /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+
+ OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
+ OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
+ OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
+
+ /* Vertex and Pixel Shader Start Addresses in instructions
+ * (3 DWORDS per instruction) */
+ OUT_RING(ring, 0x80000180);
+ /* Maximum Contexts */
+ OUT_RING(ring, 0x00000001);
+ /* Write Confirm Interval; the CP waits wait_interval * 16
+ * clocks between polls */
+ OUT_RING(ring, 0x00000000);
+ /* NQ and External Memory Swap */
+ OUT_RING(ring, 0x00000000);
+ /* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
+ OUT_RING(ring, 0x200001f2);
+ /* Disable header dumping and Header dump address */
+ OUT_RING(ring, 0x00000000);
+ /* Header dump size */
+ OUT_RING(ring, 0x00000000);
+
+ /* enable protected mode */
+ OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu, ring);
+ return a2xx_idle(gpu);
+}
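
a2xx_me_init() is built entirely from PM4 type-3 packets via the OUT_PKT3()/OUT_RING() helpers in adreno_gpu.h. A sketch of the header dword those helpers emit, assuming the usual Adreno PM4 encoding (packet type in bits 31:30, payload length minus one in 29:16, opcode in 15:8):

	/* Sketch, not the driver's helper. For the CP_ME_INIT packet above,
	 * with its 18 payload dwords, the header would be
	 * pm4_pkt3_hdr(CP_ME_INIT, 18). */
	static inline uint32_t pm4_pkt3_hdr(uint8_t opcode, uint16_t cnt)
	{
		return (3u << 30) | (((uint32_t)cnt - 1) << 16) |
			((uint32_t)opcode << 8);
	}
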
+
+static int a2xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ dma_addr_t pt_base, tran_error;
+ uint32_t *ptr, len;
+ int i, ret;
+
+ msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+
+ DBG("%s", gpu->name);
+
+ /* halt ME to avoid ucode upload issues on a20x */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+ /* note: kgsl uses 0x00000001 after first reset on a22x */
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
+ msleep(30);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
+
+ if (adreno_is_a225(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
+
+ /* note: kgsl uses 0x0000ffff for a20x */
+ gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
+
+ /* MPU: physical range */
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+ A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+
+ /* same as parameters in adreno_gpu */
+ gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
+ A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
+ gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+
+ gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
+ A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
+ A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
+ A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
+ if (!adreno_is_a20x(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
+
+ gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
+ gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
+
+ /* note: gsl doesn't set this */
+ gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
+ A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
+ gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
+ AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
+ AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB1_INT_MASK |
+ AXXX_CP_INT_CNTL_RB_INT_MASK);
+ gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
+ A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
+ A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
+ A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
+
+ for (i = 3; i <= 5; i++)
+ if ((SZ_16K << i) == adreno_gpu->gmem)
+ break;
+ gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* NOTE: PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx, so we could possibly push that part down to
+ * the adreno_gpu base class, or push both PM4 and PFP but
+ * parameterize the PFP ucode addr/data registers.
+ */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %x", ptr[1]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %x", ptr[5]);
+
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ return a2xx_me_init(gpu) ? 0 : -EINVAL;
+}
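
Both ucode uploads above share one pattern: write the (auto-incrementing) address register once, then stream every dword after the leading header word through the data register. A hypothetical helper capturing that shape; ucode_upload() and its parameters are illustrative, not part of the patch:

	#include <linux/firmware.h>

	/* Hypothetical: waddr_reg/data_reg would be the CP_ME_RAM_WADDR/DATA
	 * pair for PM4 or the PFP UCODE_ADDR/DATA pair; ptr[0] appears to
	 * hold metadata rather than an instruction word, so it is skipped. */
	static void ucode_upload(struct msm_gpu *gpu, u32 waddr_reg,
			u32 data_reg, const struct firmware *fw)
	{
		const u32 *ptr = (const u32 *)fw->data;
		u32 i, len = fw->size / 4;

		gpu_write(gpu, waddr_reg, 0);
		for (i = 1; i < len; i++)
			gpu_write(gpu, data_reg, ptr[i]);
	}
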
+
+static void a2xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a2xx_dump(gpu);
+
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
+ gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
+ adreno_recover(gpu);
+}
+
+static void a2xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ kfree(a2xx_gpu);
+}
+
+static bool a2xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
+ A2XX_RBBM_STATUS_GUI_ACTIVE))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
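
a2xx_idle() leans on the spin_until() macro from the shared adreno code, which busy-polls a condition and returns nonzero on timeout (hence the if around it). An illustrative equivalent, with the timeout length being an assumption:

	/* Assumed semantics of spin_until(), not its actual definition:
	 * poll until cond holds or the timeout elapses; nonzero == timeout. */
	#define my_spin_until(cond) ({					\
		unsigned long __t = jiffies + msecs_to_jiffies(1000);	\
		int __ret = -ETIMEDOUT;					\
		do {							\
			if (cond) {					\
				__ret = 0;				\
				break;					\
			}						\
		} while (time_before(jiffies, __t));			\
		__ret;							\
	})
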
+
+static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t mstatus, status;
+
+ mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
+
+ dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
+ dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
+ gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
+
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
+ status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
+
+ /* only RB_INT is expected */
+ if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
+ dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
+
+ dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
+ }
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a200_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+ 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+ 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+ 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+ 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+ 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+ 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+ 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+ 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+ 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+ 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+ 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
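
This table (and the a220/a225 variants below) lists inclusive (start, end) register-address pairs terminated by a ~0 sentinel; the shared adreno dump/snapshot code walks them to know which registers are safe to read on each core. A hypothetical consumer showing the format; dump_ranges() is illustrative only:

	static void dump_ranges(struct msm_gpu *gpu, const unsigned int *regs)
	{
		int i;
		unsigned int addr;

		/* pairs of inclusive start/end, until the ~0 sentinel */
		for (i = 0; regs[i] != ~0U; i += 2)
			for (addr = regs[i]; addr <= regs[i + 1]; addr++)
				printk("IO:R %08x %08x\n", addr,
					gpu_read(gpu, addr));
	}
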
+
+static const unsigned int a220_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+ 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+ 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+ 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+ 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+ 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+ 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+ 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+ 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+ 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+ 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+ 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+ 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+ 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+ 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+ 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+ 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a225_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+ 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+ 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+ 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+ 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+ 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+ 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+ 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+ 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+ 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+ 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+ 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+ 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+ 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+ 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+ 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+ 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+ 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+ 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+ 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+ 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+/* it would be nice not to have to duplicate the _show() stuff with printk(): */
+static void a2xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A2XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
+
+ return state;
+}
+
+/* Register offset defines for A2XX - copy of A3XX */
+static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+};
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a2xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a2xx_recover,
+ .submit = adreno_submit,
+ .flush = adreno_flush,
+ .active_ring = adreno_active_ring,
+ .irq = a2xx_irq,
+ .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a2xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ },
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+/* TODO */
+};
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+{
+ struct a2xx_gpu *a2xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a2xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
+ if (!a2xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a2xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+ if (adreno_is_a20x(adreno_gpu))
+ adreno_gpu->registers = a200_registers;
+ else if (adreno_is_a225(adreno_gpu))
+ adreno_gpu->registers = a225_registers;
+ else
+ adreno_gpu->registers = a220_registers;
+
+ adreno_gpu->reg_offsets = a2xx_register_offsets;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ if (!gpu->aspace) {
+ dev_err(dev->dev, "No memory protection without MMU\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ return gpu;
+
+fail:
+ if (a2xx_gpu)
+ a2xx_destroy(&a2xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
new file mode 100644
index 000000000000..02fba2cb8932
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef __A2XX_GPU_H__
+#define __A2XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpu {
+ struct adreno_gpu base;
+ bool pm_enabled;
+};
+#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+
+#endif /* __A2XX_GPU_H__ */
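
to_a2xx_gpu() composes with to_adreno_gpu() to recover the wrapper from a generic struct msm_gpu, mirroring the embedding (msm_gpu inside adreno_gpu inside a2xx_gpu). A small illustrative wrapper; the name msm_gpu_to_a2xx() is hypothetical, and the driver simply open-codes the two steps where needed (see a2xx_destroy() above):

	static inline struct a2xx_gpu *msm_gpu_to_a2xx(struct msm_gpu *gpu)
	{
		struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

		return to_a2xx_gpu(adreno_gpu);
	}
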
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index a89f7bb8b5cc..17059f242a98 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 669c2d4b070d..c3b4bc6e4155 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -481,7 +481,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
- dev_err(dev->dev, "no a3xx device\n");
+ DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -528,7 +528,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
- dev_err(dev->dev, "No memory protection without IOMMU\n");
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 858690f52854..9b51e25a9583 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 7c4e6dc1ed59..18f9a8e0bf3b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -561,7 +561,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
- dev_err(dev->dev, "no a4xx device\n");
+ DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -608,7 +608,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
- dev_err(dev->dev, "No memory protection without IOMMU\n");
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index b4944cc0e62f..cf4fe14ddd6e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index d2127b1c4ece..d9af3aff690f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -130,15 +130,13 @@ reset_set(void *data, u64 val)
adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
@@ -173,7 +171,7 @@ int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
return ret;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 8edd80bb0428..d5f5e56422f5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -20,7 +20,6 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
-#include <linux/iopoll.h>
#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -511,13 +510,16 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
+
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
- dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
}
if (!a5xx_gpu->pfp_bo) {
@@ -527,10 +529,12 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
if (IS_ERR(a5xx_gpu->pfp_bo)) {
ret = PTR_ERR(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
- dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
}
gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -841,20 +845,17 @@ static void a5xx_destroy(struct msm_gpu *gpu)
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
- if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
}
@@ -1028,7 +1029,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
- dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
@@ -1134,7 +1135,7 @@ static const u32 a5xx_registers[] = {
static void a5xx_dump(struct msm_gpu *gpu)
{
- dev_info(gpu->dev->dev, "status: %08x\n",
+ DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -1211,10 +1212,6 @@ struct a5xx_gpu_state {
u32 *hlsqregs;
};
-#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
- readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
- interval, timeout)
-
static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
@@ -1222,16 +1219,10 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
&dumper->bo, &dumper->iova);
- return PTR_ERR_OR_ZERO(dumper->ptr);
-}
-
-static void a5xx_crashdumper_free(struct msm_gpu *gpu,
- struct a5xx_crashdumper *dumper)
-{
- msm_gem_put_iova(dumper->bo, gpu->aspace);
- msm_gem_put_vaddr(dumper->bo);
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
- drm_gem_object_put(dumper->bo);
+ return PTR_ERR_OR_ZERO(dumper->ptr);
}
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -1326,7 +1317,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
- a5xx_crashdumper_free(gpu, &dumper);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
return;
}
@@ -1334,7 +1325,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
- a5xx_crashdumper_free(gpu, &dumper);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
}
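
This hunk (and the a5xx_preempt_fini() one below) folds the open-coded put_vaddr/put_iova/object-put sequences into msm_gem_kernel_put(), whose body is added elsewhere in this series and not shown here. Inferred from the call sites it replaces, it presumably looks roughly like this, with the third argument selecting the locked or unlocked object-put variant:

	/* Approximation inferred from the replaced sequences, not the real
	 * msm_gem.c implementation. */
	void msm_gem_kernel_put(struct drm_gem_object *bo,
			struct msm_gem_address_space *aspace, bool locked)
	{
		if (IS_ERR_OR_NULL(bo))
			return;

		msm_gem_put_vaddr(bo);
		msm_gem_unpin_iova(bo, aspace);

		if (locked)
			drm_gem_object_put(bo);
		else
			drm_gem_object_put_unlocked(bo);
	}
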
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1505,7 +1496,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
- dev_err(dev->dev, "No A5XX device is defined\n");
+ DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
return ERR_PTR(-ENXIO);
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 7a41e1c147e4..70e65c94e525 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -298,7 +298,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
- goto err;
+ return;
+
+ msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
while (cmds_size > 0) {
int i;
@@ -317,15 +319,4 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;
-
- return;
-err:
- if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
- if (a5xx_gpu->gpmu_bo)
- drm_gem_object_put(a5xx_gpu->gpmu_bo);
-
- a5xx_gpu->gpmu_bo = NULL;
- a5xx_gpu->gpmu_iova = 0;
- a5xx_gpu->gpmu_dwords = 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 4c357ead1be6..3d62310a535f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -92,7 +92,7 @@ static void a5xx_preempt_timer(struct timer_list *t)
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
return;
- dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
queue_work(priv->wq, &gpu->recover_work);
}
@@ -188,7 +188,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
if (unlikely(status)) {
set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
- dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
gpu->name);
queue_work(priv->wq, &gpu->recover_work);
return;
@@ -245,6 +245,8 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
if (IS_ERR(ptr))
return PTR_ERR(ptr);
+ msm_gem_object_set_name(bo, "preempt");
+
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -267,18 +269,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
- for (i = 0; i < gpu->nr_rings; i++) {
- if (!a5xx_gpu->preempt_bo[i])
- continue;
-
- msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
-
- if (a5xx_gpu->preempt_iova[i])
- msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
-
- drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
- a5xx_gpu->preempt_bo[i] = NULL;
- }
+ for (i = 0; i < gpu->nr_rings; i++)
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
}
void a5xx_preempt_init(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index a6f7c40454a6..f44553ec3193 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -501,7 +501,7 @@ enum a6xx_vfd_perfcounter_select {
PERF_VFDP_VS_STAGE_WAVES = 22,
};
-enum a6xx_hslq_perfcounter_select {
+enum a6xx_hlsq_perfcounter_select {
PERF_HLSQ_BUSY_CYCLES = 0,
PERF_HLSQ_STALL_CYCLES_UCHE = 1,
PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
@@ -2959,6 +2959,8 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+#define A6XX_GRAS_LRZ_CNTL_UNK3 0x00000008
+#define A6XX_GRAS_LRZ_CNTL_UNK4 0x00000010
#define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101
@@ -2997,6 +2999,13 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000
#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
@@ -3449,6 +3458,7 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3642,6 +3652,9 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+#define REG_A6XX_RB_LRZ_CNTL 0x00008898
+#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
+
#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
@@ -3674,6 +3687,14 @@ static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
}
+#define REG_A6XX_RB_MSAA_CNTL 0x000088d5
+#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK 0x00000018
+#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK;
+}
+
#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
@@ -3684,6 +3705,12 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
}
#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
+}
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
@@ -3780,6 +3807,9 @@ static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val
{
return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
}
+#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000
+
+#define REG_A6XX_RB_UNKNOWN_8C01 0x00008c01
#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -4465,6 +4495,7 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
@@ -4643,6 +4674,8 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
#define REG_A6XX_SP_UNKNOWN_AB20 0x0000ab20
+#define REG_A6XX_SP_UNKNOWN_ACC0 0x0000acc0
+
#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
#define REG_A6XX_SP_UNKNOWN_AE03 0x0000ae03
@@ -4700,11 +4733,34 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap va
return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+
+#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
+#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x01fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
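Note the extra `val >> 6` above: unlike the other field helpers, this one takes a pitch in bytes and encodes it in 64-byte units before shifting it into place. A sketch of the arithmetic:

	/* A 256-byte pitch: (256 >> 6) << 9 == 0x800; pitch must be 64-byte aligned */
	uint32_t pitch = A6XX_SP_PS_2D_SRC_PITCH_PITCH(256);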
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
@@ -5033,6 +5089,12 @@ static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
+}
#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A6XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
@@ -5365,5 +5427,9 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
+
#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index d4e98e5876bc..5beb83d1cf87 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -51,10 +51,31 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
return IRQ_HANDLED;
}
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
+}
+
/* Check to see if the GX rail is still powered */
-static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
- u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
@@ -153,7 +174,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
val == 0xbabeface, 100, 10000);
if (ret)
- dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
return ret;
}
@@ -168,7 +189,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
val & 1, 100, 10000);
if (ret)
- dev_err(gmu->dev, "Unable to start the HFI queues\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
return ret;
}
@@ -209,7 +230,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
val & (1 << ack), 100, 10000);
if (ret)
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Timeout waiting for GMU OOB set %s: 0x%x\n",
name,
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
@@ -251,7 +272,7 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
(val & 0x38) == 0x28, 1, 100);
if (ret) {
- dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -273,7 +294,7 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
(val & 0x04), 100, 10000);
if (ret)
- dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -317,7 +338,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
/* Check to see if the GMU really did slumber */
if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
!= 0x0f) {
- dev_err(gmu->dev, "The GMU did not go into slumber\n");
+ DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
ret = -ETIMEDOUT;
}
}
@@ -339,23 +360,27 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
val & (1 << 1), 100, 10000);
if (ret) {
- dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
return ret;
}
ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
!val, 100, 10000);
- if (!ret) {
- gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
-
- /* Re-enable the power counter */
- gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
- return 0;
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
}
- dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
- return ret;
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ /* Set up CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+ gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
+
+ /* Enable the power counter */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+ return 0;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
@@ -368,7 +393,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
val, val & (1 << 16), 100, 10000);
if (ret)
- dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
@@ -520,7 +545,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
/* Sanity check the size of the firmware that was loaded */
if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"GMU firmware is bigger than the available region\n");
return -EINVAL;
}
@@ -764,7 +789,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
*/
if (ret)
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Unable to slumber GMU: status = 0%x/0%x\n",
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
@@ -843,7 +868,7 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
- dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
for (i = i - 1 ; i >= 0; i--)
iommu_unmap(gmu->domain,
@@ -902,26 +927,6 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return ret;
}
-/* Get the list of RPMh voltage levels from cmd-db */
-static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
-{
- u32 len = cmd_db_read_aux_data_len(id);
-
- if (!len)
- return 0;
-
- if (WARN_ON(len > size))
- return -EINVAL;
-
- cmd_db_read_aux_data(id, vals, len);
-
- /*
- * The data comes back as an array of unsigned shorts so adjust the
- * count accordingly
- */
- return len >> 1;
-}
-
/* Return the 'arc-level' for the given frequency */
static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
{
@@ -949,11 +954,30 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
- unsigned long *freqs, int freqs_count,
- u16 *pri, int pri_count,
- u16 *sec, int sec_count)
+ unsigned long *freqs, int freqs_count, const char *id)
{
int i, j;
+ const u16 *pri, *sec;
+ size_t pri_count, sec_count;
+
+ pri = cmd_db_read_aux_data(id, &pri_count);
+ if (IS_ERR(pri))
+ return PTR_ERR(pri);
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ pri_count >>= 1;
+ if (!pri_count)
+ return -EINVAL;
+
+ sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+ if (IS_ERR(sec))
+ return PTR_ERR(sec);
+
+ sec_count >>= 1;
+ if (!sec_count)
+ return -EINVAL;
/* Construct a vote for each frequency */
for (i = 0; i < freqs_count; i++) {
@@ -969,12 +993,12 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
}
if (j == pri_count) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"Level %u not found in in the RPMh list\n",
level);
- dev_err(dev, "Available levels:\n");
+ DRM_DEV_ERROR(dev, "Available levels:\n");
for (j = 0; j < pri_count; j++)
- dev_err(dev, " %u\n", pri[j]);
+ DRM_DEV_ERROR(dev, " %u\n", pri[j]);
return -EINVAL;
}
@@ -1012,25 +1036,15 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
-
- u16 gx[16], cx[16], mx[16];
- u32 gxcount, cxcount, mxcount;
int ret;
- /* Get the list of available voltage levels for each component */
- gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
- cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
- mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));
-
/* Build the GX votes */
ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
- gmu->gpu_freqs, gmu->nr_gpu_freqs,
- gx, gxcount, mx, mxcount);
+ gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
/* Build the CX votes */
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
- gmu->gmu_freqs, gmu->nr_gmu_freqs,
- cx, cxcount, mx, mxcount);
+ gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
return ret;
}
@@ -1081,7 +1095,7 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
*/
ret = dev_pm_opp_of_add_table(gmu->dev);
if (ret) {
- dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
return ret;
}
@@ -1122,13 +1136,13 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
IORESOURCE_MEM, name);
if (!res) {
- dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!ret) {
- dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
@@ -1145,7 +1159,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
name, gmu);
if (ret) {
- dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
return ret;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 35f765afae45..c721d9165d8e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -164,4 +164,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
+
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index db56f263ed77..1cc1c135236b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 631257c297fd..fefe773c989e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -4,6 +4,7 @@
#include "msm_gem.h"
#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
@@ -67,13 +68,36 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}
+static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+ u64 iova)
+{
+ OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+ OUT_RING(ring, counter | (1 << 30) | (2 << 18));
+ OUT_RING(ring, lower_32_bits(iova));
+ OUT_RING(ring, upper_32_bits(iova));
+}
+
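The magic constants here follow the CP_REG_TO_MEM packet layout: bits 0-17 name the source register, the dword count field starts at bit 18, and bit 30 requests a 64-bit memory write. The same packet written with the generated PM4 field helpers (a sketch; names assumed from adreno_pm4.xml.h):

	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
	OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
		CP_REG_TO_MEM_0_CNT(2) |	/* two dwords: LO and HI */
		CP_REG_TO_MEM_0_64B);		/* 64-bit destination write */
	OUT_RING(ring, lower_32_bits(iova));
	OUT_RING(ring, upper_32_bits(iova));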
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
+ unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ rbmemptr_stats(ring, index, cpcycles_start));
+
+ /*
+	 * For PM4 the GMU register offsets are calculated from the base of the
+	 * GPU registers, so on A630 we need to add 0x1a800 to the register
+	 * value to get the right offset from PM4.
+ */
+ get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ rbmemptr_stats(ring, index, alwayson_start));
+
/* Invalidate CCU depth and color */
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
@@ -98,6 +122,11 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
}
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ rbmemptr_stats(ring, index, alwayson_end));
+
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
@@ -112,6 +141,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
+ trace_msm_gpu_submit_flush(submit,
+ gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
+ REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+
a6xx_flush(gpu, ring);
}
@@ -300,6 +333,8 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
return ret;
}
+
+ msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -387,14 +422,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
- /* FIXME: not sure if this should live here or in a6xx_gmu.c */
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
- 0xff000000);
- gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
- 0xff, 0x20);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
- 0x01);
-
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
@@ -481,7 +508,7 @@ out:
static void a6xx_dump(struct msm_gpu *gpu)
{
- dev_info(&gpu->pdev->dev, "status: %08x\n",
+ DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
gpu_read(gpu, REG_A6XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -498,7 +525,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
adreno_dump_info(gpu);
for (i = 0; i < 8; i++)
- dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
if (hang_debug)
@@ -645,33 +672,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
-static const u32 a6xx_registers[] = {
- 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
- 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
- 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
- 0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
- 0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
- 0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
- 0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
- 0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
- 0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
- 0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
- 0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
- 0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
- 0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
- 0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
- 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
- 0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
- 0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
- 0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
- 0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
- 0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
- 0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
- 0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
- 0xa610, 0xa617, 0xa630, 0xa630,
- ~0
-};
-
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -724,14 +724,6 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
- struct drm_printer *p)
-{
- adreno_show(gpu, state, p);
-}
-#endif
-
static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -746,8 +738,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
- if (a6xx_gpu->sqe_iova)
- msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
}
@@ -796,6 +787,8 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gmu_set_freq,
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
},
.get_timestamp = a6xx_get_timestamp,
};
@@ -817,7 +810,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a6xx_gpu->base;
gpu = &adreno_gpu->base;
- adreno_gpu->registers = a6xx_registers;
+ adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 4127dcebc202..528a4cfe07cd 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -56,6 +56,14 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
+int a6xx_gpu_state_put(struct msm_gpu_state *state);
+
#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
new file mode 100644
index 000000000000..e686331fa089
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -0,0 +1,1165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/ascii85.h>
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.h"
+#include "a6xx_gpu_state.h"
+#include "a6xx_gmu.xml.h"
+
+struct a6xx_gpu_state_obj {
+ const void *handle;
+ u32 *data;
+};
+
+struct a6xx_gpu_state {
+ struct msm_gpu_state base;
+
+ struct a6xx_gpu_state_obj *gmu_registers;
+ int nr_gmu_registers;
+
+ struct a6xx_gpu_state_obj *registers;
+ int nr_registers;
+
+ struct a6xx_gpu_state_obj *shaders;
+ int nr_shaders;
+
+ struct a6xx_gpu_state_obj *clusters;
+ int nr_clusters;
+
+ struct a6xx_gpu_state_obj *dbgahb_clusters;
+ int nr_dbgahb_clusters;
+
+ struct a6xx_gpu_state_obj *indexed_regs;
+ int nr_indexed_regs;
+
+ struct a6xx_gpu_state_obj *debugbus;
+ int nr_debugbus;
+
+ struct a6xx_gpu_state_obj *vbif_debugbus;
+
+ struct a6xx_gpu_state_obj *cx_debugbus;
+ int nr_cx_debugbus;
+
+ struct list_head objs;
+};
+
+static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
+{
+ in[0] = val;
+ in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
+{
+ in[0] = target;
+ in[1] = (((u64) reg) << 44 | dwords);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_FINI(u64 *in)
+{
+ in[0] = 0;
+ in[1] = 0;
+
+ return 2;
+}
+
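Each crashdumper script entry is a pair of quadwords: the register offset sits in the top bits of the second qword, bit 21 plus a count of one marks a write (payload in the first qword), and a bare count marks a read into the given target address. A minimal script sketch, assuming a mapped scratch BO at ptr/iova:

	u64 *in = ptr;			/* script goes at the front of the BO */
	u64 out = iova + 8192;		/* captured data lands after the script */

	in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, 0x41 << 8);
	in += CRASHDUMP_READ(in, 0x8000, 7, out);	/* 7 dwords from 0x8000 */
	CRASHDUMP_FINI(in);		/* two zero qwords terminate the script */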
+struct a6xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a6xx_state_memobj {
+ struct list_head node;
+ unsigned long long data[];
+};
+
+static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr,
+	size_t objsize)
+{
+ struct a6xx_state_memobj *obj =
+ kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+
+ if (!obj)
+ return NULL;
+
+ list_add_tail(&obj->node, &a6xx_state->objs);
+ return &obj->data;
+}
+
+static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
+ size_t size)
+{
+ void *dst = state_kcalloc(a6xx_state, 1, size);
+
+ if (dst)
+ memcpy(dst, src, size);
+ return dst;
+}
+
+/*
+ * Allocate 1MB for the crashdumper scratch region - 8k for the script and
+ * the rest for the data
+ */
+#define A6XX_CD_DATA_OFFSET 8192
+#define A6XX_CD_DATA_SIZE (SZ_1M - 8192)
+
+static int a6xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ &dumper->bo, &dumper->iova);
+
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
+
+ return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
+ return -EINVAL;
+
+ /* Make sure all pending memory writes are posted */
+ wmb();
+
+ gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
+ REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+ ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
+ val & 0x02, 100, 10000);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
+
+ return ret;
+}
+
+/* read a value from the GX debug bus */
+static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+#define cxdbg_write(ptr, offset, val) \
+ msm_writel((val), (ptr) + ((offset) << 2))
+
+#define cxdbg_read(ptr, offset) \
+ msm_readl((ptr) + ((offset) << 2))
+
+/* read a value from the CX debug bus */
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+/* Read a chunk of data from the VBIF debug bus */
+static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1,
+ u32 reg, int count, u32 *data)
+{
+ int i;
+
+ gpu_write(gpu, ctrl0, reg);
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, ctrl1, i);
+ data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
+ }
+
+ return count;
+}
+
+#define AXI_ARB_BLOCKS 2
+#define XIN_AXI_BLOCKS 5
+#define XIN_CORE_BLOCKS 4
+
+#define VBIF_DEBUGBUS_BLOCK_SIZE \
+ ((16 * AXI_ARB_BLOCKS) + \
+ (18 * XIN_AXI_BLOCKS) + \
+ (12 * XIN_CORE_BLOCKS))
+
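The macro works out to (16 * 2) + (18 * 5) + (12 * 4) = 170 dwords per snapshot; a compile-time sanity check could read:

	_Static_assert(VBIF_DEBUGBUS_BLOCK_SIZE == 170,
		"VBIF debugbus snapshot should be 170 dwords");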
+static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_gpu_state_obj *obj)
+{
+ u32 clk, *ptr;
+ int i;
+
+ obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
+ sizeof(u32));
+ if (!obj->data)
+ return;
+
+ obj->handle = NULL;
+
+ /* Get the current clock setting */
+ clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON);
+
+ /* Force on the bus so we can read it */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON,
+ clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ /* We will read from BUS2 first, so disable BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0);
+
+ /* Enable the VBIF bus for reading */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1);
+
+ ptr = obj->data;
+
+ for (i = 0; i < AXI_ARB_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << (i + 16), 16, ptr);
+
+ for (i = 0; i < XIN_AXI_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << i, 18, ptr);
+
+ /* Stop BUS2 so we can turn on BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0);
+
+ for (i = 0; i < XIN_CORE_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL1,
+ 1 << i, 12, ptr);
+
+ /* Restore the VBIF clock setting */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk);
+}
+
+static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += debugbus_read(gpu, block->id, i, ptr);
+}
+
+static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+}
+
+static void a6xx_get_debugbus(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct resource *res;
+ void __iomem *cxdbg = NULL;
+
+ /* Set up the GX debug bus */
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+ /* Set up the CX debug bus - it lives elsewhere in the system so do a
+ * temporary ioremap for the registers
+ */
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM,
+ "cx_dbgc");
+
+ if (res)
+ cxdbg = ioremap(res->start, resource_size(res));
+
+ if (cxdbg) {
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+ 0x76543210);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+ 0xFEDCBA98);
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ }
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_debugbus_blocks),
+ sizeof(*a6xx_state->debugbus));
+
+ if (a6xx_state->debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state,
+ &a6xx_debugbus_blocks[i],
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+ }
+
+ a6xx_state->vbif_debugbus =
+ state_kcalloc(a6xx_state, 1,
+ sizeof(*a6xx_state->vbif_debugbus));
+
+ if (a6xx_state->vbif_debugbus)
+ a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+ a6xx_state->vbif_debugbus);
+
+ if (cxdbg) {
+ a6xx_state->cx_debugbus =
+ state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks),
+ sizeof(*a6xx_state->cx_debugbus));
+
+ if (a6xx_state->cx_debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
+ a6xx_get_cx_debugbus_block(cxdbg,
+ a6xx_state,
+ &a6xx_cx_debugbus_blocks[i],
+ &a6xx_state->cx_debugbus[i]);
+
+ a6xx_state->nr_cx_debugbus =
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+ }
+
+ iounmap(cxdbg);
+ }
+}
+
+#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)
+
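The register tables consumed below are flattened pairs of inclusive [start, end] bounds, so RANGE(reg, a) is the number of registers covered by the pair starting at index `a`. A worked example:

	static const u32 example[] = { 0x8000, 0x8006, 0x8010, 0x8092 };

	RANGE(example, 0);	/* 0x8006 - 0x8000 + 1 == 7 registers   */
	RANGE(example, 2);	/* 0x8092 - 0x8010 + 1 == 131 registers */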
+/* Read a data cluster from behind the AHB aperture */
+static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_dbgahb_cluster *dbgahb,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (dbgahb->statetype + i * 2) << 8);
+
+ for (j = 0; j < dbgahb->count; j += 2) {
+ int count = RANGE(dbgahb->registers, j);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ dbgahb->registers[j] - (dbgahb->base >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = dbgahb;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_dbgahb_clusters),
+ sizeof(*a6xx_state->dbgahb_clusters));
+
+ if (!a6xx_state->dbgahb_clusters)
+ return;
+
+ a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++)
+ a6xx_get_dbgahb_cluster(gpu, a6xx_state,
+ &a6xx_dbgahb_clusters[i],
+ &a6xx_state->dbgahb_clusters[i], dumper);
+}
+
+/* Read a data cluster from the CP aperture with the crashdumper */
+static void a6xx_get_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_cluster *cluster,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel_reg)
+ in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
+ (cluster->id << 8) | (i << 4) | i);
+
+ for (j = 0; j < cluster->count; j += 2) {
+ int count = RANGE(cluster->registers, j);
+
+ in += CRASHDUMP_READ(in, cluster->registers[j],
+ count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = cluster;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters));
+
+ if (!a6xx_state->clusters)
+ return;
+
+ a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++)
+ a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i],
+ &a6xx_state->clusters[i], dumper);
+}
+
+/* Read a shader / debug block from the HLSQ aperture with the crashdumper */
+static void a6xx_get_shader_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_shader_block *block,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
+ int i;
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (block->type << 8) | i);
+
+		/* Step the destination so each bank lands at its own offset */
+		in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
+			block->size, dumper->iova + A6XX_CD_DATA_OFFSET +
+			(i * block->size * sizeof(u32)));
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = block;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_shaders(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->shaders = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders));
+
+ if (!a6xx_state->shaders)
+ return;
+
+ a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++)
+ a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i],
+ &a6xx_state->shaders[i], dumper);
+}
+
+/* Read registers from behind the HLSQ aperture with the crashdumper */
+static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ regs->registers[i] - (regs->val0 >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers using the crashdumper */
+static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ /* Some blocks might need to program a selector register first */
+ if (regs->val0)
+ in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+
+ in += CRASHDUMP_READ(in, regs->registers[i], count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers via AHB */
+static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gpu_read(gpu,
+ regs->registers[i] + j);
+ }
+}
+
+/* Read a block of GMU registers */
+static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gmu_read(gmu,
+ regs->registers[i] + j);
+ }
+}
+
+static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
+ 2, sizeof(*a6xx_state->gmu_registers));
+
+ if (!a6xx_state->gmu_registers)
+ return;
+
+ a6xx_state->nr_gmu_registers = 2;
+
+ /* Get the CX GMU registers from AHB */
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
+ &a6xx_state->gmu_registers[0]);
+
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return;
+
+ /* Set the fence to ALLOW mode so we can access the registers */
+ gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+ &a6xx_state->gmu_registers[1]);
+}
+
+static void a6xx_get_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+ ARRAY_SIZE(a6xx_reglist) +
+ ARRAY_SIZE(a6xx_hlsq_reglist);
+ int index = 0;
+
+ a6xx_state->registers = state_kcalloc(a6xx_state,
+ count, sizeof(*a6xx_state->registers));
+
+ if (!a6xx_state->registers)
+ return;
+
+ a6xx_state->nr_registers = count;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_ahb_reglist[i],
+ &a6xx_state->registers[index++]);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+ a6xx_get_crashdumper_registers(gpu,
+ a6xx_state, &a6xx_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++)
+ a6xx_get_crashdumper_hlsq_registers(gpu,
+ a6xx_state, &a6xx_hlsq_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+}
+
+/* Read a block of data from an indexed register pair */
+static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_indexed_registers *indexed,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+
+ obj->handle = (const void *) indexed;
+ obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ /* All the indexed banks start at address 0 */
+ gpu_write(gpu, indexed->addr, 0);
+
+ /* Read the data - each read increments the internal address by 1 */
+ for (i = 0; i < indexed->count; i++)
+ obj->data[i] = gpu_read(gpu, indexed->data);
+}
+
+static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ u32 mempool_size;
+ int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1;
+ int i;
+
+ a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+		sizeof(*a6xx_state->indexed_regs));
+ if (!a6xx_state->indexed_regs)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
+ &a6xx_state->indexed_regs[i]);
+
+ /* Set the CP mempool size to 0 to stabilize it while dumping */
+ mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
+
+ /* Get the contents of the CP mempool */
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+ &a6xx_state->indexed_regs[i]);
+
+ /*
+ * Offset 0x2000 in the mempool is the size - copy the saved size over
+ * so the data is consistent
+ */
+ a6xx_state->indexed_regs[i].data[0x2000] = mempool_size;
+
+ /* Restore the size in the hardware */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
+}
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a6xx_crashdumper dumper = { 0 };
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
+ GFP_KERNEL);
+
+ if (!a6xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&a6xx_state->objs);
+
+ /* Get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &a6xx_state->base);
+
+ a6xx_get_gmu_registers(gpu, a6xx_state);
+
+	/* If GX isn't on, the rest of the data isn't going to be accessible */
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return &a6xx_state->base;
+
+ /* Get the banks of indexed registers */
+ a6xx_get_indexed_registers(gpu, a6xx_state);
+
+ /* Try to initialize the crashdumper */
+ if (!a6xx_crashdumper_init(gpu, &dumper)) {
+ a6xx_get_registers(gpu, a6xx_state, &dumper);
+ a6xx_get_shaders(gpu, a6xx_state, &dumper);
+ a6xx_get_clusters(gpu, a6xx_state, &dumper);
+ a6xx_get_dbgahb_clusters(gpu, a6xx_state, &dumper);
+
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+ }
+
+ a6xx_get_debugbus(gpu, a6xx_state);
+
+ return &a6xx_state->base;
+}
+
+static void a6xx_gpu_state_destroy(struct kref *kref)
+{
+ struct a6xx_state_memobj *obj, *tmp;
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+
+ list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
+ kfree(obj);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a6xx_state);
+}
+
+int a6xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a6xx_gpu_state_destroy);
+}
+
+static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
+ struct drm_printer *p)
+{
+ int i, index = 0;
+
+ if (!data)
+ return;
+
+ for (i = 0; i < count; i += 2) {
+ u32 count = RANGE(registers, i);
+ u32 offset = registers[i];
+ int j;
+
+ for (j = 0; j < count; index++, offset++, j++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+}
+
+static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
+{
+ char out[ASCII85_BUFSZ];
+ long i, l, datalen = 0;
+
+ for (i = 0; i < len >> 2; i++) {
+ if (data[i])
+ datalen = (i + 1) << 2;
+ }
+
+ if (datalen == 0)
+ return;
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ l = ascii85_encode_len(datalen);
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(data[i], out));
+
+ drm_puts(p, "\n");
+}
+
+static void print_name(struct drm_printer *p, const char *fmt, const char *name)
+{
+ drm_puts(p, fmt);
+ drm_puts(p, name);
+ drm_puts(p, "\n");
+}
+
+static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_shader_block *block = obj->handle;
+ int i;
+
+ if (!obj->handle)
+ return;
+
+ print_name(p, " - type: ", block->name);
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ drm_printf(p, " - bank: %d\n", i);
+ drm_printf(p, " size: %d\n", block->size);
+
+ if (!obj->data)
+ continue;
+
+ print_ascii85(p, block->size << 2,
+ obj->data + (block->size * i));
+ }
+}
+
+static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
+ struct drm_printer *p)
+{
+ int ctx, index = 0;
+
+ for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) {
+ int j;
+
+ drm_printf(p, " - context: %d\n", ctx);
+
+ for (j = 0; j < size; j += 2) {
+ u32 count = RANGE(registers, j);
+ u32 offset = registers[j];
+ int k;
+
+ for (k = 0; k < count; index++, offset++, k++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+ }
+}
+
+static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
+
+ if (dbgahb) {
+ print_name(p, " - cluster-name: ", dbgahb->name);
+ a6xx_show_cluster_data(dbgahb->registers, dbgahb->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_cluster *cluster = obj->handle;
+
+ if (cluster) {
+ print_name(p, " - cluster-name: ", cluster->name);
+ a6xx_show_cluster_data(cluster->registers, cluster->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_indexed_registers *indexed = obj->handle;
+
+ if (!indexed)
+ return;
+
+ print_name(p, " - regs-name: ", indexed->name);
+ drm_printf(p, " dwords: %d\n", indexed->count);
+
+ print_ascii85(p, indexed->count << 2, obj->data);
+}
+
+static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
+ u32 *data, struct drm_printer *p)
+{
+ if (block) {
+ print_name(p, " - debugbus-block: ", block->name);
+
+ /*
+ * count for regular debugbus data is in quadwords,
+ * but print the size in dwords for consistency
+ */
+ drm_printf(p, " count: %d\n", block->count << 1);
+
+ print_ascii85(p, block->count << 3, data);
+ }
+}
+
+static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
+ struct drm_printer *p)
+{
+ int i;
+
+ for (i = 0; i < a6xx_state->nr_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+
+ if (a6xx_state->vbif_debugbus) {
+ struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
+
+ drm_puts(p, " - debugbus-block: A6XX_DBGBUS_VBIF\n");
+ drm_printf(p, " count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
+
+ /* vbif debugbus data is in dwords. Confusing, huh? */
+ print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
+ }
+
+ for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+}
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ drm_puts(p, "registers:\n");
+ for (i = 0; i < a6xx_state->nr_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "registers-gmu:\n");
+ for (i = 0; i < a6xx_state->nr_gmu_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "indexed-registers:\n");
+ for (i = 0; i < a6xx_state->nr_indexed_regs; i++)
+ a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
+
+ drm_puts(p, "shader-blocks:\n");
+ for (i = 0; i < a6xx_state->nr_shaders; i++)
+ a6xx_show_shader(&a6xx_state->shaders[i], p);
+
+ drm_puts(p, "clusters:\n");
+ for (i = 0; i < a6xx_state->nr_clusters; i++)
+ a6xx_show_cluster(&a6xx_state->clusters[i], p);
+
+ for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
+ a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+
+ drm_puts(p, "debugbus:\n");
+ a6xx_show_debugbus(a6xx_state, p);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
new file mode 100644
index 000000000000..68cccfa2870a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_CRASH_DUMP_H_
+#define _A6XX_CRASH_DUMP_H_
+
+#include "a6xx.xml.h"
+
+#define A6XX_NUM_CONTEXTS 2
+#define A6XX_NUM_SHADER_BANKS 3
+
+static const u32 a6xx_gras_cluster[] = {
+ 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6,
+ 0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+ 0x8400, 0x840b,
+};
+
+static const u32 a6xx_ps_cluster_rac[] = {
+ 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865,
+ 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+ 0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a,
+ 0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33,
+};
+
+static const u32 a6xx_ps_cluster_rbp[] = {
+ 0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1,
+ 0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25,
+};
+
+static const u32 a6xx_ps_cluster[] = {
+ 0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const u32 a6xx_fe_cluster[] = {
+ 0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009,
+ 0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
+};
+
+static const u32 a6xx_pc_vs_cluster[] = {
+ 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
+};
+
+#define CLUSTER_FE 0
+#define CLUSTER_SP_VS 1
+#define CLUSTER_PC_VS 2
+#define CLUSTER_GRAS 3
+#define CLUSTER_SP_PS 4
+#define CLUSTER_PS 5
+
+#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
+ { .id = _id, .name = #_id,\
+ .registers = _reg, \
+ .count = ARRAY_SIZE(_reg), \
+ .sel_reg = _sel_reg, .sel_val = _sel_val }
+
+static const struct a6xx_cluster {
+ u32 id;
+ const char *name;
+ const u32 *registers;
+ size_t count;
+ u32 sel_reg;
+ u32 sel_val;
+} a6xx_clusters[] = {
+ CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
+ CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
+ CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
+};
+
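The #_id stringification means the recorded name is the literal macro argument; for instance, the second entry above expands to roughly:

	{ .id = CLUSTER_PS, .name = "CLUSTER_PS",
	  .registers = a6xx_ps_cluster_rac,
	  .count = ARRAY_SIZE(a6xx_ps_cluster_rac),
	  .sel_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, .sel_val = 0x0 },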
+static const u32 a6xx_sp_vs_hlsq_cluster[] = {
+ 0xb800, 0xb803, 0xb820, 0xb822,
+};
+
+static const u32 a6xx_sp_vs_sp_cluster[] = {
+ 0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895,
+ 0xa8a0, 0xa8af, 0xa8c0, 0xa8c3,
+};
+
+static const u32 a6xx_hlsq_duplicate_cluster[] = {
+ 0xbb10, 0xbb11, 0xbb20, 0xbb29,
+};
+
+static const u32 a6xx_hlsq_2d_duplicate_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_duplicate_cluster[] = {
+ 0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20,
+};
+
+static const u32 a6xx_tp_duplicate_cluster[] = {
+ 0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382,
+};
+
+static const u32 a6xx_sp_ps_hlsq_cluster[] = {
+ 0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2,
+ 0xb9c0, 0xb9c9,
+};
+
+static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_ps_sp_cluster[] = {
+ 0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
+ 0xaa00, 0xaa00, 0xaa30, 0xaa31,
+};
+
+static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
+ 0xacc0, 0xacc0,
+};
+
+static const u32 a6xx_sp_ps_tp_cluster[] = {
+ 0xb180, 0xb183, 0xb190, 0xb191,
+};
+
+static const u32 a6xx_sp_ps_tp_2d_cluster[] = {
+ 0xb4c0, 0xb4d1,
+};
+
+#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \
+ { .name = #_id, .statetype = _type, .base = _base, \
+ .registers = _reg, .count = ARRAY_SIZE(_reg) }
+
+static const struct a6xx_dbgahb_cluster {
+ const char *name;
+ u32 statetype;
+ u32 base;
+ const u32 *registers;
+ size_t count;
+} a6xx_dbgahb_clusters[] = {
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster),
+};
+
+static const u32 a6xx_hlsq_registers[] = {
+ 0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15,
+ 0xbe20, 0xbe23,
+};
+
+static const u32 a6xx_sp_registers[] = {
+ 0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32,
+ 0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52,
+};
+
+static const u32 a6xx_tp_registers[] = {
+ 0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623,
+};
+
+struct a6xx_registers {
+ const u32 *registers;
+ size_t count;
+ u32 val0;
+ u32 val1;
+};
+
+#define HLSQ_DBG_REGS(_base, _type, _array) \
+ { .val0 = _base, .val1 = _type, .registers = _array, \
+ .count = ARRAY_SIZE(_array), }
+
+static const struct a6xx_registers a6xx_hlsq_reglist[] = {
+ HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers),
+ HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers),
+ HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers),
+};
+
+#define SHADER(_type, _size) \
+ { .type = _type, .name = #_type, .size = _size }
+
+static const struct a6xx_shader_block {
+ const char *name;
+ u32 type;
+ u32 size;
+} a6xx_shader_blocks[] = {
+ SHADER(A6XX_TP0_TMO_DATA, 0x200),
+ SHADER(A6XX_TP0_SMO_DATA, 0x80),
+ SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_TP1_TMO_DATA, 0x200),
+ SHADER(A6XX_TP1_SMO_DATA, 0x80),
+ SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_SP_INST_DATA, 0x800),
+ SHADER(A6XX_SP_LB_0_DATA, 0x800),
+ SHADER(A6XX_SP_LB_1_DATA, 0x800),
+ SHADER(A6XX_SP_LB_2_DATA, 0x800),
+ SHADER(A6XX_SP_LB_3_DATA, 0x800),
+ SHADER(A6XX_SP_LB_4_DATA, 0x800),
+ SHADER(A6XX_SP_LB_5_DATA, 0x200),
+ SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+ SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
+ SHADER(A6XX_SP_UAV_DATA, 0x80),
+ SHADER(A6XX_SP_INST_TAG, 0x80),
+ SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
+ SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
+ SHADER(A6XX_SP_SMO_TAG, 0x80),
+ SHADER(A6XX_SP_STATE_DATA, 0x3f),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580),
+ SHADER(A6XX_HLSQ_INST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4),
+ SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10),
+ SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28),
+ SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14),
+ SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
+ SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
+ SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
+};
+
+static const u32 a6xx_rb_rac_registers[] = {
+ 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25,
+ 0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52,
+};
+
+static const u32 a6xx_rb_rbp_registers[] = {
+ 0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43,
+ 0x8e53, 0x8e5f, 0x8e70, 0x8e77,
+};
+
+static const u32 a6xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9,
+ 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533,
+ 0x0540, 0x0555,
+ /* CP */
+ 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+ 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f,
+ 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
+ 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
+ 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
+ 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8,
+ 0x0a00, 0x0a03,
+ /* VSC */
+ 0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e,
+ /* UCHE */
+ 0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32,
+ 0x0e38, 0x0e39,
+ /* GRAS */
+ 0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b,
+ 0x8630, 0x8637,
+ /* VPC */
+ 0x9600, 0x9604, 0x9624, 0x9637,
+ /* PC */
+ 0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19,
+ 0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34,
+ 0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff,
+ /* VFD */
+ 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
+ 0xa630, 0xa630,
+};
+
+#define REGS(_array, _sel_reg, _sel_val) \
+ { .registers = _array, .count = ARRAY_SIZE(_array), \
+ .val0 = _sel_reg, .val1 = _sel_val }
+
+static const struct a6xx_registers a6xx_reglist[] = {
+ REGS(a6xx_registers, 0, 0),
+ REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+ REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+};
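The val0/val1 pair in REGS() names an optional sub-block select register and the value to program into it before the ranges are walked (the driver captures these through the crashdumper; direct MMIO below is a simplification). A minimal sketch, assuming the existing msm gpu_write()/gpu_read() accessors and inclusive start/end offset pairs:

	/* sketch only: dump one a6xx_registers entry */
	static void dump_reglist(struct msm_gpu *gpu,
			const struct a6xx_registers *regs)
	{
		size_t i;
		u32 offset;

		if (regs->val0)	/* e.g. RB_RB_SUB_BLOCK_SEL_CNTL_CD */
			gpu_write(gpu, regs->val0, regs->val1);

		for (i = 0; i < regs->count; i += 2)
			for (offset = regs->registers[i];
			     offset <= regs->registers[i + 1]; offset++)
				pr_info("0x%04x: 0x%08x\n", offset,
					gpu_read(gpu, offset));
	}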
+
+static const u32 a6xx_ahb_registers[] = {
+ /* RBBM_STATUS - RBBM_STATUS3 */
+ 0x210, 0x213,
+ /* CP_STATUS_1 */
+ 0x825, 0x825,
+};
+
+static const u32 a6xx_vbif_registers[] = {
+ 0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068,
+ 0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8,
+ 0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+ 0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c,
+ 0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c,
+ 0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const struct a6xx_registers a6xx_ahb_reglist[] = {
+ REGS(a6xx_ahb_registers, 0, 0),
+ REGS(a6xx_vbif_registers, 0, 0),
+};
+
+static const u32 a6xx_gmu_gx_registers[] = {
+ /* GMU GX */
+ 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
+ 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
+ 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
+ 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
+ 0x0100, 0x012b, 0x0140, 0x0140,
+};
+
+static const u32 a6xx_gmu_cx_registers[] = {
+ /* GMU CX */
+ 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
+ 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
+ 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
+ 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
+ 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
+ 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
+ 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
+ 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
+ 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+ /* GPU RSCC */
+ 0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
+ 0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
+ 0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
+ /* GMU AO */
+ 0x9300, 0x9316, 0x9400, 0x9400,
+ /* GPU CC */
+ 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
+ 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
+ 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
+ 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
+ 0xb800, 0xb802,
+ /* GPU CC ACD */
+ 0xbc00, 0xbc16, 0xbc20, 0xbc27,
+};
+
+static const struct a6xx_registers a6xx_gmu_reglist[] = {
+ REGS(a6xx_gmu_cx_registers, 0, 0),
+ REGS(a6xx_gmu_gx_registers, 0, 0),
+};
+
+static const struct a6xx_indexed_registers {
+ const char *name;
+ u32 addr;
+ u32 data;
+ u32 count;
+} a6xx_indexed_reglist[] = {
+ { "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
+ { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+ { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
+ { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
+};
+
+static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+ "CP_MEMPOOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
+};
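Indexed blocks are read through an address/data register pair rather than a flat range: write the starting index to .addr, then each read of .data returns one dword while the index auto-increments. A sketch of the readout loop, again assuming gpu_write()/gpu_read():

	/* sketch only: read out one indexed block via its ADDR/DATA pair */
	static void dump_indexed(struct msm_gpu *gpu,
			const struct a6xx_indexed_registers *indexed)
	{
		u32 i;

		gpu_write(gpu, indexed->addr, 0);	/* start at index 0 */

		for (i = 0; i < indexed->count; i++)
			pr_info("%s[%u] = 0x%08x\n", indexed->name, i,
				gpu_read(gpu, indexed->data));
	}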
+
+#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
+
+static const struct a6xx_debugbus_block {
+ const char *name;
+ u32 id;
+ u32 count;
+} a6xx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_CP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DPM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TESS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_PC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TSE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RAS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VSC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_COM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_A2D, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DCS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LARC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
+};
+
+static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 6ff9baec2658..eda11abc5f01 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -91,7 +91,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
if (ret) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Message %s id %d timed out waiting for response\n",
a6xx_hfi_msg_id[id], seqnum);
return -ETIMEDOUT;
@@ -110,7 +110,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
/* If the queue is empty our response never made it */
if (!ret) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"The HFI response queue is unexpectedly empty\n");
return -ENOENT;
@@ -120,20 +120,20 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
struct a6xx_hfi_msg_error *error =
(struct a6xx_hfi_msg_error *) &resp;
- dev_err(gmu->dev, "GMU firmware error %d\n",
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
error->code);
continue;
}
if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Unexpected message id %d on the response queue\n",
HFI_HEADER_SEQNUM(resp.ret_header));
continue;
}
if (resp.error) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Message %s id %d returned error %d\n",
a6xx_hfi_msg_id[id], seqnum, resp.error);
return -EINVAL;
@@ -163,7 +163,7 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
- dev_err(gmu->dev, "Unable to send message %s id %d\n",
+ DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
return ret;
}
@@ -317,7 +317,7 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
continue;
if (queue->header->read_index != queue->header->write_index)
- dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+ DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
queue->header->read_index = 0;
queue->header->write_index = 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 1318959d504d..641d3ba477b6 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -339,6 +339,15 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
#define REG_AXXX_CP_INT_CNTL 0x000001f2
+#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000
+#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000
+#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000
+#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000
+#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000
+#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000
+#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000
+#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000
+#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000
#define REG_AXXX_CP_INT_STATUS 0x000001f3
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 86abdb2b3a9c..714ed6505e47 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -27,6 +27,39 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
static const struct adreno_info gpulist[] = {
{
+ .rev = ADRENO_REV(2, 0, 0, 0),
+ .revn = 200,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, { /* A200 on i.MX51 has only 128 KiB GMEM */
+ .rev = ADRENO_REV(2, 0, 0, 1),
+ .revn = 201,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(2, 2, 0, ANY_ID),
+ .revn = 220,
+ .name = "A220",
+ .fw = {
+ [ADRENO_FW_PM4] = "leia_pm4_470.fw",
+ [ADRENO_FW_PFP] = "leia_pfp_470.fw",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
.revn = 305,
.name = "A305",
@@ -196,7 +229,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
- dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
return NULL;
}
@@ -205,7 +238,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
}
@@ -238,7 +271,8 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
if (ret == 0) {
unsigned int r, patch;
- if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2) {
+ if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
+ sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
rev->core = r / 100;
r %= 100;
rev->major = r / 10;
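For a concrete reading of the decomposition above, take a hypothetical compatible string "qcom,adreno-320.2", which sscanf() splits into r = 320 and patch = 2:

	/* illustrative only: "qcom,adreno-320.2" */
	rev->core    = 320 / 100;		/* 3 */
	rev->major   = (320 % 100) / 10;	/* 2 */
	rev->minor   = 320 % 10;		/* 0 */
	rev->patchid = 2;			/* from the ".2" suffix */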
@@ -253,7 +287,7 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
/* and if that fails, fall back to legacy "qcom,chipid" property: */
ret = of_property_read_u32(node, "qcom,chipid", &chipid);
if (ret) {
- dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
+ DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
return ret;
}
@@ -274,6 +308,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
static struct adreno_platform_config config = {};
const struct adreno_info *info;
struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu *gpu;
int ret;
@@ -296,6 +331,8 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
config.rev.minor, config.rev.patchid);
+ priv->is_a2xx = config.rev.core == 2;
+
gpu = info->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
@@ -323,9 +360,37 @@ static const struct component_ops a3xx_ops = {
.unbind = adreno_unbind,
};
+static void adreno_device_register_headless(void)
+{
+ /* On i.MX5 there is no top-level mdp/dpu node, so create a
+ * dummy "msm" platform device for the driver in that case.
+ */
+ struct platform_device_info dummy_info = {
+ .parent = NULL,
+ .name = "msm",
+ .id = -1,
+ .res = NULL,
+ .num_res = 0,
+ .data = NULL,
+ .size_data = 0,
+ .dma_mask = ~0,
+ };
+ platform_device_register_full(&dummy_info);
+}
+
static int adreno_probe(struct platform_device *pdev)
{
- return component_add(&pdev->dev, &a3xx_ops);
+ int ret;
+
+ ret = component_add(&pdev->dev, &a3xx_ops);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
+ adreno_device_register_headless();
+
+ return 0;
}
static int adreno_remove(struct platform_device *pdev)
@@ -337,6 +402,8 @@ static int adreno_remove(struct platform_device *pdev)
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
+ /* for compatibility with imx5 gpu: */
+ { .compatible = "amd,imageon" },
/* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
{}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 93d70f4a2154..2e4372ef17a3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -89,12 +89,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, newname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s from new location\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -109,12 +109,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s from legacy location\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
fwname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -130,19 +130,19 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware(&fw, newname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s with helper\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
}
}
- dev_err(drm->dev, "failed to load %s\n", fwname);
+ DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
fw = ERR_PTR(-ENOENT);
out:
kfree(newname);
@@ -209,14 +209,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
if (!ring)
continue;
- ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
- if (ret) {
- ring->iova = 0;
- dev_err(gpu->dev->dev,
- "could not map ringbuffer %d: %d\n", i, ret);
- return ret;
- }
-
ring->cur = ring->start;
ring->next = ring->start;
@@ -277,7 +269,7 @@ void adreno_recover(struct msm_gpu *gpu)
ret = msm_gpu_hw_init(gpu);
if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
}
@@ -319,16 +311,27 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
-
- OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
- OUT_RING(ring, 0x00000000);
}
- /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
- OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
- OUT_RING(ring, rbmemptr(ring, fence));
- OUT_RING(ring, submit->seqno);
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ if (!adreno_is_a2xx(adreno_gpu)) {
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ } else {
+ /* BIT(31) means something else on a2xx */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+ }
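Either way the CP ends up writing submit->seqno to the per-ring fence location; only the mechanism for raising the IRQ differs. A sketch of the host side that consumes it, assuming the msm memptrs layout and ignoring seqno wraparound for brevity:

	/* sketch only: in the IRQ/retire path */
	u32 fence = ring->memptrs->fence;	/* written by CP_EVENT_WRITE */

	if (fence >= submit->seqno)
		retire_submit(submit);		/* hypothetical helper */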
#if 0
if (adreno_is_a3xx(adreno_gpu)) {
@@ -406,7 +409,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
size = j + 1;
if (size) {
- state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+ state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
if (state->ring[i].data) {
memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
state->ring[i].data_size = size << 2;
@@ -414,6 +417,10 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
}
}
+ /* Some targets prefer to collect their own registers */
+ if (!adreno_gpu->registers)
+ return 0;
+
/* Count the number of registers */
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
count += adreno_gpu->registers[i + 1] -
@@ -445,7 +452,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
int i;
for (i = 0; i < ARRAY_SIZE(state->ring); i++)
- kfree(state->ring[i].data);
+ kvfree(state->ring[i].data);
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
@@ -475,34 +482,74 @@ int adreno_gpu_state_put(struct msm_gpu_state *state)
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
+ void *buf;
+ size_t buf_itr = 0, buffer_size;
char out[ASCII85_BUFSZ];
- long l, datalen, i;
+ long l;
+ int i;
- if (!ptr || !len)
- return;
+ if (!src || !len)
+ return NULL;
+
+ l = ascii85_encode_len(len);
/*
- * Only dump the non-zero part of the buffer - rarely will any data
- * completely fill the entire allocated size of the buffer
+ * Ascii85 outputs either a 5-byte string or a 1-byte string, so
+ * account for the worst case of 5 bytes per dword, plus 1 for the '\0'
*/
- for (datalen = 0, i = 0; i < len >> 2; i++) {
- if (ptr[i])
- datalen = (i << 2) + 1;
- }
+ buffer_size = (l * 5) + 1;
+
+ buf = kvmalloc(buffer_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ for (i = 0; i < l; i++)
+ buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+ ascii85_encode(src[i], out));
+
+ return buf;
+}
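Sizing note: ascii85_encode_len() returns the number of 4-byte groups (len / 4, rounded up), and each group encodes to at most 5 characters (an all-zero dword collapses to the single character 'z'). So for, say, a 16-byte source:

	l = ascii85_encode_len(16);	/* 4 groups */
	buffer_size = (4 * 5) + 1;	/* 21 bytes worst case, incl. '\0' */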
- /* Skip printing the object if it is empty */
- if (datalen == 0)
+/* len is expected to be in bytes */
+static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded)
+{
+ if (!*ptr || !len)
return;
- l = ascii85_encode_len(datalen);
+ if (!*encoded) {
+ long datalen, i;
+ u32 *buf = *ptr;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will
+ * any data completely fill the entire allocated size of
+ * the buffer.
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++)
+ if (buf[i])
+ datalen = ((i + 1) << 2);
+
+ /*
+ * If we reach here, then the originally captured binary buffer
+ * will be replaced with the ascii85 encoded string
+ */
+ *ptr = adreno_gpu_ascii85_encode(buf, datalen);
+
+ kvfree(buf);
+
+ *encoded = true;
+ }
+
+ if (!*ptr)
+ return;
drm_puts(p, " data: !!ascii85 |\n");
drm_puts(p, " ");
- for (i = 0; i < l; i++)
- drm_puts(p, ascii85_encode(ptr[i], out));
+ drm_puts(p, *ptr);
drm_puts(p, "\n");
}
@@ -534,8 +581,8 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
- adreno_show_object(p, state->ring[i].data,
- state->ring[i].data_size);
+ adreno_show_object(p, &state->ring[i].data,
+ state->ring[i].data_size, &state->ring[i].encoded);
}
if (state->bos) {
@@ -546,17 +593,19 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
state->bos[i].iova);
drm_printf(p, " size: %zd\n", state->bos[i].size);
- adreno_show_object(p, state->bos[i].data,
- state->bos[i].size);
+ adreno_show_object(p, &state->bos[i].data,
+ state->bos[i].size, &state->bos[i].encoded);
}
}
- drm_puts(p, "registers:\n");
+ if (state->nr_registers) {
+ drm_puts(p, "registers:\n");
- for (i = 0; i < state->nr_registers; i++) {
- drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
- state->registers[i * 2] << 2,
- state->registers[(i * 2) + 1]);
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
}
}
#endif
@@ -595,6 +644,9 @@ void adreno_dump(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
+ if (!adreno_gpu->registers)
+ return;
+
/* dump these out in a form that can be parsed by demsm: */
printk("IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -635,7 +687,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
- dev_err(dev, "Could not find the GPU powerlevels\n");
+ DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
@@ -674,7 +726,7 @@ static int adreno_get_pwrlevels(struct device *dev,
else {
ret = dev_pm_opp_of_add_table(dev);
if (ret)
- dev_err(dev, "Unable to set the OPP table\n");
+ DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
}
if (!ret) {
@@ -717,6 +769,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
+ /* maximum range of a2xx mmu */
+ if (adreno_is_a2xx(adreno_gpu))
+ adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
adreno_gpu_config.nr_rings = nr_rings;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index de6e6ee42fba..5db459bc28a7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -21,6 +21,7 @@
#define __ADRENO_GPU_H__
#include <linux/firmware.h>
+#include <linux/iopoll.h>
#include "msm_gpu.h"
@@ -154,6 +155,20 @@ struct adreno_platform_config {
__ret; \
})
+static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 300);
+}
+
+static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 210);
+}
+
+static inline bool adreno_is_a225(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 225;
+}
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -334,6 +349,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
@@ -375,4 +391,9 @@ static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
 ((1 << 29) | \
((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
#endif /* __ADRENO_GPU_H__ */
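gpu_poll_timeout() just forwards to readl_poll_timeout(), converting the dword register offset into a byte offset into the MMIO mapping ((addr) << 2). Illustrative usage, where REG_STATUS and STATUS_BUSY are placeholders for a real status register and busy bit:

	u32 val;
	int ret = gpu_poll_timeout(gpu, REG_STATUS, val,
			!(val & STATUS_BUSY), 100, 10000);
	if (ret)
		return ret;	/* still busy after 10ms, polled every 100us */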
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 15eb03bed984..79b907ac0b4b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -108,6 +108,13 @@ enum pc_di_src_sel {
DI_SRC_SEL_RESERVED = 3,
};
+enum pc_di_face_cull_sel {
+ DI_FACE_CULL_NONE = 0,
+ DI_FACE_CULL_FETCH = 1,
+ DI_FACE_BACKFACE_CULL = 2,
+ DI_FACE_FRONTFACE_CULL = 3,
+};
+
enum pc_di_index_size {
INDEX_SIZE_IGN = 0,
INDEX_SIZE_16_BIT = 0,
@@ -356,6 +363,7 @@ enum a6xx_render_mode {
RM6_GMEM = 4,
RM6_BLIT2D = 5,
RM6_RESOLVE = 6,
+ RM6_BLIT2DSCALE = 12,
};
enum pseudo_reg {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index 879c13fe74e0..e45c69044935 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -319,10 +319,8 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
unsigned long irq_flags;
int i, irq_count, enable_count, cb_count;
- if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
- DPU_ERROR("invalid parameters\n");
+ if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
return 0;
- }
for (i = 0; i < irq_obj->total_irqs; i++) {
spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
@@ -343,31 +341,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
- struct dentry *parent)
-{
- dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
- parent, &dpu_kms->irq_obj,
- &dpu_debugfs_core_irq_fops);
-
- return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove(dpu_kms->irq_obj.debugfs_file);
- dpu_kms->irq_obj.debugfs_file = NULL;
-}
-
-#else
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
- return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
+ debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
+ &dpu_debugfs_core_irq_fops);
}
#endif
@@ -376,10 +354,7 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
+ if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
@@ -410,20 +385,12 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
}
}
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
-{
- return 0;
-}
-
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
+ if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index 5e98bba46af5..e9015a2b23fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -24,13 +24,6 @@
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
/**
- * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
- * @dpu_kms: DPU handle
- * @return: 0 if success; error code otherwise
- */
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
-
-/**
* dpu_core_irq_uninstall - uninstall core IRQ handler
* @dpu_kms: DPU handle
* @return: none
@@ -139,15 +132,8 @@ int dpu_core_irq_unregister_callback(
* dpu_debugfs_core_irq_init - register core irq debugfs
* @dpu_kms: pointer to kms
* @parent: debugfs directory root
- * @Return: 0 on success
*/
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent);
-/**
- * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
- * @dpu_kms: pointer to kms
- */
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
-
#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 41c5191f9056..9f20f397f77d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -24,8 +24,6 @@
#include "dpu_crtc.h"
#include "dpu_core_perf.h"
-#define DPU_PERF_MODE_STRING_SIZE 128
-
/**
* enum dpu_perf_mode - performance tuning mode
* @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
@@ -57,31 +55,20 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
-static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
-{
- return dpu_crtc_is_enabled(crtc);
-}
-
static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
- bool intf_connected = false;
-
- if (!crtc)
- goto end;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
- _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+ tmp_crtc->enabled) {
DPU_DEBUG("video interface connected crtc:%d\n",
tmp_crtc->base.id);
- intf_connected = true;
- goto end;
+ return true;
}
}
-end:
- return intf_connected;
+ return false;
}
static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
@@ -101,20 +88,20 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
memset(perf, 0, sizeof(struct dpu_core_perf_params));
if (!dpu_cstate->bw_control) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
1000ULL;
perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
}
perf->core_clk_rate = kms->perf.max_core_clk_rate;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = 0;
perf->max_per_pipe_ib[i] = 0;
}
perf->core_clk_rate = 0;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
}
@@ -124,12 +111,12 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
DPU_DEBUG(
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+ perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+ perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+ perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+ perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+ perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_EBI],
+ perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI]);
}
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
@@ -164,13 +151,13 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* obtain new values */
_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
- for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
- i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+ i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
curr_client_type = dpu_crtc_get_client_type(crtc);
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
(dpu_crtc_get_client_type(tmp_crtc) ==
curr_client_type) &&
(tmp_crtc != crtc)) {
@@ -229,7 +216,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
int ret = 0;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
curr_client_type ==
dpu_crtc_get_client_type(tmp_crtc)) {
dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
@@ -286,7 +273,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
*/
if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
dpu_crtc_get_intf_mode(tmp_crtc) ==
INTF_MODE_VIDEO)
return;
@@ -296,7 +283,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
dpu_crtc->cur_perf.bw_ctl[i] = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc, i);
}
@@ -321,7 +308,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
struct dpu_crtc_state *dpu_cstate;
drm_for_each_crtc(crtc, kms->dev) {
- if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+ if (crtc->enabled) {
dpu_cstate = to_dpu_crtc_state(crtc->state);
clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
clk_rate);
@@ -372,8 +359,8 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
old = &dpu_crtc->cur_perf;
new = &dpu_cstate->new_perf;
- if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ if (crtc->enabled && !stop_req) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote - "ab or ib vote" is higher
@@ -415,13 +402,13 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
update_clk = 1;
}
trace_dpu_perf_crtc_update(crtc->base.id,
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+ new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+ new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI],
new->core_clk_rate, stop_req,
update_bus, update_clk);
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
if (update_bus & BIT(i)) {
ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
if (ret) {
@@ -462,24 +449,14 @@ static ssize_t _dpu_core_perf_mode_write(struct file *file,
struct dpu_core_perf *perf = file->private_data;
struct dpu_perf_cfg *cfg = &perf->catalog->perf;
u32 perf_mode = 0;
- char buf[10];
-
- if (!perf)
- return -ENODEV;
-
- if (count >= sizeof(buf))
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
+ int ret;
- if (kstrtouint(buf, 0, &perf_mode))
- return -EFAULT;
+ ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
+ if (ret)
+ return ret;
if (perf_mode >= DPU_PERF_MODE_MAX)
- return -EFAULT;
+ return -EINVAL;
if (perf_mode == DPU_PERF_MODE_FIXED) {
DRM_INFO("fix performance mode\n");
@@ -504,29 +481,16 @@ static ssize_t _dpu_core_perf_mode_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
- int len = 0;
- char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
-
- if (!perf)
- return -ENODEV;
+ int len;
+ char buf[128];
- if (*ppos)
- return 0; /* the end */
-
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
perf->perf_tune.mode,
perf->perf_tune.min_core_clk,
perf->perf_tune.min_bus_vote);
- if (len < 0 || len >= sizeof(buf))
- return 0;
-
- if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
- return -EFAULT;
-
- *ppos += len; /* increase offset */
- return len;
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static const struct file_operations dpu_core_perf_mode_fops = {
@@ -535,70 +499,43 @@ static const struct file_operations dpu_core_perf_mode_fops = {
.write = _dpu_core_perf_mode_write,
};
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
- debugfs_remove_recursive(perf->debugfs_root);
- perf->debugfs_root = NULL;
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent)
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
+ struct dpu_core_perf *perf = &dpu_kms->perf;
struct dpu_mdss_cfg *catalog = perf->catalog;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- priv = perf->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
+ struct dentry *entry;
- dpu_kms = to_dpu_kms(priv->kms);
-
- perf->debugfs_root = debugfs_create_dir("core_perf", parent);
- if (!perf->debugfs_root) {
- DPU_ERROR("failed to create core perf debugfs\n");
+ entry = debugfs_create_dir("core_perf", parent);
+ if (IS_ERR_OR_NULL(entry))
return -EINVAL;
- }
- debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("max_core_clk_rate", 0600, entry,
&perf->max_core_clk_rate);
- debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("core_clk_rate", 0600, entry,
&perf->core_clk_rate);
- debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+ debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+ debugfs_create_u32("threshold_low", 0600, entry,
(u32 *)&catalog->perf.max_bw_low);
- debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+ debugfs_create_u32("threshold_high", 0600, entry,
(u32 *)&catalog->perf.max_bw_high);
- debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_core_ib", 0600, entry,
(u32 *)&catalog->perf.min_core_ib);
- debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_llcc_ib", 0600, entry,
(u32 *)&catalog->perf.min_llcc_ib);
- debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_dram_ib", 0600, entry,
(u32 *)&catalog->perf.min_dram_ib);
- debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+ debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
- debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
- debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
- debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
}
-#else
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent)
-{
- return 0;
-}
#endif
void dpu_core_perf_destroy(struct dpu_core_perf *perf)
@@ -608,10 +545,8 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
return;
}
- dpu_core_perf_debugfs_destroy(perf);
perf->max_core_clk_rate = 0;
perf->core_clk = NULL;
- perf->phandle = NULL;
perf->catalog = NULL;
perf->dev = NULL;
}
@@ -619,12 +554,10 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
- struct dpu_power_handle *phandle,
struct dss_clk *core_clk)
{
perf->dev = dev;
perf->catalog = catalog;
- perf->phandle = phandle;
perf->core_clk = core_clk;
perf->max_core_clk_rate = core_clk->max_rate;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index fbcbe0c7527a..37f518815eb7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -19,19 +19,31 @@
#include <drm/drm_crtc.h>
#include "dpu_hw_catalog.h"
-#include "dpu_power_handle.h"
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
/**
+ * enum dpu_core_perf_data_bus_id - data bus identifier
+ * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
+ */
+enum dpu_core_perf_data_bus_id {
+ DPU_CORE_PERF_DATA_BUS_ID_MNOC,
+ DPU_CORE_PERF_DATA_BUS_ID_LLCC,
+ DPU_CORE_PERF_DATA_BUS_ID_EBI,
+ DPU_CORE_PERF_DATA_BUS_ID_MAX,
+};
+
+/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
* @bw_ctl: arbitrated bandwidth request
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
- u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
- u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MAX];
+ u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX];
u64 core_clk_rate;
};
@@ -52,7 +64,6 @@ struct dpu_core_perf_tune {
* @dev: Pointer to drm device
* @debugfs_root: top level debug folder
* @catalog: Pointer to catalog configuration
- * @phandle: Pointer to power handler
* @core_clk: Pointer to core clock structure
* @core_clk_rate: current core clock rate
* @max_core_clk_rate: maximum allowable core clock rate
@@ -66,7 +77,6 @@ struct dpu_core_perf {
struct drm_device *dev;
struct dentry *debugfs_root;
struct dpu_mdss_cfg *catalog;
- struct dpu_power_handle *phandle;
struct dss_clk *core_clk;
u64 core_clk_rate;
u64 max_core_clk_rate;
@@ -113,21 +123,20 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf);
* @perf: Pointer to core performance context
* @dev: Pointer to drm device
* @catalog: Pointer to catalog
- * @phandle: Pointer to power handle
* @core_clk: pointer to core clock
*/
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
- struct dpu_power_handle *phandle,
struct dss_clk *core_clk);
+struct dpu_kms;
+
/**
* dpu_core_perf_debugfs_init - initialize debugfs for core performance context
- * @perf: Pointer to core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
* @debugfs_parent: Pointer to parent debugfs
*/
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent);
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d4530d60767b..9be7c355debd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -33,7 +33,6 @@
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
-#include "dpu_power_handle.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"
@@ -47,13 +46,7 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
-static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
- struct drm_display_mode *mode)
-{
- return mode->hdisplay / cstate->num_mixers;
-}
-
-static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -69,10 +62,7 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
if (!crtc)
return;
- dpu_crtc->phandle = NULL;
-
drm_crtc_cleanup(crtc);
- mutex_destroy(&dpu_crtc->crtc_lock);
kfree(dpu_crtc);
}
@@ -287,16 +277,17 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
return INTF_MODE_NONE;
}
- drm_for_each_encoder(encoder, crtc->dev)
- if (encoder->crtc == crtc)
- return dpu_encoder_get_intf_mode(encoder);
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ /* TODO: Returns the first INTF_MODE, could there be multiple values? */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ return dpu_encoder_get_intf_mode(encoder);
return INTF_MODE_NONE;
}
-static void dpu_crtc_vblank_cb(void *data)
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc = (struct drm_crtc *)data;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
/* keep statistics on vblank callback - with auto reset via debugfs */
@@ -309,6 +300,19 @@ static void dpu_crtc_vblank_cb(void *data)
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
+static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
+{
+ int ret = 0;
+ struct drm_modeset_acquire_ctx ctx;
+
+ DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
+ dpu_core_perf_crtc_release_bw(crtc);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ if (ret)
+ DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
+ ret);
+}
+
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
struct dpu_crtc_frame_event *fevent = container_of(work,
@@ -338,7 +342,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
/* release bandwidth and other resources */
trace_dpu_crtc_frame_event_done(DRMID(crtc),
fevent->event);
- dpu_core_perf_crtc_release_bw(crtc);
+ dpu_crtc_release_bw_unlocked(crtc);
} else {
trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
fevent->event);
@@ -473,28 +477,21 @@ static void _dpu_crtc_setup_mixer_for_encoder(
static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *enc;
- mutex_lock(&dpu_crtc->crtc_lock);
- /* Check for mixers on all encoders attached to this crtc */
- list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ /* Check for mixers on all encoders attached to this crtc */
+ drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
- }
-
- mutex_unlock(&dpu_crtc->crtc_lock);
}
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
struct drm_display_mode *adj_mode = &state->adjusted_mode;
- u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
+ u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
int i;
for (i = 0; i < cstate->num_mixers; i++) {
@@ -502,7 +499,7 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
r->x1 = crtc_split_width * i;
r->y1 = 0;
r->x2 = r->x1 + crtc_split_width;
- r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+ r->y2 = adj_mode->vdisplay;
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
}
@@ -552,13 +549,9 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
- /* encoder will trigger pending mask now */
+ /* encoder will trigger pending mask now */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_trigger_kickoff_pending(encoder);
- }
/*
* If no mixers have been allocated in dpu_crtc_atomic_check(),
@@ -702,10 +695,9 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
{
struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
@@ -721,127 +713,59 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("crtc_commit");
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ /*
+ * Encoder will flush/start now, unless it has a tx pending. If so, it
+ * may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
struct dpu_encoder_kickoff_params params = { 0 };
-
- if (encoder->crtc != crtc)
- continue;
-
- /*
- * Encoder will flush/start now, unless it has a tx pending.
- * If so, it may delay and flush at an irq event (e.g. ppdone)
- */
- dpu_encoder_prepare_for_kickoff(encoder, &params);
+ dpu_encoder_prepare_for_kickoff(encoder, &params, async);
}
- /* wait for frame_event_done completion */
- DPU_ATRACE_BEGIN("wait_for_frame_done_event");
- ret = _dpu_crtc_wait_for_frame_done(crtc);
- DPU_ATRACE_END("wait_for_frame_done_event");
- if (ret) {
- DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
- crtc->base.id,
- atomic_read(&dpu_crtc->frame_pending));
- goto end;
- }
- if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
- /* acquire bandwidth and other resources */
- DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
- } else
- DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+ if (!async) {
+ /* wait for frame_event_done completion */
+ DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _dpu_crtc_wait_for_frame_done(crtc);
+ DPU_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+ goto end;
+ }
+
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+ } else
+ DPU_DEBUG("crtc%d commit\n", crtc->base.id);
- dpu_crtc->play_count++;
+ dpu_crtc->play_count++;
+ }
dpu_vbif_clear_errors(dpu_kms);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
- dpu_encoder_kickoff(encoder);
- }
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_kickoff(encoder, async);
end:
- reinit_completion(&dpu_crtc->frame_done_comp);
+ if (!async)
+ reinit_completion(&dpu_crtc->frame_done_comp);
DPU_ATRACE_END("crtc_commit");
}
-/**
- * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
- * @dpu_crtc: Pointer to dpu crtc structure
- * @enable: Whether to enable/disable vblanks
- */
-static void _dpu_crtc_vblank_enable_no_lock(
- struct dpu_crtc *dpu_crtc, bool enable)
-{
- struct drm_crtc *crtc = &dpu_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *enc;
-
- if (enable) {
- /* drop lock since power crtc cb may try to re-acquire lock */
- mutex_unlock(&dpu_crtc->crtc_lock);
- pm_runtime_get_sync(dev->dev);
- mutex_lock(&dpu_crtc->crtc_lock);
-
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
- DRMID(enc), enable,
- dpu_crtc);
-
- dpu_encoder_register_vblank_callback(enc,
- dpu_crtc_vblank_cb, (void *)crtc);
- }
- } else {
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
- DRMID(enc), enable,
- dpu_crtc);
-
- dpu_encoder_register_vblank_callback(enc, NULL, NULL);
- }
-
- /* drop lock since power crtc cb may try to re-acquire lock */
- mutex_unlock(&dpu_crtc->crtc_lock);
- pm_runtime_put_sync(dev->dev);
- mutex_lock(&dpu_crtc->crtc_lock);
- }
-}
-
-/**
- * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
- * @crtc: Pointer to drm crtc object
- * @enable: true to enable suspend, false to indicate resume
- */
-static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+static void dpu_crtc_reset(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
- DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
-
- mutex_lock(&dpu_crtc->crtc_lock);
+ struct dpu_crtc_state *cstate;
- /*
- * If the vblank is enabled, release a power reference on suspend
- * and take it back during resume (if it is still enabled).
- */
- trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
- if (dpu_crtc->suspend == enable)
- DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
- crtc->base.id, enable);
- else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
- }
+ if (crtc->state)
+ dpu_crtc_destroy_state(crtc, crtc->state);
- dpu_crtc->suspend = enable;
- mutex_unlock(&dpu_crtc->crtc_lock);
+ crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
+ if (crtc->state)
+ crtc->state->crtc = crtc;
}
/**
@@ -873,65 +797,8 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
return &cstate->base;
}
-/**
- * dpu_crtc_reset - reset hook for CRTCs
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- * @crtc: Pointer to drm crtc structure
- */
-static void dpu_crtc_reset(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *cstate;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- /* revert suspend actions, if necessary */
- if (dpu_kms_is_suspend_state(crtc->dev))
- _dpu_crtc_set_suspend(crtc, false);
-
- /* remove previous state, if present */
- if (crtc->state) {
- dpu_crtc_destroy_state(crtc, crtc->state);
- crtc->state = 0;
- }
-
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
- if (!cstate) {
- DPU_ERROR("failed to allocate state\n");
- return;
- }
-
- cstate->base.crtc = crtc;
- crtc->state = &cstate->base;
-}
-
-static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
-{
- struct drm_crtc *crtc = arg;
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct drm_encoder *encoder;
-
- mutex_lock(&dpu_crtc->crtc_lock);
-
- trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
-
- /* restore encoder; crtc will be programmed during commit */
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
-
- dpu_encoder_virt_restore(encoder);
- }
-
- mutex_unlock(&dpu_crtc->crtc_lock);
-}
-
-static void dpu_crtc_disable(struct drm_crtc *crtc)
+static void dpu_crtc_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
@@ -951,13 +818,12 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
- if (dpu_kms_is_suspend_state(crtc->dev))
- _dpu_crtc_set_suspend(crtc, true);
-
/* Disable/save vblank irq handling */
drm_crtc_vblank_off(crtc);
- mutex_lock(&dpu_crtc->crtc_lock);
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ old_crtc_state->encoder_mask)
+ dpu_encoder_assign_crtc(encoder, NULL);
/* wait for frame_event_done completion */
if (_dpu_crtc_wait_for_frame_done(crtc))
@@ -966,10 +832,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
atomic_read(&dpu_crtc->frame_pending));
trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
- if (dpu_crtc->enabled && !dpu_crtc->suspend &&
- dpu_crtc->vblank_requested) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
- }
dpu_crtc->enabled = false;
if (atomic_read(&dpu_crtc->frame_pending)) {
@@ -981,15 +843,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
dpu_core_perf_crtc_update(crtc, 0, true);
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
- }
-
- if (dpu_crtc->power_event)
- dpu_power_handle_unregister_event(dpu_crtc->phandle,
- dpu_crtc->power_event);
memset(cstate->mixers, 0, sizeof(cstate->mixers));
cstate->num_mixers = 0;
@@ -998,14 +853,14 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
cstate->bw_control = false;
cstate->bw_split_vote = false;
- mutex_unlock(&dpu_crtc->crtc_lock);
-
if (crtc->state->event && !crtc->state->active) {
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
+
+ pm_runtime_put_sync(crtc->dev->dev);
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
@@ -1021,33 +876,23 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
}
priv = crtc->dev->dev_private;
+ pm_runtime_get_sync(crtc->dev->dev);
+
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder,
dpu_crtc_frame_event_cb, (void *)crtc);
- }
- mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
- if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
- dpu_crtc->vblank_requested) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
- }
dpu_crtc->enabled = true;
- mutex_unlock(&dpu_crtc->crtc_lock);
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_assign_crtc(encoder, crtc);
/* Enable/restore vblank irq handling */
drm_crtc_vblank_on(crtc);
-
- dpu_crtc->power_event = dpu_power_handle_register_event(
- dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
- dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
-
}
struct plane_state {
@@ -1101,7 +946,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
- mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
+ mixer_width = mode->hdisplay / cstate->num_mixers;
_dpu_crtc_setup_lm_bounds(crtc, state);
@@ -1289,21 +1134,32 @@ end:
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return -EINVAL;
- }
- dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
- mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
- if (dpu_crtc->enabled && !dpu_crtc->suspend) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+
+ /*
+ * Normally we would iterate through encoder_mask in crtc state to find
+ * attached encoders. In this case, we might be disabling vblank _after_
+ * encoder_mask has been cleared.
+ *
+ * Instead, we "assign" a crtc to the encoder in enable and clear it in
+ * disable (which is also after encoder_mask is cleared). So instead of
+ * using encoder mask, we'll ask the encoder to toggle itself iff it's
+ * currently assigned to our crtc.
+ *
+ * Note also that this function cannot be called while crtc is disabled
+ * since we use drm_crtc_vblank_on/off. So we don't need to worry
+ * about the assigned crtcs being inconsistent with the current state
+ * (which means no need to worry about modeset locks).
+ */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
+ dpu_crtc);
+
+ dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
}
- dpu_crtc->vblank_requested = en;
- mutex_unlock(&dpu_crtc->crtc_lock);
return 0;
}
@@ -1324,18 +1180,14 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
int i, out_width;
- if (!s || !s->private)
- return -EINVAL;
-
dpu_crtc = s->private;
crtc = &dpu_crtc->base;
drm_modeset_lock_all(crtc->dev);
cstate = to_dpu_crtc_state(crtc->state);
- mutex_lock(&dpu_crtc->crtc_lock);
mode = &crtc->state->adjusted_mode;
- out_width = _dpu_crtc_get_mixer_width(cstate, mode);
+ out_width = mode->hdisplay / cstate->num_mixers;
seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
mode->hdisplay, mode->vdisplay);
@@ -1420,9 +1272,6 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
dpu_crtc->vblank_cb_time = ktime_set(0, 0);
}
- seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
-
- mutex_unlock(&dpu_crtc->crtc_lock);
drm_modeset_unlock_all(crtc->dev);
return 0;
@@ -1456,13 +1305,11 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
- for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
- i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- seq_printf(s, "bw_ctl[%s]: %llu\n",
- dpu_power_handle_get_dbus_name(i),
+ for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+ i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
+ seq_printf(s, "bw_ctl[%d]: %llu\n", i,
dpu_crtc->cur_perf.bw_ctl[i]);
- seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
- dpu_power_handle_get_dbus_name(i),
+ seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
dpu_crtc->cur_perf.max_per_pipe_ib[i]);
}
@@ -1472,8 +1319,7 @@ DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_kms *dpu_kms;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
static const struct file_operations debugfs_status_fops = {
.open = _dpu_debugfs_status_open,
@@ -1482,12 +1328,6 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
.release = single_release,
};
- if (!crtc)
- return -EINVAL;
- dpu_crtc = to_dpu_crtc(crtc);
-
- dpu_kms = _dpu_crtc_get_kms(crtc);
-
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
if (!dpu_crtc->debugfs_root)
@@ -1504,25 +1344,11 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
return 0;
}
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc)
- return;
- dpu_crtc = to_dpu_crtc(crtc);
- debugfs_remove_recursive(dpu_crtc->debugfs_root);
-}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
return 0;
}
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-}
#endif /* CONFIG_DEBUG_FS */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
@@ -1532,7 +1358,9 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
- _dpu_crtc_destroy_debugfs(crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
static const struct drm_crtc_funcs dpu_crtc_funcs = {
@@ -1547,7 +1375,7 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
- .disable = dpu_crtc_disable,
+ .atomic_disable = dpu_crtc_disable,
.atomic_enable = dpu_crtc_enable,
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
@@ -1574,7 +1402,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
crtc = &dpu_crtc->base;
crtc->dev = dev;
- mutex_init(&dpu_crtc->crtc_lock);
spin_lock_init(&dpu_crtc->spin_lock);
atomic_set(&dpu_crtc->frame_pending, 0);
@@ -1594,7 +1421,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
NULL);
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
- plane->crtc = crtc;
/* save user friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
@@ -1602,8 +1428,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
/* initialize event handling */
spin_lock_init(&dpu_crtc->event_lock);
- dpu_crtc->phandle = &kms->phandle;
-
DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
return crtc;
}
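
Aside (illustrative sketch, not part of the patch): the recurring change in
dpu_crtc.c above is the iteration pattern. drm_for_each_encoder_mask() visits
only the encoders whose index bit is set in the given mask, replacing the
open-coded walk over dev->mode_config.encoder_list:

	struct drm_encoder *encoder;

	/* old pattern: walk every encoder, skip the ones not on this crtc */
	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/* per-encoder work */
	}

	/* new pattern: the atomic state's encoder_mask encodes the binding */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* per-encoder work */
	}

The mask form also composes with explicit old/new states, which is why
dpu_crtc_disable() iterates old_crtc_state->encoder_mask: by the time the
disable hook runs, the new state no longer carries the binding.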
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 3723b4830335..dbfb38a1986c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -132,8 +132,6 @@ struct dpu_crtc_frame_event {
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
- * @vblank_requested : whether the user has requested vblank events
- * @suspend : whether or not a suspend operation is in progress
* @enabled : whether the DPU CRTC is currently enabled. updated in the
* commit-thread, not state-swap time which is earlier, so
* safe to make decisions on during VBLANK on/off work
@@ -142,7 +140,6 @@ struct dpu_crtc_frame_event {
 * @dirty_list : list of color processing features that are dirty
* @ad_dirty: list containing ad properties that are dirty
* @ad_active: list containing ad properties that are active
- * @crtc_lock : crtc lock around create, destroy and access.
* @frame_pending : Whether or not an update is pending
* @frame_events : static allocation of in-flight frame events
* @frame_event_list : available frame event list
@@ -152,7 +149,6 @@ struct dpu_crtc_frame_event {
* @event_worker : Event worker queue
* @event_lock : Spinlock around event handling code
* @phandle: Pointer to power handler
- * @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
*/
struct dpu_crtc {
@@ -168,8 +164,6 @@ struct dpu_crtc {
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
- bool vblank_requested;
- bool suspend;
bool enabled;
struct list_head feature_list;
@@ -178,8 +172,6 @@ struct dpu_crtc {
struct list_head ad_dirty;
struct list_head ad_active;
- struct mutex crtc_lock;
-
atomic_t frame_pending;
struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
@@ -189,9 +181,6 @@ struct dpu_crtc {
/* for handling internal event thread */
spinlock_t event_lock;
- struct dpu_power_handle *phandle;
- struct dpu_power_event *power_event;
-
struct dpu_core_perf_params cur_perf;
struct dpu_crtc_smmu_state_data smmu_state;
@@ -238,41 +227,12 @@ struct dpu_crtc_state {
container_of(x, struct dpu_crtc_state, base)
/**
- * dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
- * @cstate: Pointer to dpu crtc state
- * @Return: true - has two mixers, false - has one mixer
- */
-static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
-{
- return cstate->num_mixers == CRTC_DUAL_MIXERS;
-}
-
-/**
- * dpu_crtc_get_mixer_height - get the mixer height
- * Mixer height will be same as panel height
- */
-static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
- struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
-{
- if (!dpu_crtc || !cstate || !mode)
- return 0;
-
- return mode->vdisplay;
-}
-
-/**
 * dpu_crtc_frame_pending - return the number of pending frames
* @crtc: Pointer to drm crtc object
*/
static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc)
- return -EINVAL;
-
- dpu_crtc = to_dpu_crtc(crtc);
- return atomic_read(&dpu_crtc->frame_pending);
+ return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
/**
@@ -283,10 +243,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
/**
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
+
+/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
+ * @async: true if the commit is asynchronous, false otherwise
*/
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async);
/**
* dpu_crtc_complete_commit - callback signalling completion of current commit
@@ -329,22 +296,7 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
struct drm_crtc *crtc)
{
- struct dpu_crtc_state *cstate =
- crtc ? to_dpu_crtc_state(crtc->state) : NULL;
-
- if (!cstate)
- return NRT_CLIENT;
-
- return RT_CLIENT;
-}
-
-/**
- * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
- * @crtc: Pointer to crtc
- */
-static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
-{
- return crtc ? crtc->enabled : false;
+ return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
}
#endif /* _DPU_CRTC_H_ */
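
Aside (minimal sketch under standard runtime-PM semantics, not part of the
patch): with the dpu_power_handle event machinery removed from the crtc, the
crtc now holds the device's runtime-PM reference for exactly its enabled
lifetime. example_enable/example_disable are hypothetical names; in the patch
the calls live in dpu_crtc_enable() and dpu_crtc_disable():

	#include <linux/pm_runtime.h>

	static void example_enable(struct drm_crtc *crtc)
	{
		/* power up the device before touching hardware */
		pm_runtime_get_sync(crtc->dev->dev);
		/* ... register callbacks, program hardware ... */
	}

	static void example_disable(struct drm_crtc *crtc)
	{
		/* ... quiesce hardware, tear down callbacks ... */
		/* drop the reference taken in enable */
		pm_runtime_put_sync(crtc->dev->dev);
	}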
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
deleted file mode 100644
index ae2aee7ed9e1..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
+++ /dev/null
@@ -1,2393 +0,0 @@
-/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include <linux/slab.h>
-#include <linux/list_sort.h>
-#include <linux/pm_runtime.h>
-
-#include "dpu_dbg.h"
-#include "disp/dpu1/dpu_hw_catalog.h"
-
-
-#define DEFAULT_DBGBUS_DPU DPU_DBG_DUMP_IN_MEM
-#define DEFAULT_DBGBUS_VBIFRT DPU_DBG_DUMP_IN_MEM
-#define REG_BASE_NAME_LEN 80
-
-#define DBGBUS_FLAGS_DSPP BIT(0)
-#define DBGBUS_DSPP_STATUS 0x34C
-
-#define DBGBUS_NAME_DPU "dpu"
-#define DBGBUS_NAME_VBIF_RT "vbif_rt"
-
-/* offsets from dpu top address for the debug buses */
-#define DBGBUS_SSPP0 0x188
-#define DBGBUS_AXI_INTF 0x194
-#define DBGBUS_SSPP1 0x298
-#define DBGBUS_DSPP 0x348
-#define DBGBUS_PERIPH 0x418
-
-#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
-
-/* following offsets are with respect to MDP VBIF base for DBG BUS access */
-#define MMSS_VBIF_CLKON 0x4
-#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
-#define MMSS_VBIF_TEST_BUS_OUT 0x230
-
-/* Vbif error info */
-#define MMSS_VBIF_PND_ERR 0x190
-#define MMSS_VBIF_SRC_ERR 0x194
-#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
-#define MMSS_VBIF_ERR_INFO 0X1a0
-#define MMSS_VBIF_ERR_INFO_1 0x1a4
-#define MMSS_VBIF_CLIENT_NUM 14
-
-/**
- * struct dpu_dbg_reg_base - register region base.
- * may sub-ranges: sub-ranges are used for dumping
- * or may not have sub-ranges: dumping is base -> max_offset
- * @reg_base_head: head of this node
- * @name: register base name
- * @base: base pointer
- * @off: cached offset of region for manual register dumping
- * @cnt: cached range of region for manual register dumping
- * @max_offset: length of region
- * @buf: buffer used for manual register dumping
- * @buf_len: buffer length used for manual register dumping
- * @cb: callback for external dump function, null if not defined
- * @cb_ptr: private pointer to callback function
- */
-struct dpu_dbg_reg_base {
- struct list_head reg_base_head;
- char name[REG_BASE_NAME_LEN];
- void __iomem *base;
- size_t off;
- size_t cnt;
- size_t max_offset;
- char *buf;
- size_t buf_len;
- void (*cb)(void *ptr);
- void *cb_ptr;
-};
-
-struct dpu_debug_bus_entry {
- u32 wr_addr;
- u32 block_id;
- u32 test_id;
- void (*analyzer)(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val);
-};
-
-struct vbif_debug_bus_entry {
- u32 disable_bus_addr;
- u32 block_bus_addr;
- u32 bit_offset;
- u32 block_cnt;
- u32 test_pnt_start;
- u32 test_pnt_cnt;
-};
-
-struct dpu_dbg_debug_bus_common {
- char *name;
- u32 enable_mask;
- bool include_in_deferred_work;
- u32 flags;
- u32 entries_size;
- u32 *dumped_content;
-};
-
-struct dpu_dbg_dpu_debug_bus {
- struct dpu_dbg_debug_bus_common cmn;
- struct dpu_debug_bus_entry *entries;
- u32 top_blk_off;
-};
-
-struct dpu_dbg_vbif_debug_bus {
- struct dpu_dbg_debug_bus_common cmn;
- struct vbif_debug_bus_entry *entries;
-};
-
-/**
- * struct dpu_dbg_base - global dpu debug base structure
- * @reg_base_list: list of register dumping regions
- * @dev: device pointer
- * @dump_work: work struct for deferring register dump work to separate thread
- * @dbgbus_dpu: debug bus structure for the dpu
- * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
- */
-static struct dpu_dbg_base {
- struct list_head reg_base_list;
- struct device *dev;
-
- struct work_struct dump_work;
-
- struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
- struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
-} dpu_dbg_base;
-
-static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & 0xFFF000))
- return;
-
- dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & BIT(15)))
- return;
-
- dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & BIT(15)))
- return;
-
- dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
-
- /* Unpack 0 sspp 0*/
- { DBGBUS_SSPP0, 50, 2 },
- { DBGBUS_SSPP0, 60, 2 },
- { DBGBUS_SSPP0, 70, 2 },
- { DBGBUS_SSPP0, 85, 2 },
-
- /* Upack 0 sspp 1*/
- { DBGBUS_SSPP1, 50, 2 },
- { DBGBUS_SSPP1, 60, 2 },
- { DBGBUS_SSPP1, 70, 2 },
- { DBGBUS_SSPP1, 85, 2 },
-
- /* scheduler */
- { DBGBUS_DSPP, 130, 0 },
- { DBGBUS_DSPP, 130, 1 },
- { DBGBUS_DSPP, 130, 2 },
- { DBGBUS_DSPP, 130, 3 },
- { DBGBUS_DSPP, 130, 4 },
- { DBGBUS_DSPP, 130, 5 },
-
- /* qseed */
- { DBGBUS_SSPP0, 6, 0},
- { DBGBUS_SSPP0, 6, 1},
- { DBGBUS_SSPP0, 26, 0},
- { DBGBUS_SSPP0, 26, 1},
- { DBGBUS_SSPP1, 6, 0},
- { DBGBUS_SSPP1, 6, 1},
- { DBGBUS_SSPP1, 26, 0},
- { DBGBUS_SSPP1, 26, 1},
-
- /* scale */
- { DBGBUS_SSPP0, 16, 0},
- { DBGBUS_SSPP0, 16, 1},
- { DBGBUS_SSPP0, 36, 0},
- { DBGBUS_SSPP0, 36, 1},
- { DBGBUS_SSPP1, 16, 0},
- { DBGBUS_SSPP1, 16, 1},
- { DBGBUS_SSPP1, 36, 0},
- { DBGBUS_SSPP1, 36, 1},
-
- /* fetch sspp0 */
-
- /* vig 0 */
- { DBGBUS_SSPP0, 0, 0 },
- { DBGBUS_SSPP0, 0, 1 },
- { DBGBUS_SSPP0, 0, 2 },
- { DBGBUS_SSPP0, 0, 3 },
- { DBGBUS_SSPP0, 0, 4 },
- { DBGBUS_SSPP0, 0, 5 },
- { DBGBUS_SSPP0, 0, 6 },
- { DBGBUS_SSPP0, 0, 7 },
-
- { DBGBUS_SSPP0, 1, 0 },
- { DBGBUS_SSPP0, 1, 1 },
- { DBGBUS_SSPP0, 1, 2 },
- { DBGBUS_SSPP0, 1, 3 },
- { DBGBUS_SSPP0, 1, 4 },
- { DBGBUS_SSPP0, 1, 5 },
- { DBGBUS_SSPP0, 1, 6 },
- { DBGBUS_SSPP0, 1, 7 },
-
- { DBGBUS_SSPP0, 2, 0 },
- { DBGBUS_SSPP0, 2, 1 },
- { DBGBUS_SSPP0, 2, 2 },
- { DBGBUS_SSPP0, 2, 3 },
- { DBGBUS_SSPP0, 2, 4 },
- { DBGBUS_SSPP0, 2, 5 },
- { DBGBUS_SSPP0, 2, 6 },
- { DBGBUS_SSPP0, 2, 7 },
-
- { DBGBUS_SSPP0, 4, 0 },
- { DBGBUS_SSPP0, 4, 1 },
- { DBGBUS_SSPP0, 4, 2 },
- { DBGBUS_SSPP0, 4, 3 },
- { DBGBUS_SSPP0, 4, 4 },
- { DBGBUS_SSPP0, 4, 5 },
- { DBGBUS_SSPP0, 4, 6 },
- { DBGBUS_SSPP0, 4, 7 },
-
- { DBGBUS_SSPP0, 5, 0 },
- { DBGBUS_SSPP0, 5, 1 },
- { DBGBUS_SSPP0, 5, 2 },
- { DBGBUS_SSPP0, 5, 3 },
- { DBGBUS_SSPP0, 5, 4 },
- { DBGBUS_SSPP0, 5, 5 },
- { DBGBUS_SSPP0, 5, 6 },
- { DBGBUS_SSPP0, 5, 7 },
-
- /* vig 2 */
- { DBGBUS_SSPP0, 20, 0 },
- { DBGBUS_SSPP0, 20, 1 },
- { DBGBUS_SSPP0, 20, 2 },
- { DBGBUS_SSPP0, 20, 3 },
- { DBGBUS_SSPP0, 20, 4 },
- { DBGBUS_SSPP0, 20, 5 },
- { DBGBUS_SSPP0, 20, 6 },
- { DBGBUS_SSPP0, 20, 7 },
-
- { DBGBUS_SSPP0, 21, 0 },
- { DBGBUS_SSPP0, 21, 1 },
- { DBGBUS_SSPP0, 21, 2 },
- { DBGBUS_SSPP0, 21, 3 },
- { DBGBUS_SSPP0, 21, 4 },
- { DBGBUS_SSPP0, 21, 5 },
- { DBGBUS_SSPP0, 21, 6 },
- { DBGBUS_SSPP0, 21, 7 },
-
- { DBGBUS_SSPP0, 22, 0 },
- { DBGBUS_SSPP0, 22, 1 },
- { DBGBUS_SSPP0, 22, 2 },
- { DBGBUS_SSPP0, 22, 3 },
- { DBGBUS_SSPP0, 22, 4 },
- { DBGBUS_SSPP0, 22, 5 },
- { DBGBUS_SSPP0, 22, 6 },
- { DBGBUS_SSPP0, 22, 7 },
-
- { DBGBUS_SSPP0, 24, 0 },
- { DBGBUS_SSPP0, 24, 1 },
- { DBGBUS_SSPP0, 24, 2 },
- { DBGBUS_SSPP0, 24, 3 },
- { DBGBUS_SSPP0, 24, 4 },
- { DBGBUS_SSPP0, 24, 5 },
- { DBGBUS_SSPP0, 24, 6 },
- { DBGBUS_SSPP0, 24, 7 },
-
- { DBGBUS_SSPP0, 25, 0 },
- { DBGBUS_SSPP0, 25, 1 },
- { DBGBUS_SSPP0, 25, 2 },
- { DBGBUS_SSPP0, 25, 3 },
- { DBGBUS_SSPP0, 25, 4 },
- { DBGBUS_SSPP0, 25, 5 },
- { DBGBUS_SSPP0, 25, 6 },
- { DBGBUS_SSPP0, 25, 7 },
-
- /* dma 2 */
- { DBGBUS_SSPP0, 30, 0 },
- { DBGBUS_SSPP0, 30, 1 },
- { DBGBUS_SSPP0, 30, 2 },
- { DBGBUS_SSPP0, 30, 3 },
- { DBGBUS_SSPP0, 30, 4 },
- { DBGBUS_SSPP0, 30, 5 },
- { DBGBUS_SSPP0, 30, 6 },
- { DBGBUS_SSPP0, 30, 7 },
-
- { DBGBUS_SSPP0, 31, 0 },
- { DBGBUS_SSPP0, 31, 1 },
- { DBGBUS_SSPP0, 31, 2 },
- { DBGBUS_SSPP0, 31, 3 },
- { DBGBUS_SSPP0, 31, 4 },
- { DBGBUS_SSPP0, 31, 5 },
- { DBGBUS_SSPP0, 31, 6 },
- { DBGBUS_SSPP0, 31, 7 },
-
- { DBGBUS_SSPP0, 32, 0 },
- { DBGBUS_SSPP0, 32, 1 },
- { DBGBUS_SSPP0, 32, 2 },
- { DBGBUS_SSPP0, 32, 3 },
- { DBGBUS_SSPP0, 32, 4 },
- { DBGBUS_SSPP0, 32, 5 },
- { DBGBUS_SSPP0, 32, 6 },
- { DBGBUS_SSPP0, 32, 7 },
-
- { DBGBUS_SSPP0, 33, 0 },
- { DBGBUS_SSPP0, 33, 1 },
- { DBGBUS_SSPP0, 33, 2 },
- { DBGBUS_SSPP0, 33, 3 },
- { DBGBUS_SSPP0, 33, 4 },
- { DBGBUS_SSPP0, 33, 5 },
- { DBGBUS_SSPP0, 33, 6 },
- { DBGBUS_SSPP0, 33, 7 },
-
- { DBGBUS_SSPP0, 34, 0 },
- { DBGBUS_SSPP0, 34, 1 },
- { DBGBUS_SSPP0, 34, 2 },
- { DBGBUS_SSPP0, 34, 3 },
- { DBGBUS_SSPP0, 34, 4 },
- { DBGBUS_SSPP0, 34, 5 },
- { DBGBUS_SSPP0, 34, 6 },
- { DBGBUS_SSPP0, 34, 7 },
-
- { DBGBUS_SSPP0, 35, 0 },
- { DBGBUS_SSPP0, 35, 1 },
- { DBGBUS_SSPP0, 35, 2 },
- { DBGBUS_SSPP0, 35, 3 },
-
- /* dma 0 */
- { DBGBUS_SSPP0, 40, 0 },
- { DBGBUS_SSPP0, 40, 1 },
- { DBGBUS_SSPP0, 40, 2 },
- { DBGBUS_SSPP0, 40, 3 },
- { DBGBUS_SSPP0, 40, 4 },
- { DBGBUS_SSPP0, 40, 5 },
- { DBGBUS_SSPP0, 40, 6 },
- { DBGBUS_SSPP0, 40, 7 },
-
- { DBGBUS_SSPP0, 41, 0 },
- { DBGBUS_SSPP0, 41, 1 },
- { DBGBUS_SSPP0, 41, 2 },
- { DBGBUS_SSPP0, 41, 3 },
- { DBGBUS_SSPP0, 41, 4 },
- { DBGBUS_SSPP0, 41, 5 },
- { DBGBUS_SSPP0, 41, 6 },
- { DBGBUS_SSPP0, 41, 7 },
-
- { DBGBUS_SSPP0, 42, 0 },
- { DBGBUS_SSPP0, 42, 1 },
- { DBGBUS_SSPP0, 42, 2 },
- { DBGBUS_SSPP0, 42, 3 },
- { DBGBUS_SSPP0, 42, 4 },
- { DBGBUS_SSPP0, 42, 5 },
- { DBGBUS_SSPP0, 42, 6 },
- { DBGBUS_SSPP0, 42, 7 },
-
- { DBGBUS_SSPP0, 44, 0 },
- { DBGBUS_SSPP0, 44, 1 },
- { DBGBUS_SSPP0, 44, 2 },
- { DBGBUS_SSPP0, 44, 3 },
- { DBGBUS_SSPP0, 44, 4 },
- { DBGBUS_SSPP0, 44, 5 },
- { DBGBUS_SSPP0, 44, 6 },
- { DBGBUS_SSPP0, 44, 7 },
-
- { DBGBUS_SSPP0, 45, 0 },
- { DBGBUS_SSPP0, 45, 1 },
- { DBGBUS_SSPP0, 45, 2 },
- { DBGBUS_SSPP0, 45, 3 },
- { DBGBUS_SSPP0, 45, 4 },
- { DBGBUS_SSPP0, 45, 5 },
- { DBGBUS_SSPP0, 45, 6 },
- { DBGBUS_SSPP0, 45, 7 },
-
- /* fetch sspp1 */
- /* vig 1 */
- { DBGBUS_SSPP1, 0, 0 },
- { DBGBUS_SSPP1, 0, 1 },
- { DBGBUS_SSPP1, 0, 2 },
- { DBGBUS_SSPP1, 0, 3 },
- { DBGBUS_SSPP1, 0, 4 },
- { DBGBUS_SSPP1, 0, 5 },
- { DBGBUS_SSPP1, 0, 6 },
- { DBGBUS_SSPP1, 0, 7 },
-
- { DBGBUS_SSPP1, 1, 0 },
- { DBGBUS_SSPP1, 1, 1 },
- { DBGBUS_SSPP1, 1, 2 },
- { DBGBUS_SSPP1, 1, 3 },
- { DBGBUS_SSPP1, 1, 4 },
- { DBGBUS_SSPP1, 1, 5 },
- { DBGBUS_SSPP1, 1, 6 },
- { DBGBUS_SSPP1, 1, 7 },
-
- { DBGBUS_SSPP1, 2, 0 },
- { DBGBUS_SSPP1, 2, 1 },
- { DBGBUS_SSPP1, 2, 2 },
- { DBGBUS_SSPP1, 2, 3 },
- { DBGBUS_SSPP1, 2, 4 },
- { DBGBUS_SSPP1, 2, 5 },
- { DBGBUS_SSPP1, 2, 6 },
- { DBGBUS_SSPP1, 2, 7 },
-
- { DBGBUS_SSPP1, 4, 0 },
- { DBGBUS_SSPP1, 4, 1 },
- { DBGBUS_SSPP1, 4, 2 },
- { DBGBUS_SSPP1, 4, 3 },
- { DBGBUS_SSPP1, 4, 4 },
- { DBGBUS_SSPP1, 4, 5 },
- { DBGBUS_SSPP1, 4, 6 },
- { DBGBUS_SSPP1, 4, 7 },
-
- { DBGBUS_SSPP1, 5, 0 },
- { DBGBUS_SSPP1, 5, 1 },
- { DBGBUS_SSPP1, 5, 2 },
- { DBGBUS_SSPP1, 5, 3 },
- { DBGBUS_SSPP1, 5, 4 },
- { DBGBUS_SSPP1, 5, 5 },
- { DBGBUS_SSPP1, 5, 6 },
- { DBGBUS_SSPP1, 5, 7 },
-
- /* vig 3 */
- { DBGBUS_SSPP1, 20, 0 },
- { DBGBUS_SSPP1, 20, 1 },
- { DBGBUS_SSPP1, 20, 2 },
- { DBGBUS_SSPP1, 20, 3 },
- { DBGBUS_SSPP1, 20, 4 },
- { DBGBUS_SSPP1, 20, 5 },
- { DBGBUS_SSPP1, 20, 6 },
- { DBGBUS_SSPP1, 20, 7 },
-
- { DBGBUS_SSPP1, 21, 0 },
- { DBGBUS_SSPP1, 21, 1 },
- { DBGBUS_SSPP1, 21, 2 },
- { DBGBUS_SSPP1, 21, 3 },
- { DBGBUS_SSPP1, 21, 4 },
- { DBGBUS_SSPP1, 21, 5 },
- { DBGBUS_SSPP1, 21, 6 },
- { DBGBUS_SSPP1, 21, 7 },
-
- { DBGBUS_SSPP1, 22, 0 },
- { DBGBUS_SSPP1, 22, 1 },
- { DBGBUS_SSPP1, 22, 2 },
- { DBGBUS_SSPP1, 22, 3 },
- { DBGBUS_SSPP1, 22, 4 },
- { DBGBUS_SSPP1, 22, 5 },
- { DBGBUS_SSPP1, 22, 6 },
- { DBGBUS_SSPP1, 22, 7 },
-
- { DBGBUS_SSPP1, 24, 0 },
- { DBGBUS_SSPP1, 24, 1 },
- { DBGBUS_SSPP1, 24, 2 },
- { DBGBUS_SSPP1, 24, 3 },
- { DBGBUS_SSPP1, 24, 4 },
- { DBGBUS_SSPP1, 24, 5 },
- { DBGBUS_SSPP1, 24, 6 },
- { DBGBUS_SSPP1, 24, 7 },
-
- { DBGBUS_SSPP1, 25, 0 },
- { DBGBUS_SSPP1, 25, 1 },
- { DBGBUS_SSPP1, 25, 2 },
- { DBGBUS_SSPP1, 25, 3 },
- { DBGBUS_SSPP1, 25, 4 },
- { DBGBUS_SSPP1, 25, 5 },
- { DBGBUS_SSPP1, 25, 6 },
- { DBGBUS_SSPP1, 25, 7 },
-
- /* dma 3 */
- { DBGBUS_SSPP1, 30, 0 },
- { DBGBUS_SSPP1, 30, 1 },
- { DBGBUS_SSPP1, 30, 2 },
- { DBGBUS_SSPP1, 30, 3 },
- { DBGBUS_SSPP1, 30, 4 },
- { DBGBUS_SSPP1, 30, 5 },
- { DBGBUS_SSPP1, 30, 6 },
- { DBGBUS_SSPP1, 30, 7 },
-
- { DBGBUS_SSPP1, 31, 0 },
- { DBGBUS_SSPP1, 31, 1 },
- { DBGBUS_SSPP1, 31, 2 },
- { DBGBUS_SSPP1, 31, 3 },
- { DBGBUS_SSPP1, 31, 4 },
- { DBGBUS_SSPP1, 31, 5 },
- { DBGBUS_SSPP1, 31, 6 },
- { DBGBUS_SSPP1, 31, 7 },
-
- { DBGBUS_SSPP1, 32, 0 },
- { DBGBUS_SSPP1, 32, 1 },
- { DBGBUS_SSPP1, 32, 2 },
- { DBGBUS_SSPP1, 32, 3 },
- { DBGBUS_SSPP1, 32, 4 },
- { DBGBUS_SSPP1, 32, 5 },
- { DBGBUS_SSPP1, 32, 6 },
- { DBGBUS_SSPP1, 32, 7 },
-
- { DBGBUS_SSPP1, 33, 0 },
- { DBGBUS_SSPP1, 33, 1 },
- { DBGBUS_SSPP1, 33, 2 },
- { DBGBUS_SSPP1, 33, 3 },
- { DBGBUS_SSPP1, 33, 4 },
- { DBGBUS_SSPP1, 33, 5 },
- { DBGBUS_SSPP1, 33, 6 },
- { DBGBUS_SSPP1, 33, 7 },
-
- { DBGBUS_SSPP1, 34, 0 },
- { DBGBUS_SSPP1, 34, 1 },
- { DBGBUS_SSPP1, 34, 2 },
- { DBGBUS_SSPP1, 34, 3 },
- { DBGBUS_SSPP1, 34, 4 },
- { DBGBUS_SSPP1, 34, 5 },
- { DBGBUS_SSPP1, 34, 6 },
- { DBGBUS_SSPP1, 34, 7 },
-
- { DBGBUS_SSPP1, 35, 0 },
- { DBGBUS_SSPP1, 35, 1 },
- { DBGBUS_SSPP1, 35, 2 },
-
- /* dma 1 */
- { DBGBUS_SSPP1, 40, 0 },
- { DBGBUS_SSPP1, 40, 1 },
- { DBGBUS_SSPP1, 40, 2 },
- { DBGBUS_SSPP1, 40, 3 },
- { DBGBUS_SSPP1, 40, 4 },
- { DBGBUS_SSPP1, 40, 5 },
- { DBGBUS_SSPP1, 40, 6 },
- { DBGBUS_SSPP1, 40, 7 },
-
- { DBGBUS_SSPP1, 41, 0 },
- { DBGBUS_SSPP1, 41, 1 },
- { DBGBUS_SSPP1, 41, 2 },
- { DBGBUS_SSPP1, 41, 3 },
- { DBGBUS_SSPP1, 41, 4 },
- { DBGBUS_SSPP1, 41, 5 },
- { DBGBUS_SSPP1, 41, 6 },
- { DBGBUS_SSPP1, 41, 7 },
-
- { DBGBUS_SSPP1, 42, 0 },
- { DBGBUS_SSPP1, 42, 1 },
- { DBGBUS_SSPP1, 42, 2 },
- { DBGBUS_SSPP1, 42, 3 },
- { DBGBUS_SSPP1, 42, 4 },
- { DBGBUS_SSPP1, 42, 5 },
- { DBGBUS_SSPP1, 42, 6 },
- { DBGBUS_SSPP1, 42, 7 },
-
- { DBGBUS_SSPP1, 44, 0 },
- { DBGBUS_SSPP1, 44, 1 },
- { DBGBUS_SSPP1, 44, 2 },
- { DBGBUS_SSPP1, 44, 3 },
- { DBGBUS_SSPP1, 44, 4 },
- { DBGBUS_SSPP1, 44, 5 },
- { DBGBUS_SSPP1, 44, 6 },
- { DBGBUS_SSPP1, 44, 7 },
-
- { DBGBUS_SSPP1, 45, 0 },
- { DBGBUS_SSPP1, 45, 1 },
- { DBGBUS_SSPP1, 45, 2 },
- { DBGBUS_SSPP1, 45, 3 },
- { DBGBUS_SSPP1, 45, 4 },
- { DBGBUS_SSPP1, 45, 5 },
- { DBGBUS_SSPP1, 45, 6 },
- { DBGBUS_SSPP1, 45, 7 },
-
- /* cursor 1 */
- { DBGBUS_SSPP1, 80, 0 },
- { DBGBUS_SSPP1, 80, 1 },
- { DBGBUS_SSPP1, 80, 2 },
- { DBGBUS_SSPP1, 80, 3 },
- { DBGBUS_SSPP1, 80, 4 },
- { DBGBUS_SSPP1, 80, 5 },
- { DBGBUS_SSPP1, 80, 6 },
- { DBGBUS_SSPP1, 80, 7 },
-
- { DBGBUS_SSPP1, 81, 0 },
- { DBGBUS_SSPP1, 81, 1 },
- { DBGBUS_SSPP1, 81, 2 },
- { DBGBUS_SSPP1, 81, 3 },
- { DBGBUS_SSPP1, 81, 4 },
- { DBGBUS_SSPP1, 81, 5 },
- { DBGBUS_SSPP1, 81, 6 },
- { DBGBUS_SSPP1, 81, 7 },
-
- { DBGBUS_SSPP1, 82, 0 },
- { DBGBUS_SSPP1, 82, 1 },
- { DBGBUS_SSPP1, 82, 2 },
- { DBGBUS_SSPP1, 82, 3 },
- { DBGBUS_SSPP1, 82, 4 },
- { DBGBUS_SSPP1, 82, 5 },
- { DBGBUS_SSPP1, 82, 6 },
- { DBGBUS_SSPP1, 82, 7 },
-
- { DBGBUS_SSPP1, 83, 0 },
- { DBGBUS_SSPP1, 83, 1 },
- { DBGBUS_SSPP1, 83, 2 },
- { DBGBUS_SSPP1, 83, 3 },
- { DBGBUS_SSPP1, 83, 4 },
- { DBGBUS_SSPP1, 83, 5 },
- { DBGBUS_SSPP1, 83, 6 },
- { DBGBUS_SSPP1, 83, 7 },
-
- { DBGBUS_SSPP1, 84, 0 },
- { DBGBUS_SSPP1, 84, 1 },
- { DBGBUS_SSPP1, 84, 2 },
- { DBGBUS_SSPP1, 84, 3 },
- { DBGBUS_SSPP1, 84, 4 },
- { DBGBUS_SSPP1, 84, 5 },
- { DBGBUS_SSPP1, 84, 6 },
- { DBGBUS_SSPP1, 84, 7 },
-
- /* dspp */
- { DBGBUS_DSPP, 13, 0 },
- { DBGBUS_DSPP, 19, 0 },
- { DBGBUS_DSPP, 14, 0 },
- { DBGBUS_DSPP, 14, 1 },
- { DBGBUS_DSPP, 14, 3 },
- { DBGBUS_DSPP, 20, 0 },
- { DBGBUS_DSPP, 20, 1 },
- { DBGBUS_DSPP, 20, 3 },
-
- /* ppb_0 */
- { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
- /* ppb_1 */
- { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
- /* lm_lut */
- { DBGBUS_DSPP, 109, 0 },
- { DBGBUS_DSPP, 105, 0 },
- { DBGBUS_DSPP, 103, 0 },
-
- /* tear-check */
- { DBGBUS_PERIPH, 63, 0 },
- { DBGBUS_PERIPH, 64, 0 },
- { DBGBUS_PERIPH, 65, 0 },
- { DBGBUS_PERIPH, 73, 0 },
- { DBGBUS_PERIPH, 74, 0 },
-
- /* crossbar */
- { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
- /* rotator */
- { DBGBUS_DSPP, 9, 0},
-
- /* blend */
- /* LM0 */
- { DBGBUS_DSPP, 63, 0},
- { DBGBUS_DSPP, 63, 1},
- { DBGBUS_DSPP, 63, 2},
- { DBGBUS_DSPP, 63, 3},
- { DBGBUS_DSPP, 63, 4},
- { DBGBUS_DSPP, 63, 5},
- { DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 64, 0},
- { DBGBUS_DSPP, 64, 1},
- { DBGBUS_DSPP, 64, 2},
- { DBGBUS_DSPP, 64, 3},
- { DBGBUS_DSPP, 64, 4},
- { DBGBUS_DSPP, 64, 5},
- { DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 65, 0},
- { DBGBUS_DSPP, 65, 1},
- { DBGBUS_DSPP, 65, 2},
- { DBGBUS_DSPP, 65, 3},
- { DBGBUS_DSPP, 65, 4},
- { DBGBUS_DSPP, 65, 5},
- { DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 66, 0},
- { DBGBUS_DSPP, 66, 1},
- { DBGBUS_DSPP, 66, 2},
- { DBGBUS_DSPP, 66, 3},
- { DBGBUS_DSPP, 66, 4},
- { DBGBUS_DSPP, 66, 5},
- { DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 67, 0},
- { DBGBUS_DSPP, 67, 1},
- { DBGBUS_DSPP, 67, 2},
- { DBGBUS_DSPP, 67, 3},
- { DBGBUS_DSPP, 67, 4},
- { DBGBUS_DSPP, 67, 5},
- { DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 68, 0},
- { DBGBUS_DSPP, 68, 1},
- { DBGBUS_DSPP, 68, 2},
- { DBGBUS_DSPP, 68, 3},
- { DBGBUS_DSPP, 68, 4},
- { DBGBUS_DSPP, 68, 5},
- { DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 69, 0},
- { DBGBUS_DSPP, 69, 1},
- { DBGBUS_DSPP, 69, 2},
- { DBGBUS_DSPP, 69, 3},
- { DBGBUS_DSPP, 69, 4},
- { DBGBUS_DSPP, 69, 5},
- { DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
- /* LM1 */
- { DBGBUS_DSPP, 70, 0},
- { DBGBUS_DSPP, 70, 1},
- { DBGBUS_DSPP, 70, 2},
- { DBGBUS_DSPP, 70, 3},
- { DBGBUS_DSPP, 70, 4},
- { DBGBUS_DSPP, 70, 5},
- { DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 71, 0},
- { DBGBUS_DSPP, 71, 1},
- { DBGBUS_DSPP, 71, 2},
- { DBGBUS_DSPP, 71, 3},
- { DBGBUS_DSPP, 71, 4},
- { DBGBUS_DSPP, 71, 5},
- { DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 72, 0},
- { DBGBUS_DSPP, 72, 1},
- { DBGBUS_DSPP, 72, 2},
- { DBGBUS_DSPP, 72, 3},
- { DBGBUS_DSPP, 72, 4},
- { DBGBUS_DSPP, 72, 5},
- { DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 73, 0},
- { DBGBUS_DSPP, 73, 1},
- { DBGBUS_DSPP, 73, 2},
- { DBGBUS_DSPP, 73, 3},
- { DBGBUS_DSPP, 73, 4},
- { DBGBUS_DSPP, 73, 5},
- { DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 74, 0},
- { DBGBUS_DSPP, 74, 1},
- { DBGBUS_DSPP, 74, 2},
- { DBGBUS_DSPP, 74, 3},
- { DBGBUS_DSPP, 74, 4},
- { DBGBUS_DSPP, 74, 5},
- { DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 75, 0},
- { DBGBUS_DSPP, 75, 1},
- { DBGBUS_DSPP, 75, 2},
- { DBGBUS_DSPP, 75, 3},
- { DBGBUS_DSPP, 75, 4},
- { DBGBUS_DSPP, 75, 5},
- { DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 76, 0},
- { DBGBUS_DSPP, 76, 1},
- { DBGBUS_DSPP, 76, 2},
- { DBGBUS_DSPP, 76, 3},
- { DBGBUS_DSPP, 76, 4},
- { DBGBUS_DSPP, 76, 5},
- { DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
- /* LM2 */
- { DBGBUS_DSPP, 77, 0},
- { DBGBUS_DSPP, 77, 1},
- { DBGBUS_DSPP, 77, 2},
- { DBGBUS_DSPP, 77, 3},
- { DBGBUS_DSPP, 77, 4},
- { DBGBUS_DSPP, 77, 5},
- { DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 78, 0},
- { DBGBUS_DSPP, 78, 1},
- { DBGBUS_DSPP, 78, 2},
- { DBGBUS_DSPP, 78, 3},
- { DBGBUS_DSPP, 78, 4},
- { DBGBUS_DSPP, 78, 5},
- { DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 79, 0},
- { DBGBUS_DSPP, 79, 1},
- { DBGBUS_DSPP, 79, 2},
- { DBGBUS_DSPP, 79, 3},
- { DBGBUS_DSPP, 79, 4},
- { DBGBUS_DSPP, 79, 5},
- { DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 80, 0},
- { DBGBUS_DSPP, 80, 1},
- { DBGBUS_DSPP, 80, 2},
- { DBGBUS_DSPP, 80, 3},
- { DBGBUS_DSPP, 80, 4},
- { DBGBUS_DSPP, 80, 5},
- { DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 81, 0},
- { DBGBUS_DSPP, 81, 1},
- { DBGBUS_DSPP, 81, 2},
- { DBGBUS_DSPP, 81, 3},
- { DBGBUS_DSPP, 81, 4},
- { DBGBUS_DSPP, 81, 5},
- { DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 82, 0},
- { DBGBUS_DSPP, 82, 1},
- { DBGBUS_DSPP, 82, 2},
- { DBGBUS_DSPP, 82, 3},
- { DBGBUS_DSPP, 82, 4},
- { DBGBUS_DSPP, 82, 5},
- { DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 83, 0},
- { DBGBUS_DSPP, 83, 1},
- { DBGBUS_DSPP, 83, 2},
- { DBGBUS_DSPP, 83, 3},
- { DBGBUS_DSPP, 83, 4},
- { DBGBUS_DSPP, 83, 5},
- { DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
- /* csc */
- { DBGBUS_SSPP0, 7, 0},
- { DBGBUS_SSPP0, 7, 1},
- { DBGBUS_SSPP0, 27, 0},
- { DBGBUS_SSPP0, 27, 1},
- { DBGBUS_SSPP1, 7, 0},
- { DBGBUS_SSPP1, 7, 1},
- { DBGBUS_SSPP1, 27, 0},
- { DBGBUS_SSPP1, 27, 1},
-
- /* pcc */
- { DBGBUS_SSPP0, 3, 3},
- { DBGBUS_SSPP0, 23, 3},
- { DBGBUS_SSPP0, 33, 3},
- { DBGBUS_SSPP0, 43, 3},
- { DBGBUS_SSPP1, 3, 3},
- { DBGBUS_SSPP1, 23, 3},
- { DBGBUS_SSPP1, 33, 3},
- { DBGBUS_SSPP1, 43, 3},
-
- /* spa */
- { DBGBUS_SSPP0, 8, 0},
- { DBGBUS_SSPP0, 28, 0},
- { DBGBUS_SSPP1, 8, 0},
- { DBGBUS_SSPP1, 28, 0},
- { DBGBUS_DSPP, 13, 0},
- { DBGBUS_DSPP, 19, 0},
-
- /* igc */
- { DBGBUS_SSPP0, 9, 0},
- { DBGBUS_SSPP0, 9, 1},
- { DBGBUS_SSPP0, 9, 3},
- { DBGBUS_SSPP0, 29, 0},
- { DBGBUS_SSPP0, 29, 1},
- { DBGBUS_SSPP0, 29, 3},
- { DBGBUS_SSPP0, 17, 0},
- { DBGBUS_SSPP0, 17, 1},
- { DBGBUS_SSPP0, 17, 3},
- { DBGBUS_SSPP0, 37, 0},
- { DBGBUS_SSPP0, 37, 1},
- { DBGBUS_SSPP0, 37, 3},
- { DBGBUS_SSPP0, 46, 0},
- { DBGBUS_SSPP0, 46, 1},
- { DBGBUS_SSPP0, 46, 3},
-
- { DBGBUS_SSPP1, 9, 0},
- { DBGBUS_SSPP1, 9, 1},
- { DBGBUS_SSPP1, 9, 3},
- { DBGBUS_SSPP1, 29, 0},
- { DBGBUS_SSPP1, 29, 1},
- { DBGBUS_SSPP1, 29, 3},
- { DBGBUS_SSPP1, 17, 0},
- { DBGBUS_SSPP1, 17, 1},
- { DBGBUS_SSPP1, 17, 3},
- { DBGBUS_SSPP1, 37, 0},
- { DBGBUS_SSPP1, 37, 1},
- { DBGBUS_SSPP1, 37, 3},
- { DBGBUS_SSPP1, 46, 0},
- { DBGBUS_SSPP1, 46, 1},
- { DBGBUS_SSPP1, 46, 3},
-
- { DBGBUS_DSPP, 14, 0},
- { DBGBUS_DSPP, 14, 1},
- { DBGBUS_DSPP, 14, 3},
- { DBGBUS_DSPP, 20, 0},
- { DBGBUS_DSPP, 20, 1},
- { DBGBUS_DSPP, 20, 3},
-
- { DBGBUS_PERIPH, 60, 0},
-};
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
-
- /* Unpack 0 sspp 0*/
- { DBGBUS_SSPP0, 50, 2 },
- { DBGBUS_SSPP0, 60, 2 },
- { DBGBUS_SSPP0, 70, 2 },
-
- /* Upack 0 sspp 1*/
- { DBGBUS_SSPP1, 50, 2 },
- { DBGBUS_SSPP1, 60, 2 },
- { DBGBUS_SSPP1, 70, 2 },
-
- /* scheduler */
- { DBGBUS_DSPP, 130, 0 },
- { DBGBUS_DSPP, 130, 1 },
- { DBGBUS_DSPP, 130, 2 },
- { DBGBUS_DSPP, 130, 3 },
- { DBGBUS_DSPP, 130, 4 },
- { DBGBUS_DSPP, 130, 5 },
-
- /* qseed */
- { DBGBUS_SSPP0, 6, 0},
- { DBGBUS_SSPP0, 6, 1},
- { DBGBUS_SSPP0, 26, 0},
- { DBGBUS_SSPP0, 26, 1},
- { DBGBUS_SSPP1, 6, 0},
- { DBGBUS_SSPP1, 6, 1},
- { DBGBUS_SSPP1, 26, 0},
- { DBGBUS_SSPP1, 26, 1},
-
- /* scale */
- { DBGBUS_SSPP0, 16, 0},
- { DBGBUS_SSPP0, 16, 1},
- { DBGBUS_SSPP0, 36, 0},
- { DBGBUS_SSPP0, 36, 1},
- { DBGBUS_SSPP1, 16, 0},
- { DBGBUS_SSPP1, 16, 1},
- { DBGBUS_SSPP1, 36, 0},
- { DBGBUS_SSPP1, 36, 1},
-
- /* fetch sspp0 */
-
- /* vig 0 */
- { DBGBUS_SSPP0, 0, 0 },
- { DBGBUS_SSPP0, 0, 1 },
- { DBGBUS_SSPP0, 0, 2 },
- { DBGBUS_SSPP0, 0, 3 },
- { DBGBUS_SSPP0, 0, 4 },
- { DBGBUS_SSPP0, 0, 5 },
- { DBGBUS_SSPP0, 0, 6 },
- { DBGBUS_SSPP0, 0, 7 },
-
- { DBGBUS_SSPP0, 1, 0 },
- { DBGBUS_SSPP0, 1, 1 },
- { DBGBUS_SSPP0, 1, 2 },
- { DBGBUS_SSPP0, 1, 3 },
- { DBGBUS_SSPP0, 1, 4 },
- { DBGBUS_SSPP0, 1, 5 },
- { DBGBUS_SSPP0, 1, 6 },
- { DBGBUS_SSPP0, 1, 7 },
-
- { DBGBUS_SSPP0, 2, 0 },
- { DBGBUS_SSPP0, 2, 1 },
- { DBGBUS_SSPP0, 2, 2 },
- { DBGBUS_SSPP0, 2, 3 },
- { DBGBUS_SSPP0, 2, 4 },
- { DBGBUS_SSPP0, 2, 5 },
- { DBGBUS_SSPP0, 2, 6 },
- { DBGBUS_SSPP0, 2, 7 },
-
- { DBGBUS_SSPP0, 4, 0 },
- { DBGBUS_SSPP0, 4, 1 },
- { DBGBUS_SSPP0, 4, 2 },
- { DBGBUS_SSPP0, 4, 3 },
- { DBGBUS_SSPP0, 4, 4 },
- { DBGBUS_SSPP0, 4, 5 },
- { DBGBUS_SSPP0, 4, 6 },
- { DBGBUS_SSPP0, 4, 7 },
-
- { DBGBUS_SSPP0, 5, 0 },
- { DBGBUS_SSPP0, 5, 1 },
- { DBGBUS_SSPP0, 5, 2 },
- { DBGBUS_SSPP0, 5, 3 },
- { DBGBUS_SSPP0, 5, 4 },
- { DBGBUS_SSPP0, 5, 5 },
- { DBGBUS_SSPP0, 5, 6 },
- { DBGBUS_SSPP0, 5, 7 },
-
- /* vig 2 */
- { DBGBUS_SSPP0, 20, 0 },
- { DBGBUS_SSPP0, 20, 1 },
- { DBGBUS_SSPP0, 20, 2 },
- { DBGBUS_SSPP0, 20, 3 },
- { DBGBUS_SSPP0, 20, 4 },
- { DBGBUS_SSPP0, 20, 5 },
- { DBGBUS_SSPP0, 20, 6 },
- { DBGBUS_SSPP0, 20, 7 },
-
- { DBGBUS_SSPP0, 21, 0 },
- { DBGBUS_SSPP0, 21, 1 },
- { DBGBUS_SSPP0, 21, 2 },
- { DBGBUS_SSPP0, 21, 3 },
- { DBGBUS_SSPP0, 21, 4 },
- { DBGBUS_SSPP0, 21, 5 },
- { DBGBUS_SSPP0, 21, 6 },
- { DBGBUS_SSPP0, 21, 7 },
-
- { DBGBUS_SSPP0, 22, 0 },
- { DBGBUS_SSPP0, 22, 1 },
- { DBGBUS_SSPP0, 22, 2 },
- { DBGBUS_SSPP0, 22, 3 },
- { DBGBUS_SSPP0, 22, 4 },
- { DBGBUS_SSPP0, 22, 5 },
- { DBGBUS_SSPP0, 22, 6 },
- { DBGBUS_SSPP0, 22, 7 },
-
- { DBGBUS_SSPP0, 24, 0 },
- { DBGBUS_SSPP0, 24, 1 },
- { DBGBUS_SSPP0, 24, 2 },
- { DBGBUS_SSPP0, 24, 3 },
- { DBGBUS_SSPP0, 24, 4 },
- { DBGBUS_SSPP0, 24, 5 },
- { DBGBUS_SSPP0, 24, 6 },
- { DBGBUS_SSPP0, 24, 7 },
-
- { DBGBUS_SSPP0, 25, 0 },
- { DBGBUS_SSPP0, 25, 1 },
- { DBGBUS_SSPP0, 25, 2 },
- { DBGBUS_SSPP0, 25, 3 },
- { DBGBUS_SSPP0, 25, 4 },
- { DBGBUS_SSPP0, 25, 5 },
- { DBGBUS_SSPP0, 25, 6 },
- { DBGBUS_SSPP0, 25, 7 },
-
- /* dma 2 */
- { DBGBUS_SSPP0, 30, 0 },
- { DBGBUS_SSPP0, 30, 1 },
- { DBGBUS_SSPP0, 30, 2 },
- { DBGBUS_SSPP0, 30, 3 },
- { DBGBUS_SSPP0, 30, 4 },
- { DBGBUS_SSPP0, 30, 5 },
- { DBGBUS_SSPP0, 30, 6 },
- { DBGBUS_SSPP0, 30, 7 },
-
- { DBGBUS_SSPP0, 31, 0 },
- { DBGBUS_SSPP0, 31, 1 },
- { DBGBUS_SSPP0, 31, 2 },
- { DBGBUS_SSPP0, 31, 3 },
- { DBGBUS_SSPP0, 31, 4 },
- { DBGBUS_SSPP0, 31, 5 },
- { DBGBUS_SSPP0, 31, 6 },
- { DBGBUS_SSPP0, 31, 7 },
-
- { DBGBUS_SSPP0, 32, 0 },
- { DBGBUS_SSPP0, 32, 1 },
- { DBGBUS_SSPP0, 32, 2 },
- { DBGBUS_SSPP0, 32, 3 },
- { DBGBUS_SSPP0, 32, 4 },
- { DBGBUS_SSPP0, 32, 5 },
- { DBGBUS_SSPP0, 32, 6 },
- { DBGBUS_SSPP0, 32, 7 },
-
- { DBGBUS_SSPP0, 33, 0 },
- { DBGBUS_SSPP0, 33, 1 },
- { DBGBUS_SSPP0, 33, 2 },
- { DBGBUS_SSPP0, 33, 3 },
- { DBGBUS_SSPP0, 33, 4 },
- { DBGBUS_SSPP0, 33, 5 },
- { DBGBUS_SSPP0, 33, 6 },
- { DBGBUS_SSPP0, 33, 7 },
-
- { DBGBUS_SSPP0, 34, 0 },
- { DBGBUS_SSPP0, 34, 1 },
- { DBGBUS_SSPP0, 34, 2 },
- { DBGBUS_SSPP0, 34, 3 },
- { DBGBUS_SSPP0, 34, 4 },
- { DBGBUS_SSPP0, 34, 5 },
- { DBGBUS_SSPP0, 34, 6 },
- { DBGBUS_SSPP0, 34, 7 },
-
- { DBGBUS_SSPP0, 35, 0 },
- { DBGBUS_SSPP0, 35, 1 },
- { DBGBUS_SSPP0, 35, 2 },
- { DBGBUS_SSPP0, 35, 3 },
-
- /* dma 0 */
- { DBGBUS_SSPP0, 40, 0 },
- { DBGBUS_SSPP0, 40, 1 },
- { DBGBUS_SSPP0, 40, 2 },
- { DBGBUS_SSPP0, 40, 3 },
- { DBGBUS_SSPP0, 40, 4 },
- { DBGBUS_SSPP0, 40, 5 },
- { DBGBUS_SSPP0, 40, 6 },
- { DBGBUS_SSPP0, 40, 7 },
-
- { DBGBUS_SSPP0, 41, 0 },
- { DBGBUS_SSPP0, 41, 1 },
- { DBGBUS_SSPP0, 41, 2 },
- { DBGBUS_SSPP0, 41, 3 },
- { DBGBUS_SSPP0, 41, 4 },
- { DBGBUS_SSPP0, 41, 5 },
- { DBGBUS_SSPP0, 41, 6 },
- { DBGBUS_SSPP0, 41, 7 },
-
- { DBGBUS_SSPP0, 42, 0 },
- { DBGBUS_SSPP0, 42, 1 },
- { DBGBUS_SSPP0, 42, 2 },
- { DBGBUS_SSPP0, 42, 3 },
- { DBGBUS_SSPP0, 42, 4 },
- { DBGBUS_SSPP0, 42, 5 },
- { DBGBUS_SSPP0, 42, 6 },
- { DBGBUS_SSPP0, 42, 7 },
-
- { DBGBUS_SSPP0, 44, 0 },
- { DBGBUS_SSPP0, 44, 1 },
- { DBGBUS_SSPP0, 44, 2 },
- { DBGBUS_SSPP0, 44, 3 },
- { DBGBUS_SSPP0, 44, 4 },
- { DBGBUS_SSPP0, 44, 5 },
- { DBGBUS_SSPP0, 44, 6 },
- { DBGBUS_SSPP0, 44, 7 },
-
- { DBGBUS_SSPP0, 45, 0 },
- { DBGBUS_SSPP0, 45, 1 },
- { DBGBUS_SSPP0, 45, 2 },
- { DBGBUS_SSPP0, 45, 3 },
- { DBGBUS_SSPP0, 45, 4 },
- { DBGBUS_SSPP0, 45, 5 },
- { DBGBUS_SSPP0, 45, 6 },
- { DBGBUS_SSPP0, 45, 7 },
-
- /* fetch sspp1 */
- /* vig 1 */
- { DBGBUS_SSPP1, 0, 0 },
- { DBGBUS_SSPP1, 0, 1 },
- { DBGBUS_SSPP1, 0, 2 },
- { DBGBUS_SSPP1, 0, 3 },
- { DBGBUS_SSPP1, 0, 4 },
- { DBGBUS_SSPP1, 0, 5 },
- { DBGBUS_SSPP1, 0, 6 },
- { DBGBUS_SSPP1, 0, 7 },
-
- { DBGBUS_SSPP1, 1, 0 },
- { DBGBUS_SSPP1, 1, 1 },
- { DBGBUS_SSPP1, 1, 2 },
- { DBGBUS_SSPP1, 1, 3 },
- { DBGBUS_SSPP1, 1, 4 },
- { DBGBUS_SSPP1, 1, 5 },
- { DBGBUS_SSPP1, 1, 6 },
- { DBGBUS_SSPP1, 1, 7 },
-
- { DBGBUS_SSPP1, 2, 0 },
- { DBGBUS_SSPP1, 2, 1 },
- { DBGBUS_SSPP1, 2, 2 },
- { DBGBUS_SSPP1, 2, 3 },
- { DBGBUS_SSPP1, 2, 4 },
- { DBGBUS_SSPP1, 2, 5 },
- { DBGBUS_SSPP1, 2, 6 },
- { DBGBUS_SSPP1, 2, 7 },
-
- { DBGBUS_SSPP1, 4, 0 },
- { DBGBUS_SSPP1, 4, 1 },
- { DBGBUS_SSPP1, 4, 2 },
- { DBGBUS_SSPP1, 4, 3 },
- { DBGBUS_SSPP1, 4, 4 },
- { DBGBUS_SSPP1, 4, 5 },
- { DBGBUS_SSPP1, 4, 6 },
- { DBGBUS_SSPP1, 4, 7 },
-
- { DBGBUS_SSPP1, 5, 0 },
- { DBGBUS_SSPP1, 5, 1 },
- { DBGBUS_SSPP1, 5, 2 },
- { DBGBUS_SSPP1, 5, 3 },
- { DBGBUS_SSPP1, 5, 4 },
- { DBGBUS_SSPP1, 5, 5 },
- { DBGBUS_SSPP1, 5, 6 },
- { DBGBUS_SSPP1, 5, 7 },
-
- /* vig 3 */
- { DBGBUS_SSPP1, 20, 0 },
- { DBGBUS_SSPP1, 20, 1 },
- { DBGBUS_SSPP1, 20, 2 },
- { DBGBUS_SSPP1, 20, 3 },
- { DBGBUS_SSPP1, 20, 4 },
- { DBGBUS_SSPP1, 20, 5 },
- { DBGBUS_SSPP1, 20, 6 },
- { DBGBUS_SSPP1, 20, 7 },
-
- { DBGBUS_SSPP1, 21, 0 },
- { DBGBUS_SSPP1, 21, 1 },
- { DBGBUS_SSPP1, 21, 2 },
- { DBGBUS_SSPP1, 21, 3 },
- { DBGBUS_SSPP1, 21, 4 },
- { DBGBUS_SSPP1, 21, 5 },
- { DBGBUS_SSPP1, 21, 6 },
- { DBGBUS_SSPP1, 21, 7 },
-
- { DBGBUS_SSPP1, 22, 0 },
- { DBGBUS_SSPP1, 22, 1 },
- { DBGBUS_SSPP1, 22, 2 },
- { DBGBUS_SSPP1, 22, 3 },
- { DBGBUS_SSPP1, 22, 4 },
- { DBGBUS_SSPP1, 22, 5 },
- { DBGBUS_SSPP1, 22, 6 },
- { DBGBUS_SSPP1, 22, 7 },
-
- { DBGBUS_SSPP1, 24, 0 },
- { DBGBUS_SSPP1, 24, 1 },
- { DBGBUS_SSPP1, 24, 2 },
- { DBGBUS_SSPP1, 24, 3 },
- { DBGBUS_SSPP1, 24, 4 },
- { DBGBUS_SSPP1, 24, 5 },
- { DBGBUS_SSPP1, 24, 6 },
- { DBGBUS_SSPP1, 24, 7 },
-
- { DBGBUS_SSPP1, 25, 0 },
- { DBGBUS_SSPP1, 25, 1 },
- { DBGBUS_SSPP1, 25, 2 },
- { DBGBUS_SSPP1, 25, 3 },
- { DBGBUS_SSPP1, 25, 4 },
- { DBGBUS_SSPP1, 25, 5 },
- { DBGBUS_SSPP1, 25, 6 },
- { DBGBUS_SSPP1, 25, 7 },
-
- /* dma 3 */
- { DBGBUS_SSPP1, 30, 0 },
- { DBGBUS_SSPP1, 30, 1 },
- { DBGBUS_SSPP1, 30, 2 },
- { DBGBUS_SSPP1, 30, 3 },
- { DBGBUS_SSPP1, 30, 4 },
- { DBGBUS_SSPP1, 30, 5 },
- { DBGBUS_SSPP1, 30, 6 },
- { DBGBUS_SSPP1, 30, 7 },
-
- { DBGBUS_SSPP1, 31, 0 },
- { DBGBUS_SSPP1, 31, 1 },
- { DBGBUS_SSPP1, 31, 2 },
- { DBGBUS_SSPP1, 31, 3 },
- { DBGBUS_SSPP1, 31, 4 },
- { DBGBUS_SSPP1, 31, 5 },
- { DBGBUS_SSPP1, 31, 6 },
- { DBGBUS_SSPP1, 31, 7 },
-
- { DBGBUS_SSPP1, 32, 0 },
- { DBGBUS_SSPP1, 32, 1 },
- { DBGBUS_SSPP1, 32, 2 },
- { DBGBUS_SSPP1, 32, 3 },
- { DBGBUS_SSPP1, 32, 4 },
- { DBGBUS_SSPP1, 32, 5 },
- { DBGBUS_SSPP1, 32, 6 },
- { DBGBUS_SSPP1, 32, 7 },
-
- { DBGBUS_SSPP1, 33, 0 },
- { DBGBUS_SSPP1, 33, 1 },
- { DBGBUS_SSPP1, 33, 2 },
- { DBGBUS_SSPP1, 33, 3 },
- { DBGBUS_SSPP1, 33, 4 },
- { DBGBUS_SSPP1, 33, 5 },
- { DBGBUS_SSPP1, 33, 6 },
- { DBGBUS_SSPP1, 33, 7 },
-
- { DBGBUS_SSPP1, 34, 0 },
- { DBGBUS_SSPP1, 34, 1 },
- { DBGBUS_SSPP1, 34, 2 },
- { DBGBUS_SSPP1, 34, 3 },
- { DBGBUS_SSPP1, 34, 4 },
- { DBGBUS_SSPP1, 34, 5 },
- { DBGBUS_SSPP1, 34, 6 },
- { DBGBUS_SSPP1, 34, 7 },
-
- { DBGBUS_SSPP1, 35, 0 },
- { DBGBUS_SSPP1, 35, 1 },
- { DBGBUS_SSPP1, 35, 2 },
-
- /* dma 1 */
- { DBGBUS_SSPP1, 40, 0 },
- { DBGBUS_SSPP1, 40, 1 },
- { DBGBUS_SSPP1, 40, 2 },
- { DBGBUS_SSPP1, 40, 3 },
- { DBGBUS_SSPP1, 40, 4 },
- { DBGBUS_SSPP1, 40, 5 },
- { DBGBUS_SSPP1, 40, 6 },
- { DBGBUS_SSPP1, 40, 7 },
-
- { DBGBUS_SSPP1, 41, 0 },
- { DBGBUS_SSPP1, 41, 1 },
- { DBGBUS_SSPP1, 41, 2 },
- { DBGBUS_SSPP1, 41, 3 },
- { DBGBUS_SSPP1, 41, 4 },
- { DBGBUS_SSPP1, 41, 5 },
- { DBGBUS_SSPP1, 41, 6 },
- { DBGBUS_SSPP1, 41, 7 },
-
- { DBGBUS_SSPP1, 42, 0 },
- { DBGBUS_SSPP1, 42, 1 },
- { DBGBUS_SSPP1, 42, 2 },
- { DBGBUS_SSPP1, 42, 3 },
- { DBGBUS_SSPP1, 42, 4 },
- { DBGBUS_SSPP1, 42, 5 },
- { DBGBUS_SSPP1, 42, 6 },
- { DBGBUS_SSPP1, 42, 7 },
-
- { DBGBUS_SSPP1, 44, 0 },
- { DBGBUS_SSPP1, 44, 1 },
- { DBGBUS_SSPP1, 44, 2 },
- { DBGBUS_SSPP1, 44, 3 },
- { DBGBUS_SSPP1, 44, 4 },
- { DBGBUS_SSPP1, 44, 5 },
- { DBGBUS_SSPP1, 44, 6 },
- { DBGBUS_SSPP1, 44, 7 },
-
- { DBGBUS_SSPP1, 45, 0 },
- { DBGBUS_SSPP1, 45, 1 },
- { DBGBUS_SSPP1, 45, 2 },
- { DBGBUS_SSPP1, 45, 3 },
- { DBGBUS_SSPP1, 45, 4 },
- { DBGBUS_SSPP1, 45, 5 },
- { DBGBUS_SSPP1, 45, 6 },
- { DBGBUS_SSPP1, 45, 7 },
-
- /* dspp */
- { DBGBUS_DSPP, 13, 0 },
- { DBGBUS_DSPP, 19, 0 },
- { DBGBUS_DSPP, 14, 0 },
- { DBGBUS_DSPP, 14, 1 },
- { DBGBUS_DSPP, 14, 3 },
- { DBGBUS_DSPP, 20, 0 },
- { DBGBUS_DSPP, 20, 1 },
- { DBGBUS_DSPP, 20, 3 },
-
- /* ppb_0 */
- { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
- /* ppb_1 */
- { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
- /* lm_lut */
- { DBGBUS_DSPP, 109, 0 },
- { DBGBUS_DSPP, 105, 0 },
- { DBGBUS_DSPP, 103, 0 },
-
- /* crossbar */
- { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
- /* rotator */
- { DBGBUS_DSPP, 9, 0},
-
- /* blend */
- /* LM0 */
- { DBGBUS_DSPP, 63, 1},
- { DBGBUS_DSPP, 63, 2},
- { DBGBUS_DSPP, 63, 3},
- { DBGBUS_DSPP, 63, 4},
- { DBGBUS_DSPP, 63, 5},
- { DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 64, 1},
- { DBGBUS_DSPP, 64, 2},
- { DBGBUS_DSPP, 64, 3},
- { DBGBUS_DSPP, 64, 4},
- { DBGBUS_DSPP, 64, 5},
- { DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 65, 1},
- { DBGBUS_DSPP, 65, 2},
- { DBGBUS_DSPP, 65, 3},
- { DBGBUS_DSPP, 65, 4},
- { DBGBUS_DSPP, 65, 5},
- { DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 66, 1},
- { DBGBUS_DSPP, 66, 2},
- { DBGBUS_DSPP, 66, 3},
- { DBGBUS_DSPP, 66, 4},
- { DBGBUS_DSPP, 66, 5},
- { DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 67, 1},
- { DBGBUS_DSPP, 67, 2},
- { DBGBUS_DSPP, 67, 3},
- { DBGBUS_DSPP, 67, 4},
- { DBGBUS_DSPP, 67, 5},
- { DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 68, 1},
- { DBGBUS_DSPP, 68, 2},
- { DBGBUS_DSPP, 68, 3},
- { DBGBUS_DSPP, 68, 4},
- { DBGBUS_DSPP, 68, 5},
- { DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 69, 1},
- { DBGBUS_DSPP, 69, 2},
- { DBGBUS_DSPP, 69, 3},
- { DBGBUS_DSPP, 69, 4},
- { DBGBUS_DSPP, 69, 5},
- { DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 84, 1},
- { DBGBUS_DSPP, 84, 2},
- { DBGBUS_DSPP, 84, 3},
- { DBGBUS_DSPP, 84, 4},
- { DBGBUS_DSPP, 84, 5},
- { DBGBUS_DSPP, 84, 6},
- { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
-
-
- { DBGBUS_DSPP, 85, 1},
- { DBGBUS_DSPP, 85, 2},
- { DBGBUS_DSPP, 85, 3},
- { DBGBUS_DSPP, 85, 4},
- { DBGBUS_DSPP, 85, 5},
- { DBGBUS_DSPP, 85, 6},
- { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
-
-
- { DBGBUS_DSPP, 86, 1},
- { DBGBUS_DSPP, 86, 2},
- { DBGBUS_DSPP, 86, 3},
- { DBGBUS_DSPP, 86, 4},
- { DBGBUS_DSPP, 86, 5},
- { DBGBUS_DSPP, 86, 6},
- { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
-
-
- { DBGBUS_DSPP, 87, 1},
- { DBGBUS_DSPP, 87, 2},
- { DBGBUS_DSPP, 87, 3},
- { DBGBUS_DSPP, 87, 4},
- { DBGBUS_DSPP, 87, 5},
- { DBGBUS_DSPP, 87, 6},
- { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
-
- /* LM1 */
- { DBGBUS_DSPP, 70, 1},
- { DBGBUS_DSPP, 70, 2},
- { DBGBUS_DSPP, 70, 3},
- { DBGBUS_DSPP, 70, 4},
- { DBGBUS_DSPP, 70, 5},
- { DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 71, 1},
- { DBGBUS_DSPP, 71, 2},
- { DBGBUS_DSPP, 71, 3},
- { DBGBUS_DSPP, 71, 4},
- { DBGBUS_DSPP, 71, 5},
- { DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 72, 1},
- { DBGBUS_DSPP, 72, 2},
- { DBGBUS_DSPP, 72, 3},
- { DBGBUS_DSPP, 72, 4},
- { DBGBUS_DSPP, 72, 5},
- { DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 73, 1},
- { DBGBUS_DSPP, 73, 2},
- { DBGBUS_DSPP, 73, 3},
- { DBGBUS_DSPP, 73, 4},
- { DBGBUS_DSPP, 73, 5},
- { DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 74, 1},
- { DBGBUS_DSPP, 74, 2},
- { DBGBUS_DSPP, 74, 3},
- { DBGBUS_DSPP, 74, 4},
- { DBGBUS_DSPP, 74, 5},
- { DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 75, 1},
- { DBGBUS_DSPP, 75, 2},
- { DBGBUS_DSPP, 75, 3},
- { DBGBUS_DSPP, 75, 4},
- { DBGBUS_DSPP, 75, 5},
- { DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 76, 1},
- { DBGBUS_DSPP, 76, 2},
- { DBGBUS_DSPP, 76, 3},
- { DBGBUS_DSPP, 76, 4},
- { DBGBUS_DSPP, 76, 5},
- { DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 88, 1},
- { DBGBUS_DSPP, 88, 2},
- { DBGBUS_DSPP, 88, 3},
- { DBGBUS_DSPP, 88, 4},
- { DBGBUS_DSPP, 88, 5},
- { DBGBUS_DSPP, 88, 6},
- { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 89, 1},
- { DBGBUS_DSPP, 89, 2},
- { DBGBUS_DSPP, 89, 3},
- { DBGBUS_DSPP, 89, 4},
- { DBGBUS_DSPP, 89, 5},
- { DBGBUS_DSPP, 89, 6},
- { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 90, 1},
- { DBGBUS_DSPP, 90, 2},
- { DBGBUS_DSPP, 90, 3},
- { DBGBUS_DSPP, 90, 4},
- { DBGBUS_DSPP, 90, 5},
- { DBGBUS_DSPP, 90, 6},
- { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 91, 1},
- { DBGBUS_DSPP, 91, 2},
- { DBGBUS_DSPP, 91, 3},
- { DBGBUS_DSPP, 91, 4},
- { DBGBUS_DSPP, 91, 5},
- { DBGBUS_DSPP, 91, 6},
- { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
-
- /* LM2 */
- { DBGBUS_DSPP, 77, 0},
- { DBGBUS_DSPP, 77, 1},
- { DBGBUS_DSPP, 77, 2},
- { DBGBUS_DSPP, 77, 3},
- { DBGBUS_DSPP, 77, 4},
- { DBGBUS_DSPP, 77, 5},
- { DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 78, 0},
- { DBGBUS_DSPP, 78, 1},
- { DBGBUS_DSPP, 78, 2},
- { DBGBUS_DSPP, 78, 3},
- { DBGBUS_DSPP, 78, 4},
- { DBGBUS_DSPP, 78, 5},
- { DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 79, 0},
- { DBGBUS_DSPP, 79, 1},
- { DBGBUS_DSPP, 79, 2},
- { DBGBUS_DSPP, 79, 3},
- { DBGBUS_DSPP, 79, 4},
- { DBGBUS_DSPP, 79, 5},
- { DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 80, 0},
- { DBGBUS_DSPP, 80, 1},
- { DBGBUS_DSPP, 80, 2},
- { DBGBUS_DSPP, 80, 3},
- { DBGBUS_DSPP, 80, 4},
- { DBGBUS_DSPP, 80, 5},
- { DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 81, 0},
- { DBGBUS_DSPP, 81, 1},
- { DBGBUS_DSPP, 81, 2},
- { DBGBUS_DSPP, 81, 3},
- { DBGBUS_DSPP, 81, 4},
- { DBGBUS_DSPP, 81, 5},
- { DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 82, 0},
- { DBGBUS_DSPP, 82, 1},
- { DBGBUS_DSPP, 82, 2},
- { DBGBUS_DSPP, 82, 3},
- { DBGBUS_DSPP, 82, 4},
- { DBGBUS_DSPP, 82, 5},
- { DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 83, 0},
- { DBGBUS_DSPP, 83, 1},
- { DBGBUS_DSPP, 83, 2},
- { DBGBUS_DSPP, 83, 3},
- { DBGBUS_DSPP, 83, 4},
- { DBGBUS_DSPP, 83, 5},
- { DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 92, 1},
- { DBGBUS_DSPP, 92, 2},
- { DBGBUS_DSPP, 92, 3},
- { DBGBUS_DSPP, 92, 4},
- { DBGBUS_DSPP, 92, 5},
- { DBGBUS_DSPP, 92, 6},
- { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 93, 1},
- { DBGBUS_DSPP, 93, 2},
- { DBGBUS_DSPP, 93, 3},
- { DBGBUS_DSPP, 93, 4},
- { DBGBUS_DSPP, 93, 5},
- { DBGBUS_DSPP, 93, 6},
- { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 94, 1},
- { DBGBUS_DSPP, 94, 2},
- { DBGBUS_DSPP, 94, 3},
- { DBGBUS_DSPP, 94, 4},
- { DBGBUS_DSPP, 94, 5},
- { DBGBUS_DSPP, 94, 6},
- { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 95, 1},
- { DBGBUS_DSPP, 95, 2},
- { DBGBUS_DSPP, 95, 3},
- { DBGBUS_DSPP, 95, 4},
- { DBGBUS_DSPP, 95, 5},
- { DBGBUS_DSPP, 95, 6},
- { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
-
- /* LM5 */
- { DBGBUS_DSPP, 110, 1},
- { DBGBUS_DSPP, 110, 2},
- { DBGBUS_DSPP, 110, 3},
- { DBGBUS_DSPP, 110, 4},
- { DBGBUS_DSPP, 110, 5},
- { DBGBUS_DSPP, 110, 6},
- { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 111, 1},
- { DBGBUS_DSPP, 111, 2},
- { DBGBUS_DSPP, 111, 3},
- { DBGBUS_DSPP, 111, 4},
- { DBGBUS_DSPP, 111, 5},
- { DBGBUS_DSPP, 111, 6},
- { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 112, 1},
- { DBGBUS_DSPP, 112, 2},
- { DBGBUS_DSPP, 112, 3},
- { DBGBUS_DSPP, 112, 4},
- { DBGBUS_DSPP, 112, 5},
- { DBGBUS_DSPP, 112, 6},
- { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 113, 1},
- { DBGBUS_DSPP, 113, 2},
- { DBGBUS_DSPP, 113, 3},
- { DBGBUS_DSPP, 113, 4},
- { DBGBUS_DSPP, 113, 5},
- { DBGBUS_DSPP, 113, 6},
- { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 114, 1},
- { DBGBUS_DSPP, 114, 2},
- { DBGBUS_DSPP, 114, 3},
- { DBGBUS_DSPP, 114, 4},
- { DBGBUS_DSPP, 114, 5},
- { DBGBUS_DSPP, 114, 6},
- { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 115, 1},
- { DBGBUS_DSPP, 115, 2},
- { DBGBUS_DSPP, 115, 3},
- { DBGBUS_DSPP, 115, 4},
- { DBGBUS_DSPP, 115, 5},
- { DBGBUS_DSPP, 115, 6},
- { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 116, 1},
- { DBGBUS_DSPP, 116, 2},
- { DBGBUS_DSPP, 116, 3},
- { DBGBUS_DSPP, 116, 4},
- { DBGBUS_DSPP, 116, 5},
- { DBGBUS_DSPP, 116, 6},
- { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 117, 1},
- { DBGBUS_DSPP, 117, 2},
- { DBGBUS_DSPP, 117, 3},
- { DBGBUS_DSPP, 117, 4},
- { DBGBUS_DSPP, 117, 5},
- { DBGBUS_DSPP, 117, 6},
- { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 118, 1},
- { DBGBUS_DSPP, 118, 2},
- { DBGBUS_DSPP, 118, 3},
- { DBGBUS_DSPP, 118, 4},
- { DBGBUS_DSPP, 118, 5},
- { DBGBUS_DSPP, 118, 6},
- { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 119, 1},
- { DBGBUS_DSPP, 119, 2},
- { DBGBUS_DSPP, 119, 3},
- { DBGBUS_DSPP, 119, 4},
- { DBGBUS_DSPP, 119, 5},
- { DBGBUS_DSPP, 119, 6},
- { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 120, 1},
- { DBGBUS_DSPP, 120, 2},
- { DBGBUS_DSPP, 120, 3},
- { DBGBUS_DSPP, 120, 4},
- { DBGBUS_DSPP, 120, 5},
- { DBGBUS_DSPP, 120, 6},
- { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
-
- /* csc */
- { DBGBUS_SSPP0, 7, 0},
- { DBGBUS_SSPP0, 7, 1},
- { DBGBUS_SSPP0, 27, 0},
- { DBGBUS_SSPP0, 27, 1},
- { DBGBUS_SSPP1, 7, 0},
- { DBGBUS_SSPP1, 7, 1},
- { DBGBUS_SSPP1, 27, 0},
- { DBGBUS_SSPP1, 27, 1},
-
- /* pcc */
- { DBGBUS_SSPP0, 3, 3},
- { DBGBUS_SSPP0, 23, 3},
- { DBGBUS_SSPP0, 33, 3},
- { DBGBUS_SSPP0, 43, 3},
- { DBGBUS_SSPP1, 3, 3},
- { DBGBUS_SSPP1, 23, 3},
- { DBGBUS_SSPP1, 33, 3},
- { DBGBUS_SSPP1, 43, 3},
-
- /* spa */
- { DBGBUS_SSPP0, 8, 0},
- { DBGBUS_SSPP0, 28, 0},
- { DBGBUS_SSPP1, 8, 0},
- { DBGBUS_SSPP1, 28, 0},
- { DBGBUS_DSPP, 13, 0},
- { DBGBUS_DSPP, 19, 0},
-
- /* igc */
- { DBGBUS_SSPP0, 17, 0},
- { DBGBUS_SSPP0, 17, 1},
- { DBGBUS_SSPP0, 17, 3},
- { DBGBUS_SSPP0, 37, 0},
- { DBGBUS_SSPP0, 37, 1},
- { DBGBUS_SSPP0, 37, 3},
- { DBGBUS_SSPP0, 46, 0},
- { DBGBUS_SSPP0, 46, 1},
- { DBGBUS_SSPP0, 46, 3},
-
- { DBGBUS_SSPP1, 17, 0},
- { DBGBUS_SSPP1, 17, 1},
- { DBGBUS_SSPP1, 17, 3},
- { DBGBUS_SSPP1, 37, 0},
- { DBGBUS_SSPP1, 37, 1},
- { DBGBUS_SSPP1, 37, 3},
- { DBGBUS_SSPP1, 46, 0},
- { DBGBUS_SSPP1, 46, 1},
- { DBGBUS_SSPP1, 46, 3},
-
- { DBGBUS_DSPP, 14, 0},
- { DBGBUS_DSPP, 14, 1},
- { DBGBUS_DSPP, 14, 3},
- { DBGBUS_DSPP, 20, 0},
- { DBGBUS_DSPP, 20, 1},
- { DBGBUS_DSPP, 20, 3},
-
- /* intf0-3 */
- { DBGBUS_PERIPH, 0, 0},
- { DBGBUS_PERIPH, 1, 0},
- { DBGBUS_PERIPH, 2, 0},
- { DBGBUS_PERIPH, 3, 0},
-
- /* te counter wrapper */
- { DBGBUS_PERIPH, 60, 0},
-
- /* dsc0 */
- { DBGBUS_PERIPH, 47, 0},
- { DBGBUS_PERIPH, 47, 1},
- { DBGBUS_PERIPH, 47, 2},
- { DBGBUS_PERIPH, 47, 3},
- { DBGBUS_PERIPH, 47, 4},
- { DBGBUS_PERIPH, 47, 5},
- { DBGBUS_PERIPH, 47, 6},
- { DBGBUS_PERIPH, 47, 7},
-
- /* dsc1 */
- { DBGBUS_PERIPH, 48, 0},
- { DBGBUS_PERIPH, 48, 1},
- { DBGBUS_PERIPH, 48, 2},
- { DBGBUS_PERIPH, 48, 3},
- { DBGBUS_PERIPH, 48, 4},
- { DBGBUS_PERIPH, 48, 5},
- { DBGBUS_PERIPH, 48, 6},
- { DBGBUS_PERIPH, 48, 7},
-
- /* dsc2 */
- { DBGBUS_PERIPH, 51, 0},
- { DBGBUS_PERIPH, 51, 1},
- { DBGBUS_PERIPH, 51, 2},
- { DBGBUS_PERIPH, 51, 3},
- { DBGBUS_PERIPH, 51, 4},
- { DBGBUS_PERIPH, 51, 5},
- { DBGBUS_PERIPH, 51, 6},
- { DBGBUS_PERIPH, 51, 7},
-
- /* dsc3 */
- { DBGBUS_PERIPH, 52, 0},
- { DBGBUS_PERIPH, 52, 1},
- { DBGBUS_PERIPH, 52, 2},
- { DBGBUS_PERIPH, 52, 3},
- { DBGBUS_PERIPH, 52, 4},
- { DBGBUS_PERIPH, 52, 5},
- { DBGBUS_PERIPH, 52, 6},
- { DBGBUS_PERIPH, 52, 7},
-
- /* tear-check */
- { DBGBUS_PERIPH, 63, 0 },
- { DBGBUS_PERIPH, 64, 0 },
- { DBGBUS_PERIPH, 65, 0 },
- { DBGBUS_PERIPH, 73, 0 },
- { DBGBUS_PERIPH, 74, 0 },
-
- /* cdwn */
- { DBGBUS_PERIPH, 80, 0},
- { DBGBUS_PERIPH, 80, 1},
- { DBGBUS_PERIPH, 80, 2},
-
- { DBGBUS_PERIPH, 81, 0},
- { DBGBUS_PERIPH, 81, 1},
- { DBGBUS_PERIPH, 81, 2},
-
- { DBGBUS_PERIPH, 82, 0},
- { DBGBUS_PERIPH, 82, 1},
- { DBGBUS_PERIPH, 82, 2},
- { DBGBUS_PERIPH, 82, 3},
- { DBGBUS_PERIPH, 82, 4},
- { DBGBUS_PERIPH, 82, 5},
- { DBGBUS_PERIPH, 82, 6},
- { DBGBUS_PERIPH, 82, 7},
-
- /* hdmi */
- { DBGBUS_PERIPH, 68, 0},
- { DBGBUS_PERIPH, 68, 1},
- { DBGBUS_PERIPH, 68, 2},
- { DBGBUS_PERIPH, 68, 3},
- { DBGBUS_PERIPH, 68, 4},
- { DBGBUS_PERIPH, 68, 5},
-
- /* edp */
- { DBGBUS_PERIPH, 69, 0},
- { DBGBUS_PERIPH, 69, 1},
- { DBGBUS_PERIPH, 69, 2},
- { DBGBUS_PERIPH, 69, 3},
- { DBGBUS_PERIPH, 69, 4},
- { DBGBUS_PERIPH, 69, 5},
-
- /* dsi0 */
- { DBGBUS_PERIPH, 70, 0},
- { DBGBUS_PERIPH, 70, 1},
- { DBGBUS_PERIPH, 70, 2},
- { DBGBUS_PERIPH, 70, 3},
- { DBGBUS_PERIPH, 70, 4},
- { DBGBUS_PERIPH, 70, 5},
-
- /* dsi1 */
- { DBGBUS_PERIPH, 71, 0},
- { DBGBUS_PERIPH, 71, 1},
- { DBGBUS_PERIPH, 71, 2},
- { DBGBUS_PERIPH, 71, 3},
- { DBGBUS_PERIPH, 71, 4},
- { DBGBUS_PERIPH, 71, 5},
-};
-
-static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
- {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
- {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
- {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
- {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
- {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
- {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
- {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
-};
-
-/**
- * _dpu_dbg_enable_power - turn power on or off for hw register access
- * @enable: whether to turn power on or off
- */
-static inline void _dpu_dbg_enable_power(int enable)
-{
- if (enable)
- pm_runtime_get_sync(dpu_dbg_base.dev);
- else
- pm_runtime_put_sync(dpu_dbg_base.dev);
-}
-
-static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
-{
- bool in_log, in_mem;
- u32 **dump_mem = NULL;
- u32 *dump_addr = NULL;
- u32 status = 0;
- struct dpu_debug_bus_entry *head;
- phys_addr_t phys = 0;
- int list_size;
- int i;
- u32 offset;
- void __iomem *mem_base = NULL;
- struct dpu_dbg_reg_base *reg_base;
-
- if (!bus || !bus->cmn.entries_size)
- return;
-
- list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
- reg_base_head)
- if (strlen(reg_base->name) &&
- !strcmp(reg_base->name, bus->cmn.name))
- mem_base = reg_base->base + bus->top_blk_off;
-
- if (!mem_base) {
- pr_err("unable to find mem_base for %s\n", bus->cmn.name);
- return;
- }
-
- dump_mem = &bus->cmn.dumped_content;
-
-	/* each test point is dumped to memory as 4 values of 4 bytes each */
- list_size = (bus->cmn.entries_size * 4 * 4);
-
- in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
- in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
- if (!in_log && !in_mem)
- return;
-
- dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
- bus->cmn.name);
-
- if (in_mem) {
- if (!(*dump_mem))
- *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
- list_size, &phys, GFP_KERNEL);
-
- if (*dump_mem) {
- dump_addr = *dump_mem;
- dev_info(dpu_dbg_base.dev,
- "%s: start_addr:0x%pK len:0x%x\n",
- __func__, dump_addr, list_size);
- } else {
- in_mem = false;
-			pr_err("dump_mem: allocation failed\n");
- }
- }
-
- _dpu_dbg_enable_power(true);
- for (i = 0; i < bus->cmn.entries_size; i++) {
- head = bus->entries + i;
- writel_relaxed(TEST_MASK(head->block_id, head->test_id),
- mem_base + head->wr_addr);
- wmb(); /* make sure test bits were written */
-
- if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
- offset = DBGBUS_DSPP_STATUS;
- /* keep DSPP test point enabled */
- if (head->wr_addr != DBGBUS_DSPP)
- writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
- } else {
- offset = head->wr_addr + 0x4;
- }
-
- status = readl_relaxed(mem_base + offset);
-
- if (in_log)
- dev_info(dpu_dbg_base.dev,
- "waddr=0x%x blk=%d tst=%d val=0x%x\n",
- head->wr_addr, head->block_id,
- head->test_id, status);
-
- if (dump_addr && in_mem) {
- dump_addr[i*4] = head->wr_addr;
- dump_addr[i*4 + 1] = head->block_id;
- dump_addr[i*4 + 2] = head->test_id;
- dump_addr[i*4 + 3] = status;
- }
-
- if (head->analyzer)
- head->analyzer(mem_base, head, status);
-
- /* Disable debug bus once we are done */
- writel_relaxed(0, mem_base + head->wr_addr);
- if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
- head->wr_addr != DBGBUS_DSPP)
- writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
- }
- _dpu_dbg_enable_power(false);
-
- dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
- bus->cmn.name);
-}
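For reference, each iteration of the dump loop above reduces to a select/barrier/read
sequence. A minimal sketch of that sequence, assuming TEST_MASK and
DBGBUS_DSPP_STATUS are the helpers defined earlier in this (removed) file:

	/* Sketch only: select one test point and sample its status. */
	static u32 dpu_dbg_bus_sample(void __iomem *mem_base,
				      struct dpu_debug_bus_entry *e,
				      u32 flags)
	{
		u32 offset;

		/* program block_id/test_id into the bus select register */
		writel_relaxed(TEST_MASK(e->block_id, e->test_id),
			       mem_base + e->wr_addr);
		wmb(); /* make sure the select lands before sampling */

		/* DSPP test points report through a shared status register */
		offset = (flags & DBGBUS_FLAGS_DSPP) ?
				DBGBUS_DSPP_STATUS : e->wr_addr + 0x4;
		return readl_relaxed(mem_base + offset);
	}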
-
-static void _dpu_dbg_dump_vbif_debug_bus_entry(
- struct vbif_debug_bus_entry *head, void __iomem *mem_base,
- u32 *dump_addr, bool in_log)
-{
- int i, j;
- u32 val;
-
- if (!dump_addr && !in_log)
- return;
-
- for (i = 0; i < head->block_cnt; i++) {
- writel_relaxed(1 << (i + head->bit_offset),
- mem_base + head->block_bus_addr);
-		/* make sure that the current bus block is enabled */
- wmb();
- for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
- writel_relaxed(j, mem_base + head->block_bus_addr + 4);
- /* make sure that test point is enabled */
- wmb();
- val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
- if (dump_addr) {
- *dump_addr++ = head->block_bus_addr;
- *dump_addr++ = i;
- *dump_addr++ = j;
- *dump_addr++ = val;
- }
- if (in_log)
- dev_info(dpu_dbg_base.dev,
- "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
- head->block_bus_addr, i, j, val);
- }
- }
-}
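Both dump paths store one record of four u32 values per sampled test point,
which is why the buffer sizing in this file multiplies the sample count by
16 bytes. A sketch of the implied record layout (hypothetical struct, not
present in the source):

	/* Sketch: one dump record as laid out in dumped_content. */
	struct dpu_dbg_dump_record {
		u32 bus_addr;	/* wr_addr, or block_bus_addr for vbif */
		u32 block_id;	/* block id, or arb/xin bit index for vbif */
		u32 test_id;	/* test point index within the block */
		u32 status;	/* value read back from the bus */
	};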
-
-static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
-{
- bool in_log, in_mem;
- u32 **dump_mem = NULL;
- u32 *dump_addr = NULL;
- u32 value, d0, d1;
- unsigned long reg, reg1, reg2;
- struct vbif_debug_bus_entry *head;
- phys_addr_t phys = 0;
- int i, list_size = 0;
- void __iomem *mem_base = NULL;
- struct vbif_debug_bus_entry *dbg_bus;
- u32 bus_size;
- struct dpu_dbg_reg_base *reg_base;
-
- if (!bus || !bus->cmn.entries_size)
- return;
-
- list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
- reg_base_head)
- if (strlen(reg_base->name) &&
- !strcmp(reg_base->name, bus->cmn.name))
- mem_base = reg_base->base;
-
- if (!mem_base) {
- pr_err("unable to find mem_base for %s\n", bus->cmn.name);
- return;
- }
-
- dbg_bus = bus->entries;
- bus_size = bus->cmn.entries_size;
- list_size = bus->cmn.entries_size;
- dump_mem = &bus->cmn.dumped_content;
-
- dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
- bus->cmn.name);
-
- if (!dump_mem || !dbg_bus || !bus_size || !list_size)
- return;
-
- /* allocate memory for each test point */
- for (i = 0; i < bus_size; i++) {
- head = dbg_bus + i;
- list_size += (head->block_cnt * head->test_pnt_cnt);
- }
-
-	/* 4 bytes * 4 entries for each test point */
- list_size *= 16;
-
- in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
- in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
- if (!in_log && !in_mem)
- return;
-
- if (in_mem) {
- if (!(*dump_mem))
- *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
- list_size, &phys, GFP_KERNEL);
-
- if (*dump_mem) {
- dump_addr = *dump_mem;
- dev_info(dpu_dbg_base.dev,
- "%s: start_addr:0x%pK len:0x%x\n",
- __func__, dump_addr, list_size);
- } else {
- in_mem = false;
-			pr_err("dump_mem: allocation failed\n");
- }
- }
-
- _dpu_dbg_enable_power(true);
-
- value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
- writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
-
- /* make sure that vbif core is on */
- wmb();
-
-	/*
- * Extract VBIF error info based on XIN halt and error status.
- * If the XIN client is not in HALT state, or an error is detected,
- * then retrieve the VBIF error info for it.
- */
- reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
- reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
- reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
- dev_err(dpu_dbg_base.dev,
- "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
- reg, reg1, reg2);
- reg >>= 16;
- reg &= ~(reg1 | reg2);
- for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
- if (!test_bit(0, &reg)) {
- writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
- /* make sure reg write goes through */
- wmb();
-
- d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
- d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
-
- dev_err(dpu_dbg_base.dev,
- "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
- i, d0, d1);
- }
- reg >>= 1;
- }
-
- for (i = 0; i < bus_size; i++) {
- head = dbg_bus + i;
-
- writel_relaxed(0, mem_base + head->disable_bus_addr);
- writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
-		/* make sure that the other bus is off */
- wmb();
-
- _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
- in_log);
- if (dump_addr)
- dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
- }
-
- _dpu_dbg_enable_power(false);
-
- dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
- bus->cmn.name);
-}
-
-/**
- * _dpu_dump_array - dump array of register bases
- * @name: string indicating origin of dump
- * @dump_dbgbus_dpu: whether to dump the dpu debug bus
- * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
- */
-static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt)
-{
- if (dump_dbgbus_dpu)
- _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
-
- if (dump_dbgbus_vbif_rt)
- _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
-}
-
-/**
- * _dpu_dump_work - deferred dump work function
- * @work: work structure
- */
-static void _dpu_dump_work(struct work_struct *work)
-{
- _dpu_dump_array("dpudump_workitem",
- dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
- dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
-}
-
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt)
-{
- if (queue_work && work_pending(&dpu_dbg_base.dump_work))
- return;
-
- if (!queue_work) {
- _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
- return;
- }
-
- /* schedule work to dump later */
- dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
- dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
- dump_dbgbus_vbif_rt;
- schedule_work(&dpu_dbg_base.dump_work);
-}
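Callers in atomic context were expected to pass queue_work=true so that the
register reads happen from the worker; a hypothetical call site (the
synchronous form, dpu_dbg_dump(false, __func__, true, true), is what the
encoder call sites removed later in this patch used):

	/* Sketch: defer the dump when triggering from a timeout handler. */
	dpu_dbg_dump(true, __func__, true, true);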
-
-/**
- * dpu_dbg_debugfs_open - debugfs open handler for debug dump
- * @inode: debugfs inode
- * @file: file handle
- */
-static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
-{
- /* non-seekable */
- file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- file->private_data = inode->i_private;
- return 0;
-}
-
-/**
- * dpu_dbg_dump_write - debugfs write handler for debug dump
- * @file: file handle
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t dpu_dbg_dump_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- _dpu_dump_array("dump_debugfs", true, true);
- return count;
-}
-
-static const struct file_operations dpu_dbg_dump_fops = {
- .open = dpu_dbg_debugfs_open,
- .write = dpu_dbg_dump_write,
-};
-
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
- static struct dpu_dbg_base *dbg = &dpu_dbg_base;
- char debug_name[80] = "";
-
- if (!debugfs_root)
- return -EINVAL;
-
- debugfs_create_file("dump", 0600, debugfs_root, NULL,
- &dpu_dbg_dump_fops);
-
- if (dbg->dbgbus_dpu.entries) {
- dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
- snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
- dbg->dbgbus_dpu.cmn.name);
- dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
- debugfs_create_u32(debug_name, 0600, debugfs_root,
- &dbg->dbgbus_dpu.cmn.enable_mask);
- }
-
- if (dbg->dbgbus_vbif_rt.entries) {
- dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
- snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
- dbg->dbgbus_vbif_rt.cmn.name);
- dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
- debugfs_create_u32(debug_name, 0600, debugfs_root,
- &dbg->dbgbus_vbif_rt.cmn.enable_mask);
- }
-
- return 0;
-}
-
-static void _dpu_dbg_debugfs_destroy(void)
-{
-}
-
-void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
- static struct dpu_dbg_base *dbg = &dpu_dbg_base;
-
- memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
- memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
-
- if (IS_MSM8998_TARGET(hwversion)) {
- dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
- dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
- dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
- dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
- dbg->dbgbus_vbif_rt.cmn.entries_size =
- ARRAY_SIZE(vbif_dbg_bus_msm8998);
- } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
- dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
- dbg->dbgbus_dpu.cmn.entries_size =
- ARRAY_SIZE(dbg_bus_dpu_sdm845);
- dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
- /* vbif is unchanged vs 8998 */
- dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
- dbg->dbgbus_vbif_rt.cmn.entries_size =
- ARRAY_SIZE(vbif_dbg_bus_msm8998);
- } else {
- pr_err("unsupported chipset id %X\n", hwversion);
- }
-}
-
-int dpu_dbg_init(struct device *dev)
-{
- if (!dev) {
- pr_err("invalid params\n");
- return -EINVAL;
- }
-
- INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
- dpu_dbg_base.dev = dev;
-
- INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
-
- return 0;
-}
-
-/**
- * dpu_dbg_destroy - destroy dpu debug facilities
- */
-void dpu_dbg_destroy(void)
-{
- _dpu_dbg_debugfs_destroy();
-}
-
-void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
- dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
deleted file mode 100644
index 1e6fa945f98b..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef DPU_DBG_H_
-#define DPU_DBG_H_
-
-#include <stdarg.h>
-#include <linux/debugfs.h>
-#include <linux/list.h>
-
-enum dpu_dbg_dump_flag {
- DPU_DBG_DUMP_IN_LOG = BIT(0),
- DPU_DBG_DUMP_IN_MEM = BIT(1),
-};
-
-#if defined(CONFIG_DEBUG_FS)
-
-/**
- * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
- * @hwversion: Chipset revision
- */
-void dpu_dbg_init_dbg_buses(u32 hwversion);
-
-/**
- * dpu_dbg_init - initialize global dpu debug facilities: regdump
- * @dev: device handle
- * Returns: 0 or -ERROR
- */
-int dpu_dbg_init(struct device *dev);
-
-/**
- * dpu_dbg_debugfs_register - register entries at the given debugfs dir
- * @debugfs_root: debugfs root in which to create dpu debug entries
- * Returns: 0 or -ERROR
- */
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
-
-/**
- * dpu_dbg_destroy - destroy the global dpu debug facilities
- * Returns: none
- */
-void dpu_dbg_destroy(void);
-
-/**
- * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
- * @queue_work: whether to queue the dumping work to the work_struct
- * @name: string indicating origin of dump
- * @dump_dbgbus_dpu: whether to dump the dpu debug bus
- * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
- * Returns: none
- */
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt);
-
-/**
- * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
- * address of the top registers. Used for accessing debug bus controls.
- * @blk_off: offset from mdss base of the top block
- */
-void dpu_dbg_set_dpu_top_offset(u32 blk_off);
-
-#else
-
-static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
-}
-
-static inline int dpu_dbg_init(struct device *dev)
-{
- return 0;
-}
-
-static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
- return 0;
-}
-
-static inline void dpu_dbg_destroy(void)
-{
-}
-
-static inline void dpu_dbg_dump(bool queue_work, const char *name,
- bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
-{
-}
-
-static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
-}
-
-#endif /* defined(CONFIG_DEBUG_FS) */
-
-#endif /* DPU_DBG_H_ */
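Taken together, the removed header implied a bring-up order for its user; a
hypothetical sketch built only from the declarations above, with error
handling mostly elided:

	/* Sketch: expected init sequence for the removed dpu_dbg API. */
	static int dpu_dbg_bringup(struct device *dev, u32 hwversion,
				   u32 top_off, struct dentry *root)
	{
		int rc = dpu_dbg_init(dev);

		if (rc)
			return rc;
		dpu_dbg_init_dbg_buses(hwversion);
		dpu_dbg_set_dpu_top_offset(top_off);
		return dpu_dbg_debugfs_register(root);
	}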
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 96cdf06e7da2..36158b7d99cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -130,8 +130,9 @@ enum dpu_enc_rc_states {
* Virtual encoder defers as much as possible to the physical encoders.
* Virtual encoder registers itself with the DRM Framework as the encoder.
* @base: drm_encoder base class for registration with DRM
- * @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @bus_scaling_client: Client handle to the bus scaling interface
+ * @enabled: True if the encoder is active, protected by enc_lock
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization
@@ -141,15 +142,17 @@ enum dpu_enc_rc_states {
 * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
- * @crtc_vblank_cb: Callback into the upper layer / CRTC for
- * notification of the VBLANK
- * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @crtc: Pointer to the currently assigned crtc. Normally you
+ * would use crtc->state->encoder_mask to determine the
+ * link between encoder/crtc. However in this case we need
+ * to track crtc in the disable() hook which is called
+ * _after_ encoder_mask is cleared.
* @crtc_kickoff_cb: Callback into CRTC that will flush & start
* all CTL paths
* @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
* @debugfs_root: Debug file system root file node
- * @enc_lock: Lock around physical encoder create/destroy and
- access.
+ * @enc_lock: Lock around physical encoder
+ * create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
* busy processing current command.
* Bit0 = phys_encs[0] etc.
@@ -175,6 +178,8 @@ struct dpu_encoder_virt {
spinlock_t enc_spinlock;
uint32_t bus_scaling_client;
+ bool enabled;
+
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct dpu_encoder_phys *cur_master;
@@ -183,8 +188,7 @@ struct dpu_encoder_virt {
bool intfs_swapped;
- void (*crtc_vblank_cb)(void *);
- void *crtc_vblank_cb_data;
+ struct drm_crtc *crtc;
struct dentry *debugfs_root;
struct mutex enc_lock;
@@ -210,39 +214,6 @@ struct dpu_encoder_virt {
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
-static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
- bool enable)
-{
- struct drm_encoder *drm_enc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!dpu_enc) {
- DPU_ERROR("invalid dpu enc\n");
- return -EINVAL;
- }
-
- drm_enc = &dpu_enc->base;
- if (!drm_enc->dev || !drm_enc->dev->dev_private) {
- DPU_ERROR("drm device invalid\n");
- return -EINVAL;
- }
-
- priv = drm_enc->dev->dev_private;
- if (!priv->kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
-
- if (enable)
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- else
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- return 0;
-}
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
@@ -488,8 +459,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
drm_encoder_cleanup(drm_enc);
mutex_destroy(&dpu_enc->enc_lock);
-
- kfree(dpu_enc);
}
void dpu_encoder_helper_split_config(
@@ -1119,28 +1088,24 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
}
-void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc = NULL;
- int i;
-
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
- dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ mutex_lock(&dpu_enc->enc_lock);
- if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
- phys->ops.restore(phys);
- }
+ if (!dpu_enc->enabled)
+ goto out;
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
+ dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
_dpu_encoder_virt_enable_helper(drm_enc);
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
@@ -1154,6 +1119,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
@@ -1170,10 +1137,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
ret);
- return;
+ goto out;
}
_dpu_encoder_virt_enable_helper(drm_enc);
+
+ dpu_enc->enabled = true;
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -1195,11 +1167,14 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
return;
}
- mode = &drm_enc->crtc->state->adjusted_mode;
-
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->enabled = false;
+
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1233,6 +1208,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc);
+
+ mutex_unlock(&dpu_enc->enc_lock);
}
static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
@@ -1263,8 +1240,8 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
dpu_enc = to_dpu_encoder_virt(drm_enc);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
- if (dpu_enc->crtc_vblank_cb)
- dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+ if (dpu_enc->crtc)
+ dpu_crtc_vblank_callback(dpu_enc->crtc);
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
atomic_inc(&phy_enc->vsync_cnt);
@@ -1284,25 +1261,32 @@ static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_END("encoder_underrun_callback");
}
-void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
- void (*vbl_cb)(void *), void *vbl_data)
+void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
- bool enable;
- int i;
- enable = vbl_cb ? true : false;
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ /* crtc should always be cleared before re-assigning */
+ WARN_ON(crtc && dpu_enc->crtc);
+ dpu_enc->crtc = crtc;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
+ struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ int i;
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
- dpu_enc->crtc_vblank_cb = vbl_cb;
- dpu_enc->crtc_vblank_cb_data = vbl_data;
+ if (dpu_enc->crtc != crtc) {
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+ return;
+ }
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1407,8 +1391,9 @@ static void dpu_encoder_off_work(struct kthread_work *work)
 * @phys: Pointer to physical encoder structure
 * @extra_flush_bits: Additional bit mask to include in flush trigger
*/
-static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
- struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits,
+ bool async)
{
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
@@ -1431,7 +1416,10 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
return;
}
- pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ if (!async)
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ else
+ pending_kickoff_cnt = atomic_read(&phys->pending_kickoff_cnt);
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1450,7 +1438,7 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
* _dpu_encoder_trigger_start - trigger start for a physical encoder
 * @phys: Pointer to physical encoder structure
*/
-static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
@@ -1507,7 +1495,7 @@ static int dpu_encoder_helper_wait_event_timeout(
return rc;
}
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_hw_ctl *ctl;
@@ -1527,10 +1515,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
ctl->idx);
rc = ctl->ops.reset(ctl);
- if (rc) {
+ if (rc)
DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
- dpu_dbg_dump(false, __func__, true, true);
- }
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@@ -1544,7 +1530,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
* a time.
 * @dpu_enc: Pointer to virtual encoder structure
*/
-static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
+ bool async)
{
struct dpu_hw_ctl *ctl;
uint32_t i, pending_flush;
@@ -1575,7 +1562,8 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
set_bit(i, dpu_enc->frame_busy_mask);
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
- _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
+ async);
else if (ctl->ops.get_pending_flush)
pending_flush |= ctl->ops.get_pending_flush(ctl);
}
@@ -1585,7 +1573,7 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
_dpu_encoder_trigger_flush(
&dpu_enc->base,
dpu_enc->cur_master,
- pending_flush);
+ pending_flush, async);
}
_dpu_encoder_trigger_start(dpu_enc->cur_master);
@@ -1769,7 +1757,7 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
}
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
- struct dpu_encoder_kickoff_params *params)
+ struct dpu_encoder_kickoff_params *params, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
@@ -1803,14 +1791,12 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
if (needs_hw_reset) {
trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.hw_reset)
- phys->ops.hw_reset(phys);
+ dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
}
}
}
-void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
@@ -1833,7 +1819,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
/* All phys encs are ready to go, trigger the kickoff */
- _dpu_encoder_kickoff_phys(dpu_enc);
+ _dpu_encoder_kickoff_phys(dpu_enc, async);
/* allow phys encs to handle any post-kickoff business */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1875,14 +1861,9 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
- struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_virt *dpu_enc = s->private;
int i;
- if (!s || !s->private)
- return -EINVAL;
-
- dpu_enc = s->private;
-
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1920,7 +1901,7 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
int i;
@@ -1934,12 +1915,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
char name[DPU_NAME_SIZE];
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
DPU_ERROR("invalid encoder or kms\n");
return -EINVAL;
}
- dpu_enc = to_dpu_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1964,26 +1944,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
return 0;
}
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
- struct dpu_encoder_virt *dpu_enc;
-
- if (!drm_enc)
- return;
-
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- debugfs_remove_recursive(dpu_enc->debugfs_root);
-}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
return 0;
}
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-}
#endif
static int dpu_encoder_late_register(struct drm_encoder *encoder)
@@ -1993,7 +1958,9 @@ static int dpu_encoder_late_register(struct drm_encoder *encoder)
static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
- _dpu_encoder_destroy_debugfs(encoder);
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
}
static int dpu_encoder_virt_add_phys_encs(
@@ -2268,6 +2235,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+ dpu_enc->enabled = false;
+
return &dpu_enc->base;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 9dbf38f446d9..3f5dafe00580 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -55,14 +55,22 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
struct dpu_encoder_hw_resources *hw_res);
/**
- * dpu_encoder_register_vblank_callback - provide callback to encoder that
- * will be called on the next vblank.
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
* @encoder: encoder pointer
- * @cb: callback pointer, provide NULL to deregister and disable IRQs
- * @data: user data provided to callback
+ * @crtc: crtc pointer
+ */
+void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc);
+
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ * the encoder is assigned to the given crtc
+ * @encoder: encoder pointer
+ * @crtc: crtc pointer
+ * @enable: true if vblank should be enabled
*/
-void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
- void (*cb)(void *), void *data);
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc, bool enable);
/**
* dpu_encoder_register_frame_event_callback - provide callback to encoder that
@@ -81,9 +89,10 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
* @params: kickoff time parameters
+ * @async: true if this is an asynchronous commit
*/
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
- struct dpu_encoder_kickoff_params *params);
+ struct dpu_encoder_kickoff_params *params, bool async);
/**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
@@ -96,8 +105,9 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @encoder: encoder pointer
+ * @async: true if this is an asynchronous commit
*/
-void dpu_encoder_kickoff(struct drm_encoder *encoder);
+void dpu_encoder_kickoff(struct drm_encoder *encoder, bool async);
/**
* dpu_encoder_wait_for_event - Waits for encoder events
@@ -126,10 +136,10 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
/**
- * dpu_encoder_virt_restore - restore the encoder configs
+ * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
* @encoder: encoder pointer
*/
-void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
/**
* dpu_encoder_init - initialize virtual encoder object
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 964efcc757a4..44e6f8b68e70 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops {
* @handle_post_kickoff: Do any work necessary post-kickoff work
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
- * @hw_reset: Issue HW recovery such as CTL reset and clear
- * DPU_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
* @prepare_idle_pc: phys encoder can update the vsync_enable status
* on idle power collapse prepare
@@ -151,7 +149,6 @@ struct dpu_encoder_phys_ops {
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
- void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
void (*restore)(struct dpu_encoder_phys *phys);
@@ -342,15 +339,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
*/
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_helper_hw_reset - issue ctl hw reset
- * This helper function may be optionally specified by physical
- * encoders if they require ctl hw reset. If state is currently
- * DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
- * @phys_enc: Pointer to physical encoder structure
- */
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
-
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
@@ -362,7 +350,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- dpu_crtc_state_is_stereo(dpu_cstate))
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index b2d7f0ded24c..99ab5ca9bed3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -44,14 +44,7 @@
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
-static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
- struct dpu_encoder_phys_cmd *cmd_enc)
-{
- return KICKOFF_TIMEOUT_MS;
-}
-
-static inline bool dpu_encoder_phys_cmd_is_master(
- struct dpu_encoder_phys *phys_enc)
+static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
}
@@ -243,7 +236,6 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
atomic_read(&phys_enc->pending_kickoff_cnt));
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
- dpu_dbg_dump(false, __func__, true, true);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -496,14 +488,11 @@ static void dpu_encoder_phys_cmd_enable_helper(
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
- goto skip_flush;
+ return;
ctl = phys_enc->hw_ctl;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
-
-skip_flush:
- return;
}
static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
@@ -727,7 +716,7 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
wait_info.wq = &cmd_enc->pending_vblank_wq;
wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
- wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
atomic_inc(&cmd_enc->pending_vblank_cnt);
@@ -776,7 +765,6 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
- ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
ops->restore = dpu_encoder_phys_cmd_enable_helper;
ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
@@ -798,7 +786,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
if (!cmd_enc) {
ret = -ENOMEM;
DPU_ERROR("failed to allocate\n");
- goto fail;
+ return ERR_PTR(ret);
}
phys_enc = &cmd_enc->base;
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
@@ -856,6 +844,5 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
return phys_enc;
-fail:
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 84de385a9f62..acdab5b0db18 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -110,7 +110,7 @@ static void drm_mode_to_intf_timing_params(
*/
}
-static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+static u32 get_horizontal_total(const struct intf_timing_params *timing)
{
u32 active = timing->xres;
u32 inactive =
@@ -119,7 +119,7 @@ static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
return active + inactive;
}
-static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+static u32 get_vertical_total(const struct intf_timing_params *timing)
{
u32 active = timing->yres;
u32 inactive =
@@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
if (hw_ctl && hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
- if (flush_register == 0)
+ if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -613,7 +613,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
- dpu_dbg_dump(false, __func__, true, true);
}
}
@@ -766,7 +765,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
- ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index bfcd165e96df..0874f0a53bf9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -216,7 +216,7 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, 0,
+ false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,
@@ -921,7 +921,7 @@ static int _dpu_format_populate_addrs_ubwc(
+ layout->plane_size[2] + layout->plane_size[3];
if (!meta)
- goto done;
+ return 0;
/* configure Y metadata plane */
layout->plane_addr[2] = base_addr;
@@ -952,12 +952,11 @@ static int _dpu_format_populate_addrs_ubwc(
layout->plane_addr[1] = 0;
if (!meta)
- goto done;
+ return 0;
layout->plane_addr[2] = base_addr;
layout->plane_addr[3] = 0;
}
-done:
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
index 58d29e43faef..92f1c4241b9a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -30,16 +30,10 @@ static LIST_HEAD(dpu_hw_blk_list);
* @type: hw block type - enum dpu_hw_blk_type
* @id: instance id of the hw block
* @ops: Pointer to block operations
- * return: 0 if success; error code otherwise
*/
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops)
{
- if (!hw_blk) {
- pr_err("invalid parameters\n");
- return -EINVAL;
- }
-
INIT_LIST_HEAD(&hw_blk->list);
hw_blk->type = type;
hw_blk->id = id;
@@ -51,8 +45,6 @@ int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
mutex_lock(&dpu_hw_blk_lock);
list_add(&hw_blk->list, &dpu_hw_blk_list);
mutex_unlock(&dpu_hw_blk_lock);
-
- return 0;
}
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
index 0f4ca8af1ec5..1934c2f7e8fa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -44,7 +44,7 @@ struct dpu_hw_blk {
struct dpu_hw_blk_ops ops;
};
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops);
void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index dc060e7358e4..144358a3d0fb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -736,13 +736,4 @@ struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
*/
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
-/**
- * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
- * @cfg: pointer to sspp cfg
- */
-static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
-{
- return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
- test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
-}
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index eec1051f2afc..1068b4b7940f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
+#include "dpu_trace.h"
#define CTL_LAYER(lm) \
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@@ -72,24 +72,39 @@ static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
return stages;
}
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
+
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
+ trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
+ trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
+ trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
ctx->pending_flush_mask = 0x0;
}
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
u32 flushbits)
{
+ trace_dpu_hw_ctl_update_pending_flush(flushbits,
+ ctx->pending_flush_mask);
ctx->pending_flush_mask |= flushbits;
}
@@ -103,18 +118,12 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
-
+ trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
-static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
-
- return DPU_REG_READ(c, CTL_FLUSH);
-}
-
-static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
uint32_t flushbits = 0;
@@ -169,7 +178,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
return flushbits;
}
-static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
uint32_t flushbits = 0;
@@ -202,7 +211,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
return flushbits;
}
-static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
switch (intf) {
@@ -474,10 +483,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
@@ -485,7 +491,6 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
{
struct dpu_hw_ctl *c;
struct dpu_ctl_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -504,18 +509,9 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
c->mixer_count = m->mixer_count;
c->mixer_hw_caps = m->mixer;
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 9c6bba0ac7c3..f6a83daa385b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define INTF_TIMING_ENGINE_EN 0x000
@@ -265,10 +264,7 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_line_count = dpu_hw_intf_get_line_count;
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
@@ -276,7 +272,6 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
{
struct dpu_hw_intf *c;
struct dpu_intf_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -297,18 +292,9 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
c->mdss = m;
_setup_intf_ops(&c->ops, c->cap->features);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 3b77df460dea..a2b0dbc23058 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -92,16 +92,6 @@ struct dpu_hw_intf {
};
/**
- * to_dpu_hw_intf - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_intf, base);
-}
-
-/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
* @idx: interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index acb8dc8acaa5..018df2c3b7ed 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define LM_OP_MODE 0x00
@@ -64,16 +63,10 @@ static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
{
const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
- int rc;
+ if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
+ return sblk->blendstage_base[stage - DPU_STAGE_0];
- if (stage == DPU_STAGE_BASE)
- rc = -EINVAL;
- else if (stage <= sblk->maxblendstages)
- rc = sblk->blendstage_base[stage - DPU_STAGE_0];
- else
- rc = -EINVAL;
-
- return rc;
+ return -EINVAL;
}
static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
@@ -163,11 +156,6 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
- void *cfg)
-{
-}
-
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
@@ -179,13 +167,9 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
- ops->setup_gc = dpu_hw_lm_gc;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
@@ -193,7 +177,6 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
{
struct dpu_hw_mixer *c;
struct dpu_lm_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -210,18 +193,9 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
c->cap = cfg;
_setup_mixer_ops(m, &c->ops, c->cap->features);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
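Dropping the empty dpu_hw_lm_gc() stub works because setup_gc is an optional op; the usual dpu_hw convention is that call sites test the function pointer before invoking it, so leaving .setup_gc unset behaves exactly like the deleted no-op. A hedged sketch of that assumed call-site guard (not quoted from this patch):

    /* Optional mixer op: only call it when the hardware provides it. */
    if (mixer->ops.setup_gc)
            mixer->ops.setup_gc(mixer, cfg);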
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 5b036aca8340..6aee839a6a23 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -61,11 +61,6 @@ struct dpu_hw_lm_ops {
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
- /**
- * setup_gc : enable/disable gamma correction feature
- */
- void (*setup_gc)(struct dpu_hw_mixer *mixer,
- void *cfg);
};
struct dpu_hw_mixer {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index cc3a623903f4..3bdf47ed1845 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -16,7 +16,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_pingpong.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
@@ -177,7 +176,7 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
if (height < init)
- goto line_count_exit;
+ return line;
line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
@@ -186,7 +185,6 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
else
line -= init;
-line_count_exit:
return line;
}
@@ -201,10 +199,7 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
ops->get_line_count = dpu_hw_pp_get_line_count;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
@@ -212,7 +207,6 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
{
struct dpu_hw_pingpong *c;
struct dpu_pingpong_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -228,18 +222,9 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
c->caps = cfg;
_setup_pingpong_ops(&c->ops, c->caps);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 3caccd7d6a3e..0e02e43cee14 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -105,16 +105,6 @@ struct dpu_hw_pingpong {
};
/**
- * dpu_hw_pingpong - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_pingpong, base);
-}
-
-/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
* @idx: Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index c25b52a6b219..e9132bf5166b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -14,7 +14,6 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_sspp.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
@@ -141,7 +140,7 @@
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
-static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
{
@@ -662,7 +661,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
test_bit(DPU_SSPP_CSC_10BIT, &features))
c->ops.setup_csc = dpu_hw_sspp_setup_csc;
- if (dpu_hw_sspp_multirect_enabled(c->cap))
+ if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
@@ -697,10 +697,7 @@ static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
return ERR_PTR(-ENOMEM);
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, struct dpu_mdss_cfg *catalog,
@@ -708,7 +705,6 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
{
struct dpu_hw_pipe *hw_pipe;
struct dpu_sspp_cfg *cfg;
- int rc;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
@@ -730,18 +726,9 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
hw_pipe->cap = cfg;
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
- rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
return hw_pipe;
-
-blk_init_error:
- kzfree(hw_pipe);
-
- return ERR_PTR(rc);
}
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
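The dpu_hw_sspp_multirect_enabled() helper is open-coded at its only call site: multirect support is implied by either SmartDMA feature bit. Were the test ever needed in more than one place again, it would look like this hypothetical helper (the name is illustrative; the patch deliberately inlines the check instead):

    static bool sspp_supports_multirect(const struct dpu_sspp_cfg *cap)
    {
            return test_bit(DPU_SSPP_SMART_DMA_V1, &cap->features) ||
                   test_bit(DPU_SSPP_SMART_DMA_V2, &cap->features);
    }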
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 4d81e5f5ce1b..119b4e1c16be 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -392,16 +392,6 @@ struct dpu_hw_pipe {
};
/**
- * dpu_hw_pipe - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_pipe, base);
-}
-
-/**
* dpu_hw_sspp_init - initializes the sspp hw driver object.
* Should be called once before accessing every pipe.
* @idx: Pipe index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index b8781256e21b..a041597bb849 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define SSPP_SPARE 0x28
@@ -322,10 +321,7 @@ static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
return ERR_PTR(-EINVAL);
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
void __iomem *addr,
@@ -333,7 +329,6 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
{
struct dpu_hw_mdp *mdp;
const struct dpu_mdp_cfg *cfg;
- int rc;
if (!addr || !m)
return ERR_PTR(-EINVAL);
@@ -355,20 +350,9 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
- rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
-
- dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+ dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
return mdp;
-
-blk_init_error:
- kzfree(mdp);
-
- return ERR_PTR(rc);
}
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 192e338f20bb..aa21fd834398 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -161,16 +161,6 @@ struct dpu_hw_mdp {
};
/**
- * to_dpu_hw_mdp - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_mdp, base);
-}
-
-/**
* dpu_hw_mdptop_init - initializes the top driver for the passed idx
* @idx: Interface index for which driver object is required
* @addr: Mapped register io address of MDP
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index d43905525f92..38bfd222ed72 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
-#include "dpu_dbg.h"
#define VBIF_VERSION 0x0000
#define VBIF_CLK_FORCE_CTRL0 0x0008
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index b557687b1964..78833c2c27f8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -16,6 +16,8 @@
#include <linux/err.h>
#include <linux/delay.h>
+#include <drm/drm_print.h>
+
#include "dpu_io_util.h"
void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
@@ -164,7 +166,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
"clock-names", i,
&clock_name);
if (rc) {
- dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
i);
break;
}
@@ -176,13 +178,13 @@ int msm_dss_parse_clock(struct platform_device *pdev,
rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
if (rc) {
- dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+ DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
goto err;
}
rc = of_clk_set_defaults(pdev->dev.of_node, false);
if (rc) {
- dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+ DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
goto err;
}
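The dev_err() to DRM_DEV_ERROR() conversion is purely a logging change: both take the same struct device pointer, but the DRM variant prefixes the message so driver errors line up with the rest of the DRM core output. A condensed sketch of the converted clock-name lookup (assuming the usual <linux/of.h> and <drm/drm_print.h> declarations):

    #include <linux/of.h>
    #include <linux/platform_device.h>
    #include <drm/drm_print.h>

    static int example_read_clock_name(struct platform_device *pdev, int i)
    {
            const char *clock_name;
            int rc;

            rc = of_property_read_string_index(pdev->dev.of_node,
                                               "clock-names", i, &clock_name);
            if (rc)
                    DRM_DEV_ERROR(&pdev->dev,
                                  "Failed to get clock name for %d\n", i);
            return rc;
    }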
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
deleted file mode 100644
index d5e6ce0140cf..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "dpu_irq.h"
-#include "dpu_core_irq.h"
-
-irqreturn_t dpu_irq(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- return dpu_core_irq(dpu_kms);
-}
-
-void dpu_irq_preinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- if (!dpu_kms->dev || !dpu_kms->dev->dev) {
- pr_err("invalid device handles\n");
- return;
- }
-
- dpu_core_irq_preinstall(dpu_kms);
-}
-
-int dpu_irq_postinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- int rc;
-
- if (!kms) {
- DPU_ERROR("invalid parameters\n");
- return -EINVAL;
- }
-
- rc = dpu_core_irq_postinstall(dpu_kms);
-
- return rc;
-}
-
-void dpu_irq_uninstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- if (!kms) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
- dpu_core_irq_uninstall(dpu_kms);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
deleted file mode 100644
index 3e147f7176e2..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DPU_IRQ_H__
-#define __DPU_IRQ_H__
-
-#include <linux/kernel.h>
-#include <linux/irqdomain.h>
-
-#include "msm_kms.h"
-
-/**
- * dpu_irq_controller - define MDSS level interrupt controller context
- * @enabled_mask: enable status of MDSS level interrupt
- * @domain: interrupt domain of this controller
- */
-struct dpu_irq_controller {
- unsigned long enabled_mask;
- struct irq_domain *domain;
-};
-
-/**
- * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
- * @kms: pointer to kms context
- * @return: none
- */
-void dpu_irq_preinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
- * @kms: pointer to kms context
- * @return: 0 if success; error code otherwise
- */
-int dpu_irq_postinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_uninstall - uninstall MDSS IRQ handler
- * @drm_dev: pointer to kms context
- * @return: none
- */
-void dpu_irq_uninstall(struct msm_kms *kms);
-
-/**
- * dpu_irq - MDSS level IRQ handler
- * @kms: pointer to kms context
- * @return: interrupt handling status
- */
-irqreturn_t dpu_irq(struct msm_kms *kms);
-
-#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 0a683e65a9f3..4d67b3c96702 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -81,7 +81,7 @@ static int _dpu_danger_signal_status(struct seq_file *s,
struct dpu_danger_safe_status status;
int i;
- if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
DPU_ERROR("invalid arg(s)\n");
return 0;
}
@@ -138,46 +138,29 @@ static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
-static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove_recursive(dpu_kms->debugfs_danger);
- dpu_kms->debugfs_danger = NULL;
-}
-
-static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
- dpu_kms->debugfs_danger = debugfs_create_dir("danger",
- parent);
- if (!dpu_kms->debugfs_danger) {
- DPU_ERROR("failed to create danger debugfs\n");
- return -EINVAL;
- }
+ struct dentry *entry = debugfs_create_dir("danger", parent);
+ if (IS_ERR_OR_NULL(entry))
+ return;
- debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+ debugfs_create_file("danger_status", 0600, entry,
dpu_kms, &dpu_debugfs_danger_stats_fops);
- debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+ debugfs_create_file("safe_status", 0600, entry,
dpu_kms, &dpu_debugfs_safe_stats_fops);
-
- return 0;
}
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
- struct dpu_debugfs_regset32 *regset;
- struct dpu_kms *dpu_kms;
+ struct dpu_debugfs_regset32 *regset = s->private;
+ struct dpu_kms *dpu_kms = regset->dpu_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
void __iomem *base;
uint32_t i, addr;
- if (!s || !s->private)
- return 0;
-
- regset = s->private;
-
- dpu_kms = regset->dpu_kms;
- if (!dpu_kms || !dpu_kms->mmio)
+ if (!dpu_kms->mmio)
return 0;
dev = dpu_kms->dev;
@@ -250,57 +233,24 @@ void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
{
- void *p;
- int rc;
-
- p = dpu_hw_util_get_log_mask_ptr();
+ void *p = dpu_hw_util_get_log_mask_ptr();
+ struct dentry *entry;
- if (!dpu_kms || !p)
+ if (!p)
return -EINVAL;
- dpu_kms->debugfs_root = debugfs_create_dir("debug",
- dpu_kms->dev->primary->debugfs_root);
- if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
- DRM_ERROR("debugfs create_dir failed %ld\n",
- PTR_ERR(dpu_kms->debugfs_root));
- return PTR_ERR(dpu_kms->debugfs_root);
- }
-
- rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
- if (rc) {
- DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
- return rc;
- }
+ entry = debugfs_create_dir("debug", dpu_kms->dev->primary->debugfs_root);
+ if (IS_ERR_OR_NULL(entry))
+ return -ENODEV;
/* allow root to be NULL */
- debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
-
- (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
- (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
- (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
-
- rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
- if (rc) {
- DPU_ERROR("failed to init perf %d\n", rc);
- return rc;
- }
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
- return 0;
-}
+ dpu_debugfs_danger_init(dpu_kms, entry);
+ dpu_debugfs_vbif_init(dpu_kms, entry);
+ dpu_debugfs_core_irq_init(dpu_kms, entry);
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
- /* don't need to NULL check debugfs_root */
- if (dpu_kms) {
- dpu_debugfs_vbif_destroy(dpu_kms);
- dpu_debugfs_danger_destroy(dpu_kms);
- dpu_debugfs_core_irq_destroy(dpu_kms);
- debugfs_remove_recursive(dpu_kms->debugfs_root);
- }
-}
-#else
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
+ return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
@@ -320,7 +270,10 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct dpu_kms *dpu_kms;
struct msm_drm_private *priv;
struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder;
+ int i;
if (!kms)
return;
@@ -332,9 +285,13 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
priv = dev->dev_private;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc != NULL)
+ /* Call prepare_commit for all affected encoders */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc_state->encoder_mask) {
dpu_encoder_prepare_commit(encoder);
+ }
+ }
}
/*
@@ -344,15 +301,20 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
/* Forward this enable call to the commit hook */
if (funcs && funcs->commit)
funcs->commit(encoder);
- if (crtc && crtc->state->active) {
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_for_each_crtc(crtc, dev) {
+ if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
+ continue;
+
trace_dpu_kms_enc_enable(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc);
+ dpu_crtc_commit_kickoff(crtc, false);
}
}
@@ -369,7 +331,8 @@ static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
if (crtc->state->active) {
trace_dpu_kms_commit(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc);
+ dpu_crtc_commit_kickoff(crtc,
+ state->legacy_cursor_update);
}
}
}
@@ -613,22 +576,7 @@ fail:
#ifdef CONFIG_DEBUG_FS
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- struct drm_device *dev;
- int rc;
-
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
- DPU_ERROR("invalid dpu_kms\n");
- return -EINVAL;
- }
-
- dev = dpu_kms->dev;
-
- rc = _dpu_debugfs_init(dpu_kms);
- if (rc)
- DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
-
- return rc;
+ return _dpu_debugfs_init(to_dpu_kms(kms));
}
#endif
@@ -651,12 +599,7 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
- if (dpu_kms->power_event)
- dpu_power_handle_unregister_event(
- &dpu_kms->phandle, dpu_kms->power_event);
-
/* safe to call these more than once during shutdown */
- _dpu_debugfs_destroy(dpu_kms);
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
@@ -676,11 +619,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_hw_catalog_deinit(dpu_kms->catalog);
dpu_kms->catalog = NULL;
- if (dpu_kms->core_client)
- dpu_power_client_destroy(&dpu_kms->phandle,
- dpu_kms->core_client);
- dpu_kms->core_client = NULL;
-
if (dpu_kms->vbif[VBIF_NRT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
dpu_kms->vbif[VBIF_NRT] = NULL;
@@ -705,131 +643,9 @@ static void dpu_kms_destroy(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
- dpu_dbg_destroy();
_dpu_kms_hw_destroy(dpu_kms);
}
-static int dpu_kms_pm_suspend(struct device *dev)
-{
- struct drm_device *ddev;
- struct drm_modeset_acquire_ctx ctx;
- struct drm_atomic_state *state;
- struct dpu_kms *dpu_kms;
- int ret = 0, num_crtcs = 0;
-
- if (!dev)
- return -EINVAL;
-
- ddev = dev_get_drvdata(dev);
- if (!ddev || !ddev_to_msm_kms(ddev))
- return -EINVAL;
-
- dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
- /* disable hot-plug polling */
- drm_kms_helper_poll_disable(ddev);
-
- /* acquire modeset lock(s) */
- drm_modeset_acquire_init(&ctx, 0);
-
-retry:
- DPU_ATRACE_BEGIN("kms_pm_suspend");
-
- ret = drm_modeset_lock_all_ctx(ddev, &ctx);
- if (ret)
- goto unlock;
-
- /* save current state for resume */
- if (dpu_kms->suspend_state)
- drm_atomic_state_put(dpu_kms->suspend_state);
- dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
- if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
- DRM_ERROR("failed to back up suspend state\n");
- dpu_kms->suspend_state = NULL;
- goto unlock;
- }
-
- /* create atomic state to disable all CRTCs */
- state = drm_atomic_state_alloc(ddev);
- if (IS_ERR_OR_NULL(state)) {
- DRM_ERROR("failed to allocate crtc disable state\n");
- goto unlock;
- }
-
- state->acquire_ctx = &ctx;
-
- /* check for nothing to do */
- if (num_crtcs == 0) {
- DRM_DEBUG("all crtcs are already in the off state\n");
- drm_atomic_state_put(state);
- goto suspended;
- }
-
- /* commit the "disable all" state */
- ret = drm_atomic_commit(state);
- if (ret < 0) {
- DRM_ERROR("failed to disable crtcs, %d\n", ret);
- drm_atomic_state_put(state);
- goto unlock;
- }
-
-suspended:
- dpu_kms->suspend_block = true;
-
-unlock:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- DPU_ATRACE_END("kms_pm_suspend");
- return 0;
-}
-
-static int dpu_kms_pm_resume(struct device *dev)
-{
- struct drm_device *ddev;
- struct dpu_kms *dpu_kms;
- int ret;
-
- if (!dev)
- return -EINVAL;
-
- ddev = dev_get_drvdata(dev);
- if (!ddev || !ddev_to_msm_kms(ddev))
- return -EINVAL;
-
- dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
- DPU_ATRACE_BEGIN("kms_pm_resume");
-
- drm_mode_config_reset(ddev);
-
- drm_modeset_lock_all(ddev);
-
- dpu_kms->suspend_block = false;
-
- if (dpu_kms->suspend_state) {
- dpu_kms->suspend_state->acquire_ctx =
- ddev->mode_config.acquire_ctx;
- ret = drm_atomic_commit(dpu_kms->suspend_state);
- if (ret < 0) {
- DRM_ERROR("failed to restore state, %d\n", ret);
- drm_atomic_state_put(dpu_kms->suspend_state);
- }
- dpu_kms->suspend_state = NULL;
- }
- drm_modeset_unlock_all(ddev);
-
- /* enable hot-plug polling */
- drm_kms_helper_poll_enable(ddev);
-
- DPU_ATRACE_END("kms_pm_resume");
- return 0;
-}
-
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode)
@@ -858,10 +674,30 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
encoder->base.id, rc);
}
+static irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ return dpu_core_irq(dpu_kms);
+}
+
+static void dpu_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ dpu_core_irq_preinstall(dpu_kms);
+}
+
+static void dpu_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ dpu_core_irq_uninstall(dpu_kms);
+}
+
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
.irq_preinstall = dpu_irq_preinstall,
- .irq_postinstall = dpu_irq_postinstall,
.irq_uninstall = dpu_irq_uninstall,
.irq = dpu_irq,
.prepare_commit = dpu_kms_prepare_commit,
@@ -873,8 +709,6 @@ static const struct msm_kms_funcs kms_funcs = {
.check_modified_format = dpu_format_check_modified_format,
.get_format = dpu_get_msm_format,
.round_pixclk = dpu_kms_round_pixclk,
- .pm_suspend = dpu_kms_pm_suspend,
- .pm_resume = dpu_kms_pm_resume,
.destroy = dpu_kms_destroy,
.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
@@ -882,12 +716,6 @@ static const struct msm_kms_funcs kms_funcs = {
#endif
};
-/* the caller api needs to turn on clock before calling it */
-static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
-{
- dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
-}
-
static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
@@ -911,6 +739,9 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
if (!domain)
return 0;
+ domain->geometry.aperture_start = 0x1000;
+ domain->geometry.aperture_end = 0xffffffff;
+
aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
domain, "dpu1");
if (IS_ERR(aspace)) {
@@ -960,16 +791,6 @@ u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
return clk_get_rate(clk->clk);
}
-static void dpu_kms_handle_power_event(u32 event_type, void *usr)
-{
- struct dpu_kms *dpu_kms = usr;
-
- if (!dpu_kms)
- return;
-
- dpu_vbif_init_memtypes(dpu_kms);
-}
-
static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
@@ -979,26 +800,20 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
if (!kms) {
DPU_ERROR("invalid kms\n");
- goto end;
+ return rc;
}
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
if (!dev) {
DPU_ERROR("invalid device\n");
- goto end;
- }
-
- rc = dpu_dbg_init(&dpu_kms->pdev->dev);
- if (rc) {
- DRM_ERROR("failed to init dpu dbg: %d\n", rc);
- goto end;
+ return rc;
}
priv = dev->dev_private;
if (!priv) {
DPU_ERROR("invalid private data\n");
- goto dbg_destroy;
+ return rc;
}
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
@@ -1036,20 +851,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
}
- dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
- "core");
- if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
- rc = PTR_ERR(dpu_kms->core_client);
- if (!dpu_kms->core_client)
- rc = -EINVAL;
- DPU_ERROR("dpu power client create failed: %d\n", rc);
- dpu_kms->core_client = NULL;
- goto error;
- }
-
pm_runtime_get_sync(&dpu_kms->pdev->dev);
- _dpu_kms_core_hw_rev_init(dpu_kms);
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
@@ -1063,8 +867,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
-
/*
* Now we need to read the HW catalog and initialize resources such as
* clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
@@ -1110,7 +912,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
}
rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
- &dpu_kms->phandle,
_dpu_kms_get_clk(dpu_kms, "core"));
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
@@ -1151,13 +952,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
- /*
- * Handle (re)initializations during power enable
- */
- dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
- dpu_kms->power_event = dpu_power_handle_register_event(
- &dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
- dpu_kms_handle_power_event, dpu_kms, "kms");
+ dpu_vbif_init_memtypes(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
@@ -1171,9 +966,7 @@ power_error:
pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
_dpu_kms_hw_destroy(dpu_kms);
-dbg_destroy:
- dpu_dbg_destroy();
-end:
+
return rc;
}
@@ -1221,8 +1014,6 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dpu_power_resource_init(pdev, &dpu_kms->phandle);
-
platform_set_drvdata(pdev, dpu_kms);
msm_kms_init(&dpu_kms->base, &kms_funcs);
@@ -1242,7 +1033,6 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
- dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
mp->num_clk = 0;
@@ -1278,19 +1068,13 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
- goto exit;
+ return rc;
}
- rc = dpu_power_resource_enable(&dpu_kms->phandle,
- dpu_kms->core_client, false);
- if (rc)
- DPU_ERROR("resource disable failed: %d\n", rc);
-
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
-exit:
return rc;
}
@@ -1299,27 +1083,27 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_encoder *encoder;
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
- goto exit;
+ return rc;
}
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
- goto exit;
+ return rc;
}
- rc = dpu_power_resource_enable(&dpu_kms->phandle,
- dpu_kms->core_client, true);
- if (rc)
- DPU_ERROR("resource enable failed: %d\n", rc);
+ dpu_vbif_init_memtypes(dpu_kms);
+
+ drm_for_each_encoder(encoder, ddev)
+ dpu_encoder_virt_runtime_resume(encoder);
-exit:
return rc;
}
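Two ideas recur through the dpu_kms.c hunks above. First, prepare_commit now derives the affected encoders from the atomic state via crtc_state->encoder_mask instead of the legacy encoder->crtc back-pointer, so only encoders that are actually part of the commit get touched. Second, the custom power-event machinery is replaced by doing the work directly in the runtime-PM callbacks. A sketch of the first pattern, condensed from the hunk:

    static void example_prepare_commit(struct drm_device *dev,
                                       struct drm_atomic_state *state)
    {
            struct drm_crtc *crtc;
            struct drm_crtc_state *crtc_state;
            struct drm_encoder *encoder;
            int i;

            /* walk only the CRTCs touched by this commit ... */
            for_each_new_crtc_in_state(state, crtc, crtc_state, i)
                    /* ... and only the encoders bound to each of them */
                    drm_for_each_encoder_mask(encoder, dev,
                                              crtc_state->encoder_mask)
                            dpu_encoder_prepare_commit(encoder);
    }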
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 66d466628e2b..ac75cfc267f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -23,15 +23,13 @@
#include "msm_kms.h"
#include "msm_mmu.h"
#include "msm_gem.h"
-#include "dpu_dbg.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_top.h"
+#include "dpu_io_util.h"
#include "dpu_rm.h"
-#include "dpu_power_handle.h"
-#include "dpu_irq.h"
#include "dpu_core_perf.h"
#define DRMID(x) ((x) ? (x)->base.id : -1)
@@ -104,7 +102,6 @@ struct dpu_irq {
atomic_t *enable_counts;
atomic_t *irq_counts;
spinlock_t cb_lock;
- struct dentry *debugfs_file;
};
struct dpu_kms {
@@ -113,15 +110,6 @@ struct dpu_kms {
int core_rev;
struct dpu_mdss_cfg *catalog;
- struct dpu_power_handle phandle;
- struct dpu_power_client *core_client;
- struct dpu_power_event *power_event;
-
- /* directory entry for debugfs */
- struct dentry *debugfs_root;
- struct dentry *debugfs_danger;
- struct dentry *debugfs_vbif;
-
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
@@ -135,10 +123,6 @@ struct dpu_kms {
struct dpu_core_perf perf;
- /* saved atomic state during system suspend */
- struct drm_atomic_state *suspend_state;
- bool suspend_block;
-
struct dpu_rm rm;
bool rm_init;
@@ -164,33 +148,6 @@ struct vsync_info {
((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
/**
- * dpu_kms_is_suspend_state - whether or not the system is pm suspended
- * @dev: Pointer to drm device
- * Return: Suspend status
- */
-static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
-{
- if (!ddev_to_msm_kms(dev))
- return false;
-
- return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
-}
-
-/**
- * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
- * suspend status
- * @dev: Pointer to drm device
- * Return: True if commits should be rejected due to pm suspend
- */
-static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
-{
- if (!dpu_kms_is_suspend_state(dev))
- return false;
-
- return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
-}
-
-/**
* Debugfs functions - extra helper functions for debugfs support
*
* Main debugfs documentation is located at,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 2235ef8129f4..cb307a2abf06 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -9,6 +9,11 @@
#define HW_INTR_STATUS 0x0010
+struct dpu_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
@@ -115,13 +120,12 @@ static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
return 0;
}
-static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
{
if (dpu_mdss->irq_controller.domain) {
irq_domain_remove(dpu_mdss->irq_controller.domain);
dpu_mdss->irq_controller.domain = NULL;
}
- return 0;
}
static int dpu_mdss_enable(struct msm_mdss *mdss)
{
@@ -156,18 +160,16 @@ static void dpu_mdss_destroy(struct drm_device *dev)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
+ pm_runtime_suspend(dev->dev);
+ pm_runtime_disable(dev->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss);
-
free_irq(platform_get_irq(pdev, 0), dpu_mdss);
-
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
-
- pm_runtime_disable(dev->dev);
priv->mdss = NULL;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index d77a8cb15404..fd75870eb17f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -137,7 +137,7 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
* @src_width: width of source buffer
* Return: fill level corresponding to the source buffer/format or 0 if error
*/
-static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
const struct dpu_format *fmt, u32 src_width)
{
struct dpu_plane *pdpu, *tmp;
@@ -430,24 +430,14 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
}
-/**
- * _dpu_plane_get_aspace: gets the address space
- */
-static inline struct msm_gem_address_space *_dpu_plane_get_aspace(
- struct dpu_plane *pdpu)
-{
- struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
-
- return kms->base.aspace;
-}
-
-static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+static void _dpu_plane_set_scanout(struct drm_plane *plane,
struct dpu_plane_state *pstate,
struct dpu_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
- struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ struct msm_gem_address_space *aspace = kms->base.aspace;
int ret;
ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
@@ -525,7 +515,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->enable = 1;
}
-static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
{
static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
{
@@ -801,7 +791,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
struct dma_fence *fence;
- struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
if (!new_state->fb)
@@ -810,7 +800,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
/* cache aspace */
- pstate->aspace = aspace;
+ pstate->aspace = kms->base.aspace;
/*
* TODO: Need to sort out the msm_framebuffer_prepare() call below so
@@ -1191,19 +1181,8 @@ static void dpu_plane_destroy(struct drm_plane *plane)
static void dpu_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct dpu_plane_state *pstate;
-
- if (!plane || !state) {
- DPU_ERROR("invalid arg(s), plane %d state %d\n",
- plane != 0, state != 0);
- return;
- }
-
- pstate = to_dpu_plane_state(state);
-
__drm_atomic_helper_plane_destroy_state(state);
-
- kfree(pstate);
+ kfree(to_dpu_plane_state(state));
}
static struct drm_plane_state *
@@ -1273,26 +1252,12 @@ static ssize_t _dpu_plane_danger_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
- struct dpu_mdss_cfg *cfg = kms->catalog;
- int len = 0;
- char buf[40] = {'\0'};
-
- if (!cfg)
- return -ENODEV;
+ int len;
+ char buf[40];
- if (*ppos)
- return 0; /* the end */
-
- len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
- if (len < 0 || len >= sizeof(buf))
- return 0;
+ len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
- if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
- return -EFAULT;
-
- *ppos += len; /* increase offset */
-
- return len;
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
@@ -1322,23 +1287,12 @@ static ssize_t _dpu_plane_danger_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
- struct dpu_mdss_cfg *cfg = kms->catalog;
int disable_panic;
- char buf[10];
-
- if (!cfg)
- return -EFAULT;
-
- if (count >= sizeof(buf))
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
+ int ret;
- if (kstrtoint(buf, 0, &disable_panic))
- return -EFAULT;
+ ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+ if (ret)
+ return ret;
if (disable_panic) {
/* Disable panic signal for all active pipes */
@@ -1363,33 +1317,10 @@ static const struct file_operations dpu_plane_danger_enable = {
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
- struct dpu_kms *kms;
- struct msm_drm_private *priv;
- const struct dpu_sspp_sub_blks *sblk = 0;
- const struct dpu_sspp_cfg *cfg = 0;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
-
- kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
-
- if (pdpu && pdpu->pipe_hw)
- cfg = pdpu->pipe_hw->cap;
- if (cfg)
- sblk = cfg->sblk;
-
- if (!sblk)
- return 0;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *kms = _dpu_plane_get_kms(plane);
+ const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap;
+ const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
/* create overall sub-directory for the pipe */
pdpu->debugfs_root =
@@ -1460,25 +1391,11 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
return 0;
}
-
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu;
-
- if (!plane)
- return;
- pdpu = to_dpu_plane(plane);
-
- debugfs_remove_recursive(pdpu->debugfs_root);
-}
#else
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
return 0;
}
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
-}
#endif
static int dpu_plane_late_register(struct drm_plane *plane)
@@ -1488,7 +1405,9 @@ static int dpu_plane_late_register(struct drm_plane *plane)
static void dpu_plane_early_unregister(struct drm_plane *plane)
{
- _dpu_plane_destroy_debugfs(plane);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+
+ debugfs_remove_recursive(pdpu->debugfs_root);
}
static const struct drm_plane_funcs dpu_plane_funcs = {
@@ -1537,7 +1456,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
if (!pdpu) {
DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
ret = -ENOMEM;
- goto exit;
+ return ERR_PTR(ret);
}
/* cache local stuff for later */
@@ -1623,6 +1542,5 @@ clean_sspp:
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
kfree(pdpu);
-exit:
return ERR_PTR(ret);
}
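The danger/safe debugfs handlers above shrink considerably because two library helpers already implement the boilerplate: simple_read_from_buffer() handles *ppos, count clamping, and copy_to_user(), while kstrtouint_from_user() folds the copy_from_user()/NUL-terminate/parse sequence into one call. A condensed sketch of the resulting pair (this sketch declares the parsed value as unsigned int, matching the pointer type kstrtouint_from_user() expects):

    static ssize_t example_danger_read(struct file *file, char __user *buff,
                                       size_t count, loff_t *ppos)
    {
            struct dpu_kms *kms = file->private_data;
            char buf[40];
            int len;

            len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
            /* handles *ppos, short reads and copy_to_user() internally */
            return simple_read_from_buffer(buff, count, ppos, buf, len);
    }

    static ssize_t example_danger_write(struct file *file,
                                        const char __user *user_buf,
                                        size_t count, loff_t *ppos)
    {
            unsigned int disable_panic;
            int ret;

            /* copies, NUL-terminates and parses in one step */
            ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
            if (ret)
                    return ret;

            /* ... flip the danger/panic state as in the patch ... */
            return count;
    }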
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
deleted file mode 100644
index fc14116789f2..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/string.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-
-#include "dpu_power_handle.h"
-#include "dpu_trace.h"
-
-static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
- [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
- [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
- [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
-};
-
-const char *dpu_power_handle_get_dbus_name(u32 bus_id)
-{
- if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
- return data_bus_name[bus_id];
-
- return NULL;
-}
-
-static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
- u32 event_type)
-{
- struct dpu_power_event *event;
-
- list_for_each_entry(event, &phandle->event_list, list) {
- if (event->event_type & event_type)
- event->cb_fnc(event_type, event->usr);
- }
-}
-
-struct dpu_power_client *dpu_power_client_create(
- struct dpu_power_handle *phandle, char *client_name)
-{
- struct dpu_power_client *client;
- static u32 id;
-
- if (!client_name || !phandle) {
- pr_err("client name is null or invalid power data\n");
- return ERR_PTR(-EINVAL);
- }
-
- client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
- if (!client)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&phandle->phandle_lock);
- strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
- client->usecase_ndx = VOTE_INDEX_DISABLE;
- client->id = id;
- client->active = true;
- pr_debug("client %s created:%pK id :%d\n", client_name,
- client, id);
- id++;
- list_add(&client->list, &phandle->power_client_clist);
- mutex_unlock(&phandle->phandle_lock);
-
- return client;
-}
-
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
- struct dpu_power_client *client)
-{
- if (!client || !phandle) {
- pr_err("reg bus vote: invalid client handle\n");
- } else if (!client->active) {
- pr_err("dpu power deinit already done\n");
- kfree(client);
- } else {
- pr_debug("bus vote client %s destroyed:%pK id:%u\n",
- client->name, client, client->id);
- mutex_lock(&phandle->phandle_lock);
- list_del_init(&client->list);
- mutex_unlock(&phandle->phandle_lock);
- kfree(client);
- }
-}
-
-void dpu_power_resource_init(struct platform_device *pdev,
- struct dpu_power_handle *phandle)
-{
- phandle->dev = &pdev->dev;
-
- INIT_LIST_HEAD(&phandle->power_client_clist);
- INIT_LIST_HEAD(&phandle->event_list);
-
- mutex_init(&phandle->phandle_lock);
-}
-
-void dpu_power_resource_deinit(struct platform_device *pdev,
- struct dpu_power_handle *phandle)
-{
- struct dpu_power_client *curr_client, *next_client;
- struct dpu_power_event *curr_event, *next_event;
-
- if (!phandle || !pdev) {
- pr_err("invalid input param\n");
- return;
- }
-
- mutex_lock(&phandle->phandle_lock);
- list_for_each_entry_safe(curr_client, next_client,
- &phandle->power_client_clist, list) {
- pr_err("client:%s-%d still registered with refcount:%d\n",
- curr_client->name, curr_client->id,
- curr_client->refcount);
- curr_client->active = false;
- list_del(&curr_client->list);
- }
-
- list_for_each_entry_safe(curr_event, next_event,
- &phandle->event_list, list) {
- pr_err("event:%d, client:%s still registered\n",
- curr_event->event_type,
- curr_event->client_name);
- curr_event->active = false;
- list_del(&curr_event->list);
- }
- mutex_unlock(&phandle->phandle_lock);
-}
-
-int dpu_power_resource_enable(struct dpu_power_handle *phandle,
- struct dpu_power_client *pclient, bool enable)
-{
- bool changed = false;
- u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
- struct dpu_power_client *client;
- u32 event_type;
-
- if (!phandle || !pclient) {
- pr_err("invalid input argument\n");
- return -EINVAL;
- }
-
- mutex_lock(&phandle->phandle_lock);
- if (enable)
- pclient->refcount++;
- else if (pclient->refcount)
- pclient->refcount--;
-
- if (pclient->refcount)
- pclient->usecase_ndx = VOTE_INDEX_LOW;
- else
- pclient->usecase_ndx = VOTE_INDEX_DISABLE;
-
- list_for_each_entry(client, &phandle->power_client_clist, list) {
- if (client->usecase_ndx < VOTE_INDEX_MAX &&
- client->usecase_ndx > max_usecase_ndx)
- max_usecase_ndx = client->usecase_ndx;
- }
-
- if (phandle->current_usecase_ndx != max_usecase_ndx) {
- changed = true;
- prev_usecase_ndx = phandle->current_usecase_ndx;
- phandle->current_usecase_ndx = max_usecase_ndx;
- }
-
- pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
- __builtin_return_address(0), changed, max_usecase_ndx,
- pclient->name, pclient->id, enable, pclient->refcount);
-
- if (!changed)
- goto end;
-
- event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE;
-
- dpu_power_event_trigger_locked(phandle, event_type);
-end:
- mutex_unlock(&phandle->phandle_lock);
- return 0;
-}
-
-struct dpu_power_event *dpu_power_handle_register_event(
- struct dpu_power_handle *phandle,
- u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
- void *usr, char *client_name)
-{
- struct dpu_power_event *event;
-
- if (!phandle) {
- pr_err("invalid power handle\n");
- return ERR_PTR(-EINVAL);
- } else if (!cb_fnc || !event_type) {
- pr_err("no callback fnc or event type\n");
- return ERR_PTR(-EINVAL);
- }
-
- event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
- if (!event)
- return ERR_PTR(-ENOMEM);
-
- event->event_type = event_type;
- event->cb_fnc = cb_fnc;
- event->usr = usr;
- strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
- event->active = true;
-
- mutex_lock(&phandle->phandle_lock);
- list_add(&event->list, &phandle->event_list);
- mutex_unlock(&phandle->phandle_lock);
-
- return event;
-}
-
-void dpu_power_handle_unregister_event(
- struct dpu_power_handle *phandle,
- struct dpu_power_event *event)
-{
- if (!phandle || !event) {
- pr_err("invalid phandle or event\n");
- } else if (!event->active) {
- pr_err("power handle deinit already done\n");
- kfree(event);
- } else {
- mutex_lock(&phandle->phandle_lock);
- list_del_init(&event->list);
- mutex_unlock(&phandle->phandle_lock);
- kfree(event);
- }
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
deleted file mode 100644
index a65b7a297f21..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _DPU_POWER_HANDLE_H_
-#define _DPU_POWER_HANDLE_H_
-
-#define MAX_CLIENT_NAME_LEN 128
-
-#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
-#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
-#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
-#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
-
-#include "dpu_io_util.h"
-
-/* events will be triggered on power handler enable/disable */
-#define DPU_POWER_EVENT_DISABLE BIT(0)
-#define DPU_POWER_EVENT_ENABLE BIT(1)
-
-/**
- * mdss_bus_vote_type: register bus vote type
- * VOTE_INDEX_DISABLE: removes the client vote
- * VOTE_INDEX_LOW: keeps the lowest vote for register bus
- * VOTE_INDEX_MAX: invalid
- */
-enum mdss_bus_vote_type {
- VOTE_INDEX_DISABLE,
- VOTE_INDEX_LOW,
- VOTE_INDEX_MAX,
-};
-
-/**
- * enum dpu_power_handle_data_bus_client - type of axi bus clients
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
- */
-enum dpu_power_handle_data_bus_client {
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
-};
-
-/**
- * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
- * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
- */
-enum DPU_POWER_HANDLE_DBUS_ID {
- DPU_POWER_HANDLE_DBUS_ID_MNOC,
- DPU_POWER_HANDLE_DBUS_ID_LLCC,
- DPU_POWER_HANDLE_DBUS_ID_EBI,
- DPU_POWER_HANDLE_DBUS_ID_MAX,
-};
-
-/**
- * struct dpu_power_client: stores the power client for dpu driver
- * @name: name of the client
- * @usecase_ndx: current regs bus vote type
- * @refcount: current refcount if multiple modules are using the
- * same client for enable/disable. Power module will
- * aggregate the refcount and vote accordingly for this
- * client.
- * @id: assigned during create. helps for debugging.
- * @list: list to attach power handle master list
- * @ab: arbitrated bandwidth for each bus client
- * @ib: instantaneous bandwidth for each bus client
- * @active: indicates the state of dpu power handle
- */
-struct dpu_power_client {
- char name[MAX_CLIENT_NAME_LEN];
- short usecase_ndx;
- short refcount;
- u32 id;
- struct list_head list;
- u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
- u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
- bool active;
-};
-
-/*
- * struct dpu_power_event - local event registration structure
- * @client_name: name of the client registering
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback event trigger
- * @event: refer to DPU_POWER_HANDLE_EVENT_*
- * @list: list to attach event master list
- * @active: indicates the state of dpu power handle
- */
-struct dpu_power_event {
- char client_name[MAX_CLIENT_NAME_LEN];
- void (*cb_fnc)(u32 event_type, void *usr);
- void *usr;
- u32 event_type;
- struct list_head list;
- bool active;
-};
-
-/**
- * struct dpu_power_handle: power handle main struct
- * @client_clist: master list to store all clients
- * @phandle_lock: lock to synchronize the enable/disable
- * @dev: pointer to device structure
- * @usecase_ndx: current usecase index
- * @event_list: current power handle event list
- */
-struct dpu_power_handle {
- struct list_head power_client_clist;
- struct mutex phandle_lock;
- struct device *dev;
- u32 current_usecase_ndx;
- struct list_head event_list;
-};
-
-/**
- * dpu_power_resource_init() - initializes the dpu power handle
- * @pdev: platform device to search the power resources
- * @pdata: power handle to store the power resources
- */
-void dpu_power_resource_init(struct platform_device *pdev,
- struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_resource_deinit() - release the dpu power handle
- * @pdev: platform device for power resources
- * @pdata: power handle containing the resources
- *
- * Return: error code.
- */
-void dpu_power_resource_deinit(struct platform_device *pdev,
- struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_client_create() - create the client on power handle
- * @pdata: power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: error code.
- */
-struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
- char *client_name);
-
-/**
- * dpu_power_client_destroy() - destroy the client on power handle
- * @pdata: power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: none
- */
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
- struct dpu_power_client *client);
-
-/**
- * dpu_power_resource_enable() - enable/disable the power resources
- * @pdata: power handle containing the resources
- * @client: client information to enable/disable its vote
- * @enable: boolean request for enable/disable
- *
- * Return: error code.
- */
-int dpu_power_resource_enable(struct dpu_power_handle *pdata,
- struct dpu_power_client *pclient, bool enable);
-
-/**
- * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
- * @phandle: power handle containing the resources
- * @client: client information to bandwidth control
- * @enable: true to enable bandwidth for data bus
- *
- * Return: none
- */
-void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
- struct dpu_power_client *pclient, int enable);
-
-/**
- * dpu_power_handle_register_event - register a callback function for an event.
- * Clients can register for multiple events with a single register.
- * Any block with access to phandle can register for the event
- * notification.
- * @phandle: power handle containing the resources
- * @event_type: event type to register; refer DPU_POWER_HANDLE_EVENT_*
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback on event trigger
- *
- * Return: event pointer if success, or error code otherwise
- */
-struct dpu_power_event *dpu_power_handle_register_event(
- struct dpu_power_handle *phandle,
- u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
- void *usr, char *client_name);
-/**
- * dpu_power_handle_unregister_event - unregister callback for event(s)
- * @phandle: power handle containing the resources
- * @event: event pointer returned after power handle register
- */
-void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
- struct dpu_power_event *event);
-
-/**
- * dpu_power_handle_get_dbus_name - get name of given data bus identifier
- * @bus_id: data bus identifier
- * Return: Pointer to name string if success; NULL otherwise
- */
-const char *dpu_power_handle_get_dbus_name(u32 bus_id);
-
-#endif /* _DPU_POWER_HANDLE_H_ */
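
This hunk removes the tail of the dpu_power_handle interface: the client list, the event registration/unregistration calls, and the data-bus bandwidth vote all go away. For orientation, a minimal sketch of the direction the driver takes instead, assuming blocks lean on runtime PM for their power votes (the helper name and device pointer below are illustrative, not part of this series):

#include <linux/pm_runtime.h>

/* Hypothetical stand-in for a dpu_power_client vote: hold a
 * runtime-PM reference while touching the hardware. */
static int dpu_block_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);		/* power up, clocks on */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}

	/* ... program registers ... */

	pm_runtime_put_sync(dev);		/* drop the vote */
	return 0;
}
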
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index e12c4cefb742..c78b521ceda1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -99,27 +99,6 @@ TRACE_EVENT(dpu_perf_set_ot,
__entry->vbif_idx)
)
-TRACE_EVENT(dpu_perf_update_bus,
- TP_PROTO(int client, unsigned long long ab_quota,
- unsigned long long ib_quota),
- TP_ARGS(client, ab_quota, ib_quota),
- TP_STRUCT__entry(
- __field(int, client)
- __field(u64, ab_quota)
- __field(u64, ib_quota)
- ),
- TP_fast_assign(
- __entry->client = client;
- __entry->ab_quota = ab_quota;
- __entry->ib_quota = ib_quota;
- ),
- TP_printk("Request client:%d ab=%llu ib=%llu",
- __entry->client,
- __entry->ab_quota,
- __entry->ib_quota)
-)
-
-
TRACE_EVENT(dpu_cmd_release_bw,
TP_PROTO(u32 crtc_id),
TP_ARGS(crtc_id),
@@ -319,6 +298,10 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
TRACE_EVENT(dpu_enc_enable,
TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
@@ -539,10 +522,6 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
);
-DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
- TP_PROTO(uint32_t drm_id, u32 event),
- TP_ARGS(drm_id, event)
-);
DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
@@ -749,24 +728,17 @@ TRACE_EVENT(dpu_crtc_vblank_enable,
__field( uint32_t, enc_id )
__field( bool, enable )
__field( bool, enabled )
- __field( bool, suspend )
- __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enc_id = enc_id;
__entry->enable = enable;
__entry->enabled = crtc->enabled;
- __entry->suspend = crtc->suspend;
- __entry->vblank_requested = crtc->vblank_requested;
),
- TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
- "vblank_req:%s}",
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enc_id,
__entry->enable ? "true" : "false",
- __entry->enabled ? "true" : "false",
- __entry->suspend ? "true" : "false",
- __entry->vblank_requested ? "true" : "false")
+ __entry->enabled ? "true" : "false")
);
DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@@ -776,25 +748,15 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
__field( uint32_t, drm_id )
__field( bool, enable )
__field( bool, enabled )
- __field( bool, suspend )
- __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enable = enable;
__entry->enabled = crtc->enabled;
- __entry->suspend = crtc->suspend;
- __entry->vblank_requested = crtc->vblank_requested;
),
- TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+ TP_printk("id:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enable ? "true" : "false",
- __entry->enabled ? "true" : "false",
- __entry->suspend ? "true" : "false",
- __entry->vblank_requested ? "true" : "false")
-);
-DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
- TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
- TP_ARGS(drm_id, enable, crtc)
+ __entry->enabled ? "true" : "false")
);
DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@@ -1004,6 +966,53 @@ TRACE_EVENT(dpu_core_perf_update_clk,
__entry->stop_req ? "true" : "false", __entry->clk_rate)
);
+TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
+ TP_PROTO(u32 new_bits, u32 pending_mask),
+ TP_ARGS(new_bits, pending_mask),
+ TP_STRUCT__entry(
+ __field( u32, new_bits )
+ __field( u32, pending_mask )
+ ),
+ TP_fast_assign(
+ __entry->new_bits = new_bits;
+ __entry->pending_mask = pending_mask;
+ ),
+ TP_printk("new=%x existing=%x", __entry->new_bits,
+ __entry->pending_mask)
+);
+
+DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush),
+ TP_STRUCT__entry(
+ __field( u32, pending_mask )
+ __field( u32, ctl_flush )
+ ),
+ TP_fast_assign(
+ __entry->pending_mask = pending_mask;
+ __entry->ctl_flush = ctl_flush;
+ ),
+ TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask,
+ __entry->ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template,
+ dpu_hw_ctl_trigger_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+
#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
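
The trace changes track the driver work above: dpu_perf_update_bus and dpu_crtc_handle_power_event leave with the power handle, the suspend/vblank_requested fields drop out of the CRTC events, and a new family of CTL pending-flush events arrives. A sketch of how the new class might be driven from the CTL code; struct dpu_hw_ctl and its pending_flush_mask field are assumptions here, not shown in this diff:

/* Accumulate flush bits, tracing the new bits against the old mask. */
static void ctl_update_pending_flush(struct dpu_hw_ctl *ctx, u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

/* Trace the mask against the CTL_FLUSH register value on trigger. */
static void ctl_trigger_flush(struct dpu_hw_ctl *ctx, u32 ctl_flush_reg)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
					       ctl_flush_reg);
	/* ... write CTL_FLUSH ... */
}
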
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 295528292296..ef753ea9c499 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -191,7 +191,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
if (ot_lim == 0)
- goto exit;
+ return;
trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
params->vbif_idx);
@@ -210,8 +210,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
-exit:
- return;
}
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
@@ -312,31 +310,25 @@ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
}
#ifdef CONFIG_DEBUG_FS
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove_recursive(dpu_kms->debugfs_vbif);
- dpu_kms->debugfs_vbif = NULL;
-}
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
char vbif_name[32];
- struct dentry *debugfs_vbif;
+ struct dentry *entry, *debugfs_vbif;
int i, j;
- dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
- if (!dpu_kms->debugfs_vbif) {
- DPU_ERROR("failed to create vbif debugfs\n");
- return -EINVAL;
- }
+ entry = debugfs_create_dir("vbif", debugfs_root);
+ if (IS_ERR_OR_NULL(entry))
+ return;
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
- debugfs_vbif = debugfs_create_dir(vbif_name,
- dpu_kms->debugfs_vbif);
+ debugfs_vbif = debugfs_create_dir(vbif_name, entry);
+ if (IS_ERR_OR_NULL(debugfs_vbif))
+ continue;
debugfs_create_u32("features", 0600, debugfs_vbif,
(u32 *)&vbif->features);
@@ -378,7 +370,5 @@ int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
(u32 *)&cfg->ot_limit);
}
}
-
- return 0;
}
#endif
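
Making the vbif debugfs init return void (and dropping the destroy hook) follows the usual debugfs rule: the filesystem is best-effort, so creation failures should not unwind driver init, and a tree created under the minor's root is removed along with its parent. The idiom, as a standalone sketch with hypothetical names:

#include <linux/debugfs.h>

static void foo_debugfs_init(struct dentry *parent, u32 *stat)
{
	struct dentry *dir;

	dir = debugfs_create_dir("foo", parent);
	if (IS_ERR_OR_NULL(dir))
		return;			/* non-fatal: just no debugfs */

	debugfs_create_u32("stat", 0600, dir, stat);
}
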
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
index f17af52dbbd5..6356876d7a66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -78,17 +78,6 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
*/
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
-#ifdef CONFIG_DEBUG_FS
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
-#else
-static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
- struct dentry *debugfs_root)
-{
- return 0;
-}
-static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
-}
-#endif
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+
#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
index 4f12e5c534c8..9fc9dbde8a27 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -813,18 +813,6 @@ enum color_fmts {
#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
#define COLOR_FMT_P010 COLOR_FMT_P010
-static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
-{
- (void)height;
- (void)width;
-
- /*
- * In the future, calculate the size based on the w/h but just
- * hardcode it for now since 16K satisfies all current usecases.
- */
- return 16 * 1024;
-}
-
/*
* Function arguments:
* @color_fmt
@@ -832,38 +820,32 @@ static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
{
- unsigned int alignment, stride = 0;
+ unsigned int stride = 0;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width, alignment);
+ stride = MSM_MEDIA_ALIGN(width, 128);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
- alignment = 256;
stride = MSM_MEDIA_ALIGN(width, 192);
- stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
break;
case COLOR_FMT_P010_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
break;
case COLOR_FMT_P010:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width*2, alignment);
- break;
- default:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
break;
}
-invalid_input:
+
return stride;
}
@@ -874,38 +856,32 @@ invalid_input:
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
{
- unsigned int alignment, stride = 0;
+ unsigned int stride = 0;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width, alignment);
+ stride = MSM_MEDIA_ALIGN(width, 128);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
- alignment = 256;
stride = MSM_MEDIA_ALIGN(width, 192);
- stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
break;
case COLOR_FMT_P010_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
break;
case COLOR_FMT_P010:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width*2, alignment);
- break;
- default:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
break;
}
-invalid_input:
+
return stride;
}
@@ -916,12 +892,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment, sclines = 0;
+ unsigned int sclines = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
@@ -929,17 +905,14 @@ static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
case COLOR_FMT_P010:
- alignment = 32;
+ sclines = MSM_MEDIA_ALIGN(height, 32);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
- alignment = 16;
+ sclines = MSM_MEDIA_ALIGN(height, 16);
break;
- default:
- return 0;
}
- sclines = MSM_MEDIA_ALIGN(height, alignment);
-invalid_input:
+
return sclines;
}
@@ -950,12 +923,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment, sclines = 0;
+ unsigned int sclines = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
@@ -964,18 +937,13 @@ static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
case COLOR_FMT_P010:
- alignment = 16;
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
break;
case COLOR_FMT_NV12_UBWC:
- alignment = 32;
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
break;
- default:
- goto invalid_input;
}
- sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
-
-invalid_input:
return sclines;
}
@@ -986,12 +954,12 @@ invalid_input:
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
{
- int y_tile_width = 0, y_meta_stride = 0;
+ int y_tile_width = 0, y_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1002,14 +970,11 @@ static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
y_tile_width = 48;
break;
default:
- goto invalid_input;
+ return 0;
}
y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
- y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
-
-invalid_input:
- return y_meta_stride;
+ return MSM_MEDIA_ALIGN(y_meta_stride, 64);
}
/*
@@ -1019,12 +984,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
{
- int y_tile_height = 0, y_meta_scanlines = 0;
+ int y_tile_height = 0, y_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1035,14 +1000,11 @@ static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
y_tile_height = 4;
break;
default:
- goto invalid_input;
+ return 0;
}
y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
- y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
-
-invalid_input:
- return y_meta_scanlines;
+ return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
}
/*
@@ -1052,12 +1014,12 @@ invalid_input:
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
{
- int uv_tile_width = 0, uv_meta_stride = 0;
+ int uv_tile_width = 0, uv_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1068,14 +1030,11 @@ static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
uv_tile_width = 24;
break;
default:
- goto invalid_input;
+ return 0;
}
uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
- uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
-
-invalid_input:
- return uv_meta_stride;
+ return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
}
/*
@@ -1085,12 +1044,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
{
- int uv_tile_height = 0, uv_meta_scanlines = 0;
+ int uv_tile_height = 0, uv_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1101,22 +1060,19 @@ static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
uv_tile_height = 4;
break;
default:
- goto invalid_input;
+ return 0;
}
uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
- uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
-
-invalid_input:
- return uv_meta_scanlines;
+ return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
}
-static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
{
- unsigned int alignment = 0, stride = 0, bpp = 4;
+ unsigned int alignment = 0, bpp = 4;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@@ -1131,21 +1087,18 @@ static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
alignment = 256;
break;
default:
- goto invalid_input;
+ return 0;
}
- stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
-
-invalid_input:
- return stride;
+ return MSM_MEDIA_ALIGN(width * bpp, alignment);
}
-static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment = 0, scanlines = 0;
+ unsigned int alignment = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@@ -1157,220 +1110,46 @@ static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
alignment = 16;
break;
default:
- goto invalid_input;
+ return 0;
}
- scanlines = MSM_MEDIA_ALIGN(height, alignment);
-
-invalid_input:
- return scanlines;
+ return MSM_MEDIA_ALIGN(height, alignment);
}
-static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
{
- int rgb_tile_width = 0, rgb_meta_stride = 0;
+ int rgb_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
- rgb_tile_width = 16;
- break;
- default:
- goto invalid_input;
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
+ return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
}
- rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
- rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
-
-invalid_input:
- return rgb_meta_stride;
+ return 0;
}
-static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
{
- int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+ int rgb_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
- rgb_tile_height = 4;
- break;
- default:
- goto invalid_input;
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
+ return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
}
- rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
- rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
-
-invalid_input:
- return rgb_meta_scanlines;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @width
- * Progressive: width
- * Interlaced: width
- * @height
- * Progressive: height
- * Interlaced: height
- */
-static inline unsigned int VENUS_BUFFER_SIZE(
- int color_fmt, int width, int height)
-{
- const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
- unsigned int uv_alignment = 0, size = 0;
- unsigned int y_plane, uv_plane, y_stride,
- uv_stride, y_sclines, uv_sclines;
- unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
- unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
- unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
- unsigned int y_meta_plane = 0, uv_meta_plane = 0;
- unsigned int rgb_stride = 0, rgb_scanlines = 0;
- unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
- unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
-
- if (!width || !height)
- goto invalid_input;
-
- y_stride = VENUS_Y_STRIDE(color_fmt, width);
- uv_stride = VENUS_UV_STRIDE(color_fmt, width);
- y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
- rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
- rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
-
- switch (color_fmt) {
- case COLOR_FMT_NV21:
- case COLOR_FMT_NV12:
- case COLOR_FMT_P010:
- uv_alignment = 4096;
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + uv_alignment;
- size = y_plane + uv_plane +
- MSM_MEDIA_MAX(extra_size, 8 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_MVTB:
- uv_alignment = 4096;
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + uv_alignment;
- size = y_plane + uv_plane;
- size = 2 * size + extra_size;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_UBWC:
- y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines =
- VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines =
- VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane)*2 +
- MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_BPP10_UBWC:
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane +
- MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_P010_UBWC:
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_RGBA8888:
- rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
- size = rgb_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_RGBA8888_UBWC:
- case COLOR_FMT_RGBA1010102_UBWC:
- case COLOR_FMT_RGB565_UBWC:
- rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
- 4096);
- rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
- rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
- height);
- rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
- rgb_meta_scanlines, 4096);
- size = rgb_ubwc_plane + rgb_meta_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- default:
- break;
- }
-invalid_input:
- return size;
-}
-
-static inline unsigned int VENUS_VIEW2_OFFSET(
- int color_fmt, int width, int height)
-{
- unsigned int offset = 0;
- unsigned int y_plane, uv_plane, y_stride,
- uv_stride, y_sclines, uv_sclines;
- if (!width || !height)
- goto invalid_input;
-
- y_stride = VENUS_Y_STRIDE(color_fmt, width);
- uv_stride = VENUS_UV_STRIDE(color_fmt, width);
- y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
- switch (color_fmt) {
- case COLOR_FMT_NV12_MVTB:
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines;
- offset = y_plane + uv_plane;
- break;
- default:
- break;
- }
-invalid_input:
- return offset;
+ return 0;
}
#endif
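
Net effect on msm_media_info.h: the goto invalid_input pattern becomes early returns, single-use alignment temporaries fold into the MSM_MEDIA_ALIGN() calls, and the unused VENUS_EXTRADATA_SIZE(), VENUS_BUFFER_SIZE() and VENUS_VIEW2_OFFSET() helpers are dropped. One thing to watch: with inline removed, a translation unit that includes this header without calling every helper may now see -Wunused-function warnings. A worked example of what the surviving helpers compute for a 1920x1080 NV12 frame (values checked by hand, not taken from the diff):

	unsigned int y_stride = VENUS_Y_STRIDE(COLOR_FMT_NV12, 1920);
					/* MSM_MEDIA_ALIGN(1920, 128) == 1920 */
	unsigned int y_lines  = VENUS_Y_SCANLINES(COLOR_FMT_NV12, 1080);
					/* MSM_MEDIA_ALIGN(1080, 32)  == 1088 */
	unsigned int y_plane  = y_stride * y_lines;	/* 2088960 bytes */
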
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 457c29dba4a1..8f2359dc87b4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@@ -384,7 +384,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_get(next_bo);
- msm_gem_get_iova(next_bo, kms->aspace, &iova);
+ msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -429,7 +429,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
- dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@@ -442,7 +442,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
if (ret)
goto fail;
} else {
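
Here and in the mdp5 hunks further down, msm_gem_get_iova()/msm_gem_put_iova() become msm_gem_get_and_pin_iova()/msm_gem_unpin_iova(), making explicit that the cursor scanout path holds a pin on the mapping, not just an address. The pairing with the object refcount, sketched as hypothetical helpers:

static int cursor_pin(struct msm_kms *kms, struct drm_gem_object *bo,
		      uint64_t *iova)
{
	int ret;

	drm_gem_object_get(bo);				/* hold the BO */
	ret = msm_gem_get_and_pin_iova(bo, kms->aspace, iova);
	if (ret)					/* pin + map failed */
		drm_gem_object_put_unlocked(bo);
	return ret;
}

static void cursor_unpin(struct msm_kms *kms, struct drm_gem_object *bo)
{
	msm_gem_unpin_iova(bo, kms->aspace);		/* drop the pin */
	drm_gem_object_put_unlocked(bo);		/* drop the ref */
}
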
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index ba8e587f734b..a8fd14d4846b 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -45,7 +45,7 @@ static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
if (!dtv_pdata) {
- dev_err(dev->dev, "could not find dtv pdata\n");
+ DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
return;
}
@@ -209,16 +209,16 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
pc, ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret)
- dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable mdp_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
if (ret)
- dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
@@ -258,14 +258,14 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
- dev_err(dev->dev, "failed to get hdmi_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
goto fail;
}
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
- dev_err(dev->dev, "failed to get tv_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail;
}
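
From this point the series also converts the msm display code from dev_err()/dev_info() to DRM_DEV_ERROR()/DRM_DEV_INFO(). The arguments stay the same; the message simply routes through DRM's printing helpers so driver logs carry DRM tagging and filter uniformly. Shape of the change:

#include <drm/drm_print.h>

	/* before */
	dev_err(dev->dev, "failed to get irq: %d\n", ret);
	/* after: identical arguments, DRM-tagged output */
	DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
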
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 44d1cda56974..e437aa806f7b 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -43,7 +43,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
DBG("found MDP4 version v%d.%d", major, minor);
if (major != 4) {
- dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
@@ -165,7 +165,7 @@ static void mdp4_destroy(struct msm_kms *kms)
struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
@@ -206,7 +206,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
clk_disable_unprepare(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_disable_unprepare(mdp4_kms->pclk);
- clk_disable_unprepare(mdp4_kms->lut_clk);
+ if (mdp4_kms->lut_clk)
+ clk_disable_unprepare(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_disable_unprepare(mdp4_kms->axi_clk);
@@ -220,7 +221,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
clk_prepare_enable(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_prepare_enable(mdp4_kms->pclk);
- clk_prepare_enable(mdp4_kms->lut_clk);
+ if (mdp4_kms->lut_clk)
+ clk_prepare_enable(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_prepare_enable(mdp4_kms->axi_clk);
@@ -251,7 +253,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct LCDC encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
return PTR_ERR(encoder);
}
@@ -260,7 +262,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
- dev_err(dev->dev, "failed to initialize LVDS connector\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
return PTR_ERR(connector);
}
@@ -271,7 +273,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct DTV encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
return PTR_ERR(encoder);
}
@@ -282,7 +284,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* Construct bridge/connector for HDMI: */
ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
}
}
@@ -300,7 +302,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_dsi_encoder_init(dev);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct DSI encoder: %d\n", ret);
return ret;
}
@@ -311,14 +313,14 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) {
- dev_err(dev->dev, "failed to initialize DSI: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
ret);
return ret;
}
break;
default:
- dev_err(dev->dev, "Invalid or unsupported interface\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
return -EINVAL;
}
@@ -354,7 +356,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
plane = mdp4_plane_init(dev, vg_planes[i], false);
if (IS_ERR(plane)) {
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct plane for VG%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@@ -365,7 +367,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
plane = mdp4_plane_init(dev, rgb_planes[i], true);
if (IS_ERR(plane)) {
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct plane for RGB%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@@ -374,7 +376,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
- dev_err(dev->dev, "failed to construct crtc for %s\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
mdp4_crtc_names[i]);
ret = PTR_ERR(crtc);
goto fail;
@@ -396,7 +398,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
if (ret) {
- dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
i, ret);
goto fail;
}
@@ -419,7 +421,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
- dev_err(dev->dev, "failed to allocate kms\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
ret = -ENOMEM;
goto fail;
}
@@ -439,7 +441,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -456,14 +458,14 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (mdp4_kms->vdd) {
ret = regulator_enable(mdp4_kms->vdd);
if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
goto fail;
}
}
mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(mdp4_kms->clk)) {
- dev_err(dev->dev, "failed to get core_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
ret = PTR_ERR(mdp4_kms->clk);
goto fail;
}
@@ -472,23 +474,25 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
- // XXX if (rev >= MDP_REV_42) { ???
- mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
- if (IS_ERR(mdp4_kms->lut_clk)) {
- dev_err(dev->dev, "failed to get lut_clk\n");
- ret = PTR_ERR(mdp4_kms->lut_clk);
- goto fail;
+ if (mdp4_kms->rev >= 2) {
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
}
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
- dev_err(dev->dev, "failed to get axi_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
goto fail;
}
clk_set_rate(mdp4_kms->clk, config->max_clk);
- clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ if (mdp4_kms->lut_clk)
+ clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true;
@@ -519,29 +523,29 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (ret)
goto fail;
} else {
- dev_info(dev->dev, "no iommu, fallback to phys "
+ DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
aspace = NULL;
}
ret = modeset_init(mdp4_kms);
if (ret) {
- dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
- mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
- dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
mdp4_kms->blank_cursor_bo = NULL;
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
- dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
goto fail;
}
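
Two fixes ride along in mdp4_kms.c: lut_clk becomes optional (only newer MDP4 revisions have it, resolving the old XXX comment), and the blank-cursor BO now carries MSM_BO_SCANOUT so it is allocated from memory the display block can scan out. The optional-clock shape, as a sketch; the NULL guards are belt-and-braces, since the common clock framework already treats a NULL clk as a no-op:

	if (mdp4_kms->rev >= 2) {
		mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
		if (IS_ERR(mdp4_kms->lut_clk))		/* present: mandatory */
			return PTR_ERR(mdp4_kms->lut_clk);
	}

	/* later: a NULL lut_clk simply skips the operation */
	if (mdp4_kms->lut_clk)
		clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
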
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 2bfb39082f54..c9e34501a89e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -47,7 +47,7 @@ static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
if (!lcdc_pdata) {
- dev_err(dev->dev, "could not find lvds pdata\n");
+ DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
return;
}
@@ -224,7 +224,7 @@ static void setup_phy(struct drm_encoder *encoder)
break;
default:
- dev_err(dev->dev, "unknown bpp: %d\n", bpp);
+ DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
return;
}
@@ -241,7 +241,7 @@ static void setup_phy(struct drm_encoder *encoder)
MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
break;
default:
- dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
+ DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
return;
}
@@ -361,7 +361,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
if (ret)
- dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
}
bs_set(mdp4_lcdc_encoder, 0);
@@ -377,20 +377,25 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
+ uint32_t config;
int i, ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
return;
/* TODO: hard-coded for 18bpp: */
- mdp4_crtc_set_config(encoder->crtc,
- MDP4_DMA_CONFIG_R_BPC(BPC6) |
- MDP4_DMA_CONFIG_G_BPC(BPC6) |
- MDP4_DMA_CONFIG_B_BPC(BPC6) |
- MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
- MDP4_DMA_CONFIG_PACK(0x21) |
- MDP4_DMA_CONFIG_DEFLKR_EN |
- MDP4_DMA_CONFIG_DITHER_EN);
+ config =
+ MDP4_DMA_CONFIG_R_BPC(BPC6) |
+ MDP4_DMA_CONFIG_G_BPC(BPC6) |
+ MDP4_DMA_CONFIG_B_BPC(BPC6) |
+ MDP4_DMA_CONFIG_PACK(0x21) |
+ MDP4_DMA_CONFIG_DEFLKR_EN |
+ MDP4_DMA_CONFIG_DITHER_EN;
+
+ if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
+ config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
+
+ mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
bs_set(mdp4_lcdc_encoder, 1);
@@ -398,16 +403,16 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
if (ret)
- dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
}
DBG("setting lcdc_clk=%lu", pc);
ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
if (ret)
- dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (!IS_ERR(panel)) {
@@ -461,7 +466,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
/* TODO: do we need different pll in other cases? */
mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
- dev_err(dev->dev, "failed to get lvds_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
goto fail;
}
@@ -470,7 +475,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[0] = reg;
@@ -478,7 +483,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[1] = reg;
@@ -486,7 +491,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[2] = reg;
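
The LCDC change turns the previously hard-coded MDP4_DMA_CONFIG_PACK_ALIGN_MSB into board policy: MSB alignment remains the default, and a board whose 18-bpp panel is wired to the low-order pins opts out via a new qcom,lcdc-align-lsb DT property. The DT side is sketched in a comment (hypothetical node name), the kernel side as in the hunk above:

	/*
	 * Board DT opt-in:
	 *
	 *	lcdc-panel {
	 *		qcom,lcdc-align-lsb;
	 *	};
	 */
	if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
		config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;	/* default */
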
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 7a499731ce93..005066f7154d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -234,22 +234,22 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
format = to_mdp_format(msm_framebuffer_format(fb));
if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
- dev_err(dev->dev, "Width down scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
return -ERANGE;
}
if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
- dev_err(dev->dev, "Height down scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_w > (src_w * UP_SCALE_MAX)) {
- dev_err(dev->dev, "Width up scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_h > (src_h * UP_SCALE_MAX)) {
- dev_err(dev->dev, "Height up scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
return -ERANGE;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 824067d2d427..ea8f7d7daf7f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -553,6 +553,91 @@ const struct mdp5_cfg_hw msm8x96_config = {
.max_clk = 412500000,
};
+const struct mdp5_cfg_hw msm8917_config = {
+ .name = "msm8917",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .max_clk = 320000000,
+};
+
static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
@@ -560,6 +645,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 6, .config = { .hw = &msm8x16_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
+ { .revision = 15, .config = { .hw = &msm8917_config } },
};
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
@@ -600,7 +686,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
}
if (major != 1) {
- dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
@@ -615,7 +701,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
break;
}
if (unlikely(!mdp5_cfg)) {
- dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
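
The new msm8917_config describes an MDP5 v1.15 part: one VIG, two RGB, one DMA and one cursor pipe, two mixers (one display, one writeback), a CDM block, a single DSI interface, and a 320 MHz core-clock ceiling. Selection is table-driven off the minor revision; a sketch of the lookup the new entry participates in (loop shape assumed from the surrounding code, not shown in this hunk):

	const struct mdp5_cfg_hw *hw = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
		if (cfg_handlers[i].revision == minor) {	/* 15 -> msm8917 */
			hw = cfg_handlers[i].config.hw;
			break;
		}
	}
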
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index d6f79dc755b4..c1962f29ec7d 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -55,20 +55,20 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
int pp_id = mixer->pp;
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
- dev_err(dev, "vsync_clk is not initialized\n");
+ DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
return -EINVAL;
}
total_lines_x100 = mode->vtotal * mode->vrefresh;
if (!total_lines_x100) {
- dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
__func__, mode->vtotal, mode->vrefresh);
return -EINVAL;
}
vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
if (vsync_clk_speed <= 0) {
- dev_err(dev, "vsync_clk round rate failed %ld\n",
+ DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
vsync_clk_speed);
return -EINVAL;
}
@@ -102,13 +102,13 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
ret = clk_set_rate(mdp5_kms->vsync_clk,
clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
if (ret) {
- dev_err(encoder->dev->dev,
+ DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_set_rate failed, %d\n", ret);
return ret;
}
ret = clk_prepare_enable(mdp5_kms->vsync_clk);
if (ret) {
- dev_err(encoder->dev->dev,
+ DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_prepare_enable failed, %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index b1da9ce54379..c5fde1a4191a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -173,7 +173,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@@ -662,7 +662,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
if (ret) {
- dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
return ret;
}
@@ -679,7 +679,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
* and that we don't have conflicting mixer stages:
*/
if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
- dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+ DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
cnt, start);
return -EINVAL;
}
@@ -879,7 +879,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
}
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
- dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@@ -903,7 +903,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
if (ret)
return -EINVAL;
@@ -924,7 +924,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
- dev_err(dev->dev, "failed to %sable cursor: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
cursor_enable ? "en" : "dis", ret);
goto end;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index f93d5681267c..65a871f9f0d9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -262,13 +262,13 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
struct mdp5_hw_mixer *mixer = pipeline->mixer;
if (unlikely(WARN_ON(!mixer))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
ctl->id);
return -EINVAL;
}
if (pipeline->r_mixer) {
- dev_err(ctl_mgr->dev->dev, "unsupported configuration");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
return -EINVAL;
}
@@ -604,10 +604,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
- dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
return -EINVAL;
} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
- dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
return -EINVAL;
}
@@ -652,7 +652,7 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
if ((ctl_mgr->ctls[c].status & checkm) == match)
goto found;
- dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
goto unlock;
found:
@@ -698,13 +698,13 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
- dev_err(dev->dev, "failed to allocate CTL manager\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
ret = -ENOMEM;
goto fail;
}
if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
- dev_err(dev->dev, "Increase static pool size to at least %d\n",
+ DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
ret = -ENOSPC;
goto fail;
@@ -723,7 +723,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
if (WARN_ON(!ctl_cfg->base[c])) {
- dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+ DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
goto fail;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index bddd625ab91b..d27e35a217bd 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -264,7 +264,7 @@ static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
return ret;
}
@@ -337,7 +337,7 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
encoder = mdp5_encoder_init(dev, intf, ctl);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
return encoder;
}
@@ -418,7 +418,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
- dev_err(dev->dev, "failed to find dsi from intf %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
intf->num);
ret = -EINVAL;
break;
@@ -443,7 +443,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
default:
- dev_err(dev->dev, "unknown intf: %d\n", intf->type);
+ DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
ret = -EINVAL;
break;
}
@@ -500,7 +500,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
plane = mdp5_plane_init(dev, type);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+ DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
priv->planes[priv->num_planes++] = plane;
@@ -517,7 +517,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
- dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
@@ -552,7 +552,7 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
- dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
+ DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -561,7 +561,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
struct device *dev = &pdev->dev;
struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
- dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
}
if (IS_ERR(clk))
@@ -688,7 +688,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq < 0) {
ret = irq;
- dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -724,12 +724,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
- dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
ret);
goto fail;
}
} else {
- dev_info(&pdev->dev,
+ DRM_DEV_INFO(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
aspace = NULL;
}
@@ -738,7 +738,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = modeset_init(mdp5_kms);
if (ret) {
- dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
@@ -795,7 +795,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
if (IS_ERR(hwpipe)) {
ret = PTR_ERR(hwpipe);
- dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
pipe2name(pipes[i]), ret);
return ret;
}
@@ -867,7 +867,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
- dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
i, ret);
return ret;
}
@@ -897,7 +897,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf) {
- dev_err(dev->dev, "failed to construct INTF%d\n", i);
+ DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 1cc4e57f0226..889c2940692c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -132,7 +132,7 @@ static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
mdp5_mdss);
if (!d) {
- dev_err(dev, "mdss irq domain add failed\n");
+ DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
@@ -246,7 +246,7 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
@@ -259,7 +259,7 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
goto fail;
}
@@ -267,13 +267,13 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to init irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 310459541e48..be13140967b4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -125,7 +125,7 @@ static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
SET_PROPERTY(zpos, ZPOS, uint8_t);
- dev_err(dev->dev, "Invalid property\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@@ -153,7 +153,7 @@ static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
GET_PROPERTY(zpos, ZPOS, uint8_t);
- dev_err(dev->dev, "Invalid property\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@@ -658,7 +658,7 @@ static int calc_scalex_steps(struct drm_plane *plane,
ret = calc_phase_step(src, dest, &phasex_step);
if (ret) {
- dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+ DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
@@ -683,7 +683,7 @@ static int calc_scaley_steps(struct drm_plane *plane,
ret = calc_phase_step(src, dest, &phasey_step);
if (ret) {
- dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+ DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 96c2b828dba4..7cebcb2b3a37 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -88,7 +88,7 @@ static int smp_request_block(struct mdp5_smp *smp,
avail = cnt - bitmap_weight(state->state, cnt);
if (nblks > avail) {
- dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
return -ENOSPC;
}
@@ -188,7 +188,7 @@ int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
ret = smp_request_block(smp, state, cid, n);
if (ret) {
- dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index a9768f823290..7b2a1e6a8810 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "cannot find phy device\n");
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
@@ -40,7 +40,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
of_node_put(phy_node);
if (!phy_pdev || !msm_dsi->phy) {
- dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
@@ -210,7 +210,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
if (ret) {
- dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
goto fail;
}
@@ -222,7 +222,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
if (IS_ERR(msm_dsi->bridge)) {
ret = PTR_ERR(msm_dsi->bridge);
- dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
msm_dsi->bridge = NULL;
goto fail;
}
@@ -244,7 +244,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
if (IS_ERR(msm_dsi->connector)) {
ret = PTR_ERR(msm_dsi->connector);
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
msm_dsi->connector = NULL;
goto fail;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9c6c523eacdc..38e481d2d606 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1050,7 +1050,7 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
msecs_to_jiffies(70));
if (ret <= 0)
- dev_err(dev, "wait for video done timed out\n");
+ DRM_DEV_ERROR(dev, "wait for video done timed out\n");
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
@@ -1083,6 +1083,8 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
return PTR_ERR(data);
}
+ msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
msm_host->tx_size = msm_host->tx_gem_obj->size;
return 0;
@@ -1118,7 +1120,7 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
- msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+ msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
}
@@ -1248,7 +1250,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
if (!dma_base)
return -EINVAL;
- return msm_gem_get_iova(msm_host->tx_gem_obj,
+ return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
priv->kms->aspace, dma_base);
}
@@ -1673,7 +1675,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
- dev_dbg(dev,
+ DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
return 0;
}
@@ -1681,7 +1683,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
num_lanes = len / sizeof(u32);
if (num_lanes < 1 || num_lanes > 4) {
- dev_err(dev, "bad number of data lanes\n");
+ DRM_DEV_ERROR(dev, "bad number of data lanes\n");
return -EINVAL;
}
@@ -1690,7 +1692,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
- dev_err(dev, "failed to read lane data\n");
+ DRM_DEV_ERROR(dev, "failed to read lane data\n");
return ret;
}
@@ -1711,7 +1713,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
*/
for (j = 0; j < num_lanes; j++) {
if (lane_map[j] < 0 || lane_map[j] > 3)
- dev_err(dev, "bad physical lane entry %u\n",
+ DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
lane_map[j]);
if (swap[lane_map[j]] != j)
@@ -1742,13 +1744,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
*/
endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
- dev_dbg(dev, "%s: no endpoint\n", __func__);
+ DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
return 0;
}
ret = dsi_host_parse_lane_data(msm_host, endpoint);
if (ret) {
- dev_err(dev, "%s: invalid lane configuration %d\n",
+ DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
ret = -EINVAL;
goto err;
@@ -1757,7 +1759,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
/* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
- dev_dbg(dev, "%s: no valid device\n", __func__);
+ DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
ret = -ENODEV;
goto err;
}
@@ -1768,7 +1770,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
- dev_err(dev, "%s: failed to get sfpb regmap\n",
+ DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
__func__);
ret = PTR_ERR(msm_host->sfpb);
}
@@ -1918,7 +1920,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (msm_host->irq < 0) {
ret = msm_host->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
return ret;
}
@@ -1926,7 +1928,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"dsi_isr", msm_host);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
msm_host->irq, ret);
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 9a9fa0c75a13..1760483b247e 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -404,7 +404,7 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
ret = devm_regulator_bulk_get(dev, num, s);
if (ret < 0) {
- dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+ DRM_DEV_ERROR(dev, "%s: failed to init regulator, ret=%d\n",
__func__, ret);
return ret;
}
@@ -441,7 +441,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_set_load(s[i].consumer,
regs[i].enable_load);
if (ret < 0) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"regulator %d set op mode failed, %d\n",
i, ret);
goto fail;
@@ -451,7 +451,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_bulk_enable(num, s);
if (ret < 0) {
- dev_err(dev, "regulator enable failed, %d\n", ret);
+ DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
goto fail;
}
@@ -472,7 +472,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
- dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+ DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
pm_runtime_put_sync(dev);
}
@@ -543,7 +543,7 @@ int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
"DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
__func__);
ret = -ENOMEM;
goto fail;
@@ -574,7 +574,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->id = dsi_phy_get_id(phy);
if (phy->id < 0) {
ret = phy->id;
- dev_err(dev, "%s: couldn't identify PHY index, %d\n",
+ DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}
@@ -584,20 +584,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR(phy->base)) {
- dev_err(dev, "%s: failed to map phy base\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}
ret = dsi_phy_regulator_init(phy);
if (ret) {
- dev_err(dev, "%s: failed to init regulator\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to init regulator\n", __func__);
goto fail;
}
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
- dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
goto fail;
}
@@ -617,7 +617,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
if (IS_ERR_OR_NULL(phy->pll))
- dev_info(dev,
+ DRM_DEV_INFO(dev,
"%s: pll init failed: %ld, need separate pll clk driver\n",
__func__, PTR_ERR(phy->pll));
@@ -675,21 +675,21 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = dsi_phy_enable_resource(phy);
if (ret) {
- dev_err(dev, "%s: resource enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
__func__, ret);
goto res_en_fail;
}
ret = dsi_phy_regulator_enable(phy);
if (ret) {
- dev_err(dev, "%s: regulator enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
goto reg_en_fail;
}
ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
if (ret) {
- dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+ DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
goto phy_en_fail;
}
@@ -702,7 +702,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
if (phy->usecase != MSM_DSI_PHY_SLAVE) {
ret = msm_dsi_pll_restore_state(phy->pll);
if (ret) {
- dev_err(dev, "%s: failed to restore pll state, %d\n",
+ DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
__func__, ret);
goto pll_restor_fail;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index b3fffc8dbb2a..44959e79ce28 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -93,7 +93,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -172,7 +172,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@@ -196,7 +196,7 @@ static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 513f4234adc1..a172c667e8bc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -64,7 +64,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
void __iomem *lane_base = phy->lane_base;
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -115,7 +115,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@@ -142,7 +142,7 @@ static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1ca6c69516f5..9ea9478d3707 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -82,7 +82,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 4972b52cbe44..c79505d97fe8 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -76,7 +76,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 398004463498..98790b44da48 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -132,7 +132,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 613e206fa4fc..7a1fb4da2ad3 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -175,7 +175,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
}
if (IS_ERR(pll)) {
- dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
return pll;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 4c03f0b7343e..aabab6311043 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -17,7 +17,7 @@
* | |
* | |
* +---------+ | +----------+ | +----+
- * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
* +---------+ | +----------+ | +----+
* | |
* | | dsi0_pll_by_2_bit_clk
@@ -25,7 +25,7 @@
* | | +----+ | |\ dsi0_pclk_mux
* | |--| /2 |--o--| \ |
* | | +----+ | \ | +---------+
- * | --------------| |--o--| div_7_4 |-- dsi0pll
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
* |------------------------------| / +---------+
* | +-----+ | /
* -----------| /4? |--o----------|/
@@ -39,6 +39,8 @@
#define DSI_PIXEL_PLL_CLK 1
#define NUM_PROVIDED_CLKS 2
+#define VCO_REF_CLK_RATE 19200000
+
struct dsi_pll_regs {
u32 pll_prop_gain_rate;
u32 pll_lockdet_rate;
@@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
parent_rate);
pll_10nm->vco_current_rate = rate;
- pll_10nm->vco_ref_clk_rate = parent_rate;
+ pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
dsi_pll_setup_config(pll_10nm);
@@ -688,7 +690,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
@@ -737,7 +739,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
@@ -760,7 +762,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_10nm->hw_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -788,13 +790,13 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
- dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
- dev_err(&pdev->dev, "failed to map PLL base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@@ -813,7 +815,7 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
ret = pll_10nm_register(pll_10nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
index 71fe60e5f01f..0e18cddd6f22 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
@@ -783,7 +783,7 @@ static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
POLL_TIMEOUT_US);
if (unlikely(!locked))
- dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -829,7 +829,7 @@ static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_14nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_14nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -1039,7 +1039,7 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_14nm->hw_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -1067,13 +1067,13 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
- dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
- dev_err(&pdev->dev, "failed to map PLL base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@@ -1096,7 +1096,7 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
ret = pll_14nm_register(pll_14nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 26e3a01a99c2..dcbbaeb1b1fb 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -156,7 +156,7 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate <= lpfr_lut[i].vco_rate)
break;
if (i == LPFR_LUT_SIZE) {
- dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
+ DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
rate);
return -EINVAL;
}
@@ -386,7 +386,7 @@ static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
}
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL Lock success");
@@ -429,7 +429,7 @@ static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -468,7 +468,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_28nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -581,7 +581,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -607,7 +607,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
- dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -633,13 +633,13 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
pll->en_seq_cnt = 1;
pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
} else {
- dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+ DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
return ERR_PTR(-EINVAL);
}
ret = pll_28nm_register(pll_28nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 49008451085b..d6897464755f 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -327,7 +327,7 @@ static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -368,7 +368,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_28nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -482,7 +482,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -508,7 +508,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
- dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -526,7 +526,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
ret = pll_28nm_register(pll_28nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84b2821..6a63aba98a30 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -157,7 +157,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->bridge = msm_edp_bridge_init(edp);
if (IS_ERR(edp->bridge)) {
ret = PTR_ERR(edp->bridge);
- dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
edp->bridge = NULL;
goto fail;
}
@@ -165,7 +165,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->connector = msm_edp_connector_init(edp);
if (IS_ERR(edp->connector)) {
ret = PTR_ERR(edp->connector);
- dev_err(dev->dev, "failed to create eDP connector: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
edp->connector = NULL;
goto fail;
}
@@ -173,7 +173,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (edp->irq < 0) {
ret = edp->irq;
- dev_err(dev->dev, "failed to get IRQ: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
goto fail;
}
@@ -181,7 +181,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"edp_isr", edp);
if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
edp->irq, ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 23670907a29d..e247d6942a49 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -98,7 +98,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "cannot find phy device\n");
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
@@ -109,7 +109,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
of_node_put(phy_node);
if (!phy_pdev || !hdmi->phy) {
- dev_err(&pdev->dev, "phy driver is not ready\n");
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
return -EPROBE_DEFER;
}
@@ -153,7 +153,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->qfprom_mmio = msm_ioremap(pdev,
config->qfprom_mmio_name, "HDMI_QFPROM");
if (IS_ERR(hdmi->qfprom_mmio)) {
- dev_info(&pdev->dev, "can't find qfprom resource\n");
+ DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
hdmi->qfprom_mmio = NULL;
}
@@ -172,7 +172,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -195,7 +195,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
goto fail;
}
@@ -217,7 +217,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
goto fail;
}
@@ -239,7 +239,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
goto fail;
}
@@ -254,14 +254,14 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
- dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret);
hdmi->i2c = NULL;
goto fail;
}
ret = msm_hdmi_get_phy(hdmi);
if (ret) {
- dev_err(&pdev->dev, "failed to get phy\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n");
goto fail;
}
@@ -303,7 +303,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->bridge = msm_hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
- dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
hdmi->bridge = NULL;
goto fail;
}
@@ -311,7 +311,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->connector = msm_hdmi_connector_init(hdmi);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
- dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
hdmi->connector = NULL;
goto fail;
}
@@ -319,7 +319,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (hdmi->irq < 0) {
ret = hdmi->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -327,11 +327,17 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"hdmi_isr", hdmi);
if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
hdmi->irq, ret);
goto fail;
}
+ ret = msm_hdmi_hpd_enable(hdmi->connector);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
+ goto fail;
+ }
+
encoder->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
@@ -476,7 +482,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
unsigned int level_shift = 0; /* 0dB */
bool down_mix = false;
- dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+ DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
params->sample_width, params->cea.channels);
switch (params->cea.channels) {
@@ -527,7 +533,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
rate = HDMI_SAMPLE_RATE_192KHZ;
break;
default:
- dev_err(dev, "rate[%d] not supported!\n",
+ DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
params->sample_rate);
return -EINVAL;
}
@@ -571,7 +577,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
- static struct hdmi_platform_config *hdmi_cfg;
+ struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
int i, err;
@@ -579,7 +585,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
if (!hdmi_cfg) {
- dev_err(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
+ DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
return -ENXIO;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index accc9a61611d..5c5df6ab2a57 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
void msm_hdmi_connector_irq(struct drm_connector *connector);
struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
+int msm_hdmi_hpd_enable(struct drm_connector *connector);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 7e357077ed26..98d61c690260 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -40,7 +40,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_enable(hdmi->pwr_regs[i]);
if (ret) {
- dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
@@ -49,7 +49,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
DBG("pixclock: %lu", hdmi->pixclock);
ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
if (ret) {
- dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
config->pwr_clk_names[0], ret);
}
}
@@ -57,7 +57,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_clk_cnt; i++) {
ret = clk_prepare_enable(hdmi->pwr_clks[i]);
if (ret) {
- dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
}
}
@@ -82,7 +82,7 @@ static void power_off(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_disable(hdmi->pwr_regs[i]);
if (ret) {
- dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
@@ -105,7 +105,7 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (len < 0) {
- dev_err(&hdmi->pdev->dev,
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
"failed to configure avi infoframe\n");
return;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index e9c9a0af508e..a6eeab2c4dc3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -90,7 +90,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
if (gpio.num != -1) {
ret = gpio_request(gpio.num, gpio.label);
if (ret) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"'%s'(%d) gpio_request failed: %d\n",
gpio.label, gpio.num, ret);
goto err;
@@ -156,7 +156,7 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
ret = clk_prepare_enable(hdmi->hpd_clks[i]);
if (ret) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"failed to enable hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
}
@@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
}
}
-static int hpd_enable(struct hdmi_connector *hdmi_connector)
+int msm_hdmi_hpd_enable(struct drm_connector *connector)
{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
@@ -179,7 +180,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
if (ret) {
- dev_err(dev, "failed to enable hpd regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -187,13 +188,13 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
ret = pinctrl_pm_select_default_state(dev);
if (ret) {
- dev_err(dev, "pinctrl state chg failed: %d\n", ret);
+ DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
goto fail;
}
ret = gpio_config(hdmi, true);
if (ret) {
- dev_err(dev, "failed to configure GPIOs: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret);
goto fail;
}
@@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
{
struct drm_connector *connector = NULL;
struct hdmi_connector *hdmi_connector;
- int ret;
hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
if (!hdmi_connector)
@@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = hpd_enable(hdmi_connector);
- if (ret) {
- dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- return ERR_PTR(ret);
- }
-
drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index 73e20219d431..25d2fe2c60e8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -66,7 +66,7 @@ static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
if (!retry) {
- dev_err(dev->dev, "timeout waiting for DDC\n");
+ DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n");
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 4157722d6b4d..1f4331ed69bd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -37,7 +37,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
reg = devm_regulator_get(dev, cfg->reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev, "failed to get phy regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
cfg->reg_names[i], ret);
return ret;
}
@@ -51,7 +51,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(dev, "failed to get phy clock: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n",
cfg->clk_names[i], ret);
return ret;
}
@@ -73,14 +73,14 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
for (i = 0; i < cfg->num_regs; i++) {
ret = regulator_enable(phy->regs[i]);
if (ret)
- dev_err(dev, "failed to enable regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n",
cfg->reg_names[i], ret);
}
for (i = 0; i < cfg->num_clks; i++) {
ret = clk_prepare_enable(phy->clks[i]);
if (ret)
- dev_err(dev, "failed to enable clock: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n",
cfg->clk_names[i], ret);
}
@@ -159,7 +159,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
if (IS_ERR(phy->mmio)) {
- dev_err(dev, "%s: failed to map phy base\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
return -ENOMEM;
}
@@ -177,7 +177,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
if (ret) {
- dev_err(dev, "couldn't init PLL\n");
+ DRM_DEV_ERROR(dev, "couldn't init PLL\n");
msm_hdmi_phy_resource_disable(phy);
return ret;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 0df504c61833..318708f26731 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -725,7 +725,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
if (IS_ERR(pll->mmio_qserdes_com)) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
@@ -737,7 +737,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
if (IS_ERR(pll->mmio_qserdes_tx[i])) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
}
@@ -745,7 +745,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
clk = devm_clk_register(dev, &pll->clk_hw);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to register pll clock\n");
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 99590758c68b..c6dae6e437f9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -445,7 +445,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
if (IS_ERR(pll->mmio)) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
@@ -454,7 +454,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
clk = devm_clk_register(dev, &pll->clk_hw);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to register pll clock\n");
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 4bcdeca7479d..f5b1256e32b6 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -34,7 +34,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
if (!new_crtc_state->active)
continue;
+ if (drm_crtc_vblank_get(crtc))
+ continue;
+
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+
+ drm_crtc_vblank_put(crtc);
}
}
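
Reassembled from the hunk above, the wait loop now only waits on CRTCs whose vblank machinery could actually be referenced, and it holds that reference across the wait so the interrupt stays enabled until the commit is observed. A sketch of the resulting loop (function wrapper and declarations added here for readability; not a literal quote of the file):

#include <drm/drm_atomic.h>
#include <drm/drm_vblank.h>
#include "msm_kms.h"

static void wait_for_commit_done_sketch(struct msm_kms *kms,
					struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_state;
	int i;

	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!new_crtc_state->active)
			continue;
		/* non-zero return: vblank unusable, nothing to wait on */
		if (drm_crtc_vblank_get(crtc))
			continue;
		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
		/* release so vblank can be disabled again when idle */
		drm_crtc_vblank_put(crtc);
	}
}
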
@@ -78,7 +83,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
kms->funcs->commit(kms, state);
}
- msm_atomic_wait_for_commit_done(dev, state);
+ if (!state->legacy_cursor_update)
+ msm_atomic_wait_for_commit_done(dev, state);
kms->funcs->complete_commit(kms, state);
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index f0da0d3c8a80..fb423d309e91 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
@@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
if (IS_ERR(show_priv->state)) {
ret = PTR_ERR(show_priv->state);
- kfree(show_priv);
- return ret;
+ goto free_priv;
}
show_priv->dev = dev;
- return single_open(file, msm_gpu_show, show_priv);
+ ret = single_open(file, msm_gpu_show, show_priv);
+ if (ret)
+ goto free_priv;
+
+ return 0;
+
+free_priv:
+ kfree(show_priv);
+ return ret;
}
static const struct file_operations msm_gpu_fops = {
@@ -194,13 +201,13 @@ static int late_init_minor(struct drm_minor *minor)
ret = msm_rd_debugfs_init(minor);
if (ret) {
- dev_err(minor->dev->dev, "could not install rd debugfs\n");
+ DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
- dev_err(minor->dev->dev, "could not install perf debugfs\n");
+ DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
return ret;
}
@@ -228,14 +235,14 @@ int msm_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install msm_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
return ret;
}
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms->funcs->debugfs_init) {
+ if (priv->kms && priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 5e758d95751a..d2cdc7b553fe 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -23,8 +23,10 @@
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
+#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
+#include "adreno/adreno_gpu.h"
/*
@@ -35,9 +37,11 @@
* - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
* SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
* MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
+ * GEM object's debug name
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 3
+#define MSM_VERSION_MINOR 4
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -170,7 +174,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
return ERR_PTR(-EINVAL);
}
@@ -178,7 +182,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
if (!ptr) {
- dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
}
@@ -358,6 +362,14 @@ static int get_mdp_ver(struct platform_device *pdev)
#include <linux/of_address.h>
+bool msm_use_mmu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* a2xx comes with its own MMU */
+ return priv->is_a2xx || iommu_present(&platform_bus_type);
+}
+
static int msm_init_vram(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -396,7 +408,7 @@ static int msm_init_vram(struct drm_device *dev)
* Grab the entire CMA chunk carved out in early startup in
* mach-msm:
*/
- } else if (!iommu_present(&platform_bus_type)) {
+ } else if (!msm_use_mmu(dev)) {
DRM_INFO("using %s VRAM carveout\n", vram);
size = memparse(vram, NULL);
}
@@ -419,12 +431,12 @@ static int msm_init_vram(struct drm_device *dev)
p = dma_alloc_attrs(dev->dev, size,
&priv->vram.paddr, GFP_KERNEL, attrs);
if (!p) {
- dev_err(dev->dev, "failed to allocate VRAM\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
return -ENOMEM;
}
- dev_info(dev->dev, "VRAM: %08x->%08x\n",
+ DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
(uint32_t)priv->vram.paddr,
(uint32_t)(priv->vram.paddr + size));
}
@@ -444,7 +456,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
- dev_err(dev, "failed to allocate drm_device\n");
+ DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
return PTR_ERR(ddev);
}
@@ -508,19 +520,16 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->kms = kms;
break;
default:
- kms = ERR_PTR(-ENODEV);
+ /* valid only for the dummy headless case, where of_node=NULL */
+ WARN_ON(dev->of_node);
+ kms = NULL;
break;
}
if (IS_ERR(kms)) {
- /*
- * NOTE: once we have GPU support, having no kms should not
- * be considered fatal.. ideally we would still support gpu
- * and (for example) use dmabuf/prime to share buffers with
- * imx drm driver on iMX5
- */
- dev_err(dev, "failed to load kms\n");
+ DRM_DEV_ERROR(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
+ priv->kms = NULL;
goto err_msm_uninit;
}
@@ -530,7 +539,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (kms) {
ret = kms->funcs->hw_init(kms);
if (ret) {
- dev_err(dev, "kms hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
goto err_msm_uninit;
}
}
@@ -554,17 +563,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
- ret = sched_setscheduler(priv->disp_thread[i].thread,
- SCHED_FIFO, &param);
- if (ret)
- pr_warn("display thread priority update failed: %d\n",
- ret);
-
if (IS_ERR(priv->disp_thread[i].thread)) {
- dev_err(dev, "failed to create crtc_commit kthread\n");
+ DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
priv->disp_thread[i].thread = NULL;
+ goto err_msm_uninit;
}
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ dev_warn(dev, "disp_thread set priority failed: %d\n",
+ ret);
+
/* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker);
@@ -573,6 +583,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
+ if (IS_ERR(priv->event_thread[i].thread)) {
+ DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+ priv->event_thread[i].thread = NULL;
+ goto err_msm_uninit;
+ }
+
/**
* event thread should also run at same priority as disp_thread
* because it is handling frame_done events. A lower priority
@@ -581,39 +597,15 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
* failure at crtc commit level.
*/
ret = sched_setscheduler(priv->event_thread[i].thread,
- SCHED_FIFO, &param);
+ SCHED_FIFO, &param);
if (ret)
- pr_warn("display event thread priority update failed: %d\n",
- ret);
-
- if (IS_ERR(priv->event_thread[i].thread)) {
- dev_err(dev, "failed to create crtc_event kthread\n");
- priv->event_thread[i].thread = NULL;
- }
-
- if ((!priv->disp_thread[i].thread) ||
- !priv->event_thread[i].thread) {
- /* clean up previously created threads if any */
- for ( ; i >= 0; i--) {
- if (priv->disp_thread[i].thread) {
- kthread_stop(
- priv->disp_thread[i].thread);
- priv->disp_thread[i].thread = NULL;
- }
-
- if (priv->event_thread[i].thread) {
- kthread_stop(
- priv->event_thread[i].thread);
- priv->event_thread[i].thread = NULL;
- }
- }
- goto err_msm_uninit;
- }
+ dev_warn(dev, "event_thread set priority failed:%d\n",
+ ret);
}
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
- dev_err(dev, "failed to initialize vblank\n");
+ DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
}
@@ -622,7 +614,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ret = drm_irq_install(ddev, kms->irq);
pm_runtime_put_sync(dev);
if (ret < 0) {
- dev_err(dev, "failed to install IRQ handler\n");
+ DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
goto err_msm_uninit;
}
}
@@ -634,7 +626,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (fbdev)
+ if (kms && fbdev)
priv->fbdev = msm_fbdev_init(ddev);
#endif
@@ -742,7 +734,11 @@ static int msm_irq_postinstall(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
- return kms->funcs->irq_postinstall(kms);
+
+ if (kms->funcs->irq_postinstall)
+ return kms->funcs->irq_postinstall(kms);
+
+ return 0;
}
static void msm_irq_uninstall(struct drm_device *dev)
@@ -809,7 +805,7 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
}
return msm_gem_new_handle(dev, file, args->size,
- args->flags, &args->handle);
+ args->flags, &args->handle, NULL);
}
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
@@ -867,6 +863,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
if (!priv->gpu)
return -EINVAL;
+ /*
+ * Don't pin the memory here - just get an address so that userspace can
+ * be productive
+ */
return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}
@@ -875,23 +875,66 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
- int ret = 0;
+ struct msm_gem_object *msm_obj;
+ int i, ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
- if (args->flags & ~MSM_INFO_FLAGS)
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ case MSM_INFO_GET_IOVA:
+ /* value returned as immediate, not pointer, so len==0: */
+ if (args->len)
+ return -EINVAL;
+ break;
+ case MSM_INFO_SET_NAME:
+ case MSM_INFO_GET_NAME:
+ break;
+ default:
return -EINVAL;
+ }
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
- if (args->flags & MSM_INFO_IOVA) {
- uint64_t iova;
+ msm_obj = to_msm_bo(obj);
- ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
- if (!ret)
- args->offset = iova;
- } else {
- args->offset = msm_gem_mmap_offset(obj);
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ args->value = msm_gem_mmap_offset(obj);
+ break;
+ case MSM_INFO_GET_IOVA:
+ ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
+ break;
+ case MSM_INFO_SET_NAME:
+ /* length check should leave room for terminating null: */
+ if (args->len >= sizeof(msm_obj->name)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = copy_from_user(msm_obj->name,
+ u64_to_user_ptr(args->value), args->len);
+ msm_obj->name[args->len] = '\0';
+ for (i = 0; i < args->len; i++) {
+ if (!isprint(msm_obj->name[i])) {
+ msm_obj->name[i] = '\0';
+ break;
+ }
+ }
+ break;
+ case MSM_INFO_GET_NAME:
+ if (args->value && (args->len < strlen(msm_obj->name))) {
+ ret = -EINVAL;
+ break;
+ }
+ args->len = strlen(msm_obj->name);
+ if (args->value) {
+ ret = copy_to_user(u64_to_user_ptr(args->value),
+ msm_obj->name, args->len);
+ }
+ break;
}
drm_gem_object_put_unlocked(obj);
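
The rework above replaces the old flags/offset pair with an info selector (args->info) plus a generic value/len pair, which is what lets GEM_INFO both return immediates (offset, iova) and transfer a per-object debug name in either direction. A hypothetical userspace helper under the new v1.4 uapi (struct fields and MSM_INFO_* constants taken from the hunk; headers are the usual libdrm/uapi ones):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>

/* Label a BO so it can be identified in debug dumps; the name must fit
 * the kernel-side buffer, and the kernel appends the terminating NUL. */
static int set_bo_debug_name(int drm_fd, uint32_t handle, const char *name)
{
	struct drm_msm_gem_info req = {
		.handle = handle,
		.info   = MSM_INFO_SET_NAME,
		.value  = (uintptr_t)name,	/* userspace pointer as u64 */
		.len    = strlen(name),		/* length without the NUL */
	};

	return ioctl(drm_fd, DRM_IOCTL_MSM_GEM_INFO, &req);
}
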
@@ -1070,18 +1113,15 @@ static int msm_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
- struct msm_kms *kms = priv->kms;
- /* TODO: Use atomic helper suspend/resume */
- if (kms && kms->funcs && kms->funcs->pm_suspend)
- return kms->funcs->pm_suspend(dev);
-
- drm_kms_helper_poll_disable(ddev);
+ if (WARN_ON(priv->pm_state))
+ drm_atomic_state_put(priv->pm_state);
priv->pm_state = drm_atomic_helper_suspend(ddev);
if (IS_ERR(priv->pm_state)) {
- drm_kms_helper_poll_enable(ddev);
- return PTR_ERR(priv->pm_state);
+ int ret = PTR_ERR(priv->pm_state);
+ DRM_ERROR("Failed to suspend dpu, %d\n", ret);
+ return ret;
}
return 0;
@@ -1091,16 +1131,16 @@ static int msm_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
- struct msm_kms *kms = priv->kms;
+ int ret;
- /* TODO: Use atomic helper suspend/resume */
- if (kms && kms->funcs && kms->funcs->pm_resume)
- return kms->funcs->pm_resume(dev);
+ if (WARN_ON(!priv->pm_state))
+ return -ENOENT;
- drm_atomic_helper_resume(ddev, priv->pm_state);
- drm_kms_helper_poll_enable(ddev);
+ ret = drm_atomic_helper_resume(ddev, priv->pm_state);
+ if (!ret)
+ priv->pm_state = NULL;
- return 0;
+ return ret;
}
#endif
@@ -1185,7 +1225,7 @@ static int add_components_mdp(struct device *mdp_dev,
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret) {
- dev_err(mdp_dev, "unable to parse port endpoint\n");
+ DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
of_node_put(ep_node);
return ret;
}
@@ -1207,8 +1247,10 @@ static int add_components_mdp(struct device *mdp_dev,
if (!intf)
continue;
- drm_of_component_match_add(master_dev, matchptr, compare_of,
- intf);
+ if (of_device_is_available(intf))
+ drm_of_component_match_add(master_dev, matchptr,
+ compare_of, intf);
+
of_node_put(intf);
}
@@ -1236,13 +1278,13 @@ static int add_display_components(struct device *dev,
of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
- dev_err(dev, "failed to populate children devices\n");
+ DRM_DEV_ERROR(dev, "failed to populate children devices\n");
return ret;
}
mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
if (!mdp_dev) {
- dev_err(dev, "failed to find MDSS MDP node\n");
+ DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
of_platform_depopulate(dev);
return -ENODEV;
}
@@ -1272,6 +1314,7 @@ static int add_display_components(struct device *dev,
static const struct of_device_id msm_gpu_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
+ { .compatible = "amd,imageon" },
{ .compatible = "qcom,kgsl-3d0" },
{ },
};
@@ -1316,9 +1359,11 @@ static int msm_pdev_probe(struct platform_device *pdev)
struct component_match *match = NULL;
int ret;
- ret = add_display_components(&pdev->dev, &match);
- if (ret)
- return ret;
+ if (get_mdp_ver(pdev)) {
+ ret = add_display_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+ }
ret = add_gpu_components(&pdev->dev, &match);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d11f321f5a9..9cd6a96c6bf2 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -179,6 +179,8 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
+ /* gpu is only set on open(), but we need this info earlier */
+ bool is_a2xx;
struct drm_fb_helper *fbdev;
@@ -241,10 +243,16 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int npages);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt);
+ struct msm_gem_vma *vma);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
@@ -252,9 +260,15 @@ struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+ const char *name, uint64_t va_start, uint64_t va_end);
+
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+bool msm_use_mmu(struct drm_device *dev);
+
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -269,12 +283,14 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -301,7 +317,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle);
+ uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
@@ -312,9 +328,13 @@ void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace, bool locked);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 2a7348aeb38d..67dfd8d3dc12 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -66,7 +66,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -81,7 +81,7 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(fb->obj[i], aspace);
+ msm_gem_unpin_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
@@ -154,7 +154,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (!format) {
- dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+ DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
goto fail;
@@ -196,7 +196,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
- dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret);
goto fail;
}
@@ -233,13 +233,15 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
}
if (IS_ERR(bo)) {
- dev_err(dev->dev, "failed to allocate buffer object\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n");
return ERR_CAST(bo);
}
+ msm_gem_object_set_name(bo, "stolenfb");
+
fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
if (IS_ERR(fb)) {
- dev_err(dev->dev, "failed to allocate fb\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 456622b46335..c03e860ba737 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -91,7 +91,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
sizes->surface_height, pitch, format);
if (IS_ERR(fb)) {
- dev_err(dev->dev, "failed to allocate fb\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
return PTR_ERR(fb);
}
@@ -104,15 +104,15 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
- dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
}
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
- dev_err(dev->dev, "failed to allocate fb info\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail_unlock;
}
@@ -176,7 +176,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
if (ret) {
- dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f59ca27a4a35..51a95da694d8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -88,7 +88,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
p = get_pages_vram(obj, npages);
if (IS_ERR(p)) {
- dev_err(dev->dev, "could not get pages: %ld\n",
+ DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
return p;
}
@@ -99,7 +99,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
- dev_err(dev->dev, "failed to allocate sgt\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
msm_obj->sgt = NULL;
return ptr;
}
@@ -280,7 +280,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
ret = drm_gem_create_mmap_offset(obj);
if (ret) {
- dev_err(dev->dev, "could not allocate mmap offset\n");
+ DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
return 0;
}
@@ -352,63 +352,104 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&msm_obj->lock));
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+ msm_gem_purge_vma(vma->aspace, vma);
+ msm_gem_close_vma(vma->aspace, vma);
del_vma(vma);
}
}
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
int ret = 0;
- mutex_lock(&msm_obj->lock);
-
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- mutex_unlock(&msm_obj->lock);
- return -EBUSY;
- }
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
vma = lookup_vma(obj, aspace);
if (!vma) {
- struct page **pages;
-
vma = add_vma(obj, aspace);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto unlock;
- }
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
+ ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+ if (ret) {
+ del_vma(vma);
+ return ret;
}
-
- ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
- obj->size >> PAGE_SHIFT);
- if (ret)
- goto fail;
}
*iova = vma->iova;
+ return 0;
+}
+
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+ struct page **pages;
+
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+ if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+ return -EBUSY;
+
+ vma = lookup_vma(obj, aspace);
+ if (WARN_ON(!vma))
+ return -EINVAL;
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+ obj->size >> PAGE_SHIFT);
+}
+
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ u64 local;
+ int ret;
+
+ mutex_lock(&msm_obj->lock);
+
+ ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+ if (!ret)
+ ret = msm_gem_pin_iova(obj, aspace);
+
+ if (!ret)
+ *iova = local;
mutex_unlock(&msm_obj->lock);
- return 0;
+ return ret;
+}
-fail:
- del_vma(vma);
-unlock:
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret;
+
+ mutex_lock(&msm_obj->lock);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&msm_obj->lock);
+
return ret;
}
/* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_iova()'.
+ * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
*/
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
@@ -424,15 +465,24 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
return vma ? vma->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin an iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
- // XXX TODO ..
- // NOTE: probably don't need a _locked() version.. we wouldn't
- // normally unmap here, but instead just mark that it could be
- // unmapped (if the iova refcnt drops to zero), but then later
- // if another _get_iova_locked() fails we can start unmapping
- // things that are no longer needed..
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+
+ mutex_lock(&msm_obj->lock);
+ vma = lookup_vma(obj, aspace);
+
+ if (!WARN_ON(!vma))
+ msm_gem_unmap_vma(aspace, vma);
+
+ mutex_unlock(&msm_obj->lock);
}
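With get and pin now split, a caller that must keep the buffer resident (scanout, GPU submit) pairs the pinned variant with an unpin, while address-only users take the unpinned path. A hedged sketch of the pinned lifecycle, assuming a valid obj and aspace:

	uint64_t iova;
	int ret;

	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova); /* inuse++, maps pages */
	if (ret)
		return ret;

	/* ... hardware may access 'iova' while pinned ... */

	msm_gem_unpin_iova(obj, aspace); /* inuse--; mapping kept until purged */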
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -441,7 +491,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->pitch = align_pitch(args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
return msm_gem_new_handle(dev, file, args->size,
- MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+ MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -473,7 +523,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
mutex_lock(&msm_obj->lock);
if (WARN_ON(msm_obj->madv > madv)) {
- dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv);
mutex_unlock(&msm_obj->lock);
return ERR_PTR(-EBUSY);
@@ -739,16 +789,24 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
- /* FIXME: we need to print the address space here too */
- list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " %08llx", vma->iova);
+ seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
+
+ if (!list_empty(&msm_obj->vmas)) {
+ seq_puts(m, " vmas:");
- seq_printf(m, " %zu%s\n", obj->size, madv);
+ list_for_each_entry(vma, &msm_obj->vmas, list)
+ seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+ vma->iova, vma->mapped ? "mapped" : "unmapped",
+ vma->inuse);
+
+ seq_puts(m, "\n");
+ }
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
@@ -775,9 +833,10 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
int count = 0;
size_t size = 0;
+ seq_puts(m, " flags id ref offset kaddr size madv name\n");
list_for_each_entry(msm_obj, list, mm_list) {
struct drm_gem_object *obj = &msm_obj->base;
- seq_printf(m, " ");
+ seq_puts(m, " ");
msm_gem_describe(obj, m);
count++;
size += obj->size;
@@ -831,7 +890,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle)
+ uint32_t size, uint32_t flags, uint32_t *handle,
+ char *name)
{
struct drm_gem_object *obj;
int ret;
@@ -841,6 +901,9 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
if (IS_ERR(obj))
return PTR_ERR(obj);
+ if (name)
+ msm_gem_object_set_name(obj, "%s", name);
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
@@ -864,7 +927,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
case MSM_BO_WC:
break;
default:
- dev_err(dev->dev, "invalid cache flag: %x\n",
+ DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
@@ -912,9 +975,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
size = PAGE_ALIGN(size);
- if (!iommu_present(&platform_bus_type))
+ if (!msm_use_mmu(dev))
use_vram = true;
- else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+ else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
use_vram = true;
if (WARN_ON(use_vram && !priv->vram.size))
@@ -989,8 +1052,8 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
int ret, npages;
/* if we don't have IOMMU, don't bother pretending we can import: */
- if (!iommu_present(&platform_bus_type)) {
- dev_err(dev->dev, "cannot import without IOMMU\n");
+ if (!msm_use_mmu(dev)) {
+ DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
return ERR_PTR(-EINVAL);
}
@@ -1040,24 +1103,30 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return ERR_CAST(obj);
if (iova) {
- ret = msm_gem_get_iova(obj, aspace, iova);
- if (ret) {
- drm_gem_object_put(obj);
- return ERR_PTR(ret);
- }
+ ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ if (ret)
+ goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_put_iova(obj, aspace);
- drm_gem_object_put(obj);
- return ERR_CAST(vaddr);
+ msm_gem_unpin_iova(obj, aspace);
+ ret = PTR_ERR(vaddr);
+ goto err;
}
if (bo)
*bo = obj;
return vaddr;
+err:
+ if (locked)
+ drm_gem_object_put(obj);
+ else
+ drm_gem_object_put_unlocked(obj);
+
+ return ERR_PTR(ret);
}
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
@@ -1073,3 +1142,31 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
{
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace, bool locked)
+{
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ msm_gem_put_vaddr(bo);
+ msm_gem_unpin_iova(bo, aspace);
+
+ if (locked)
+ drm_gem_object_put(bo);
+ else
+ drm_gem_object_put_unlocked(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(bo);
+ va_list ap;
+
+ if (!fmt)
+ return;
+
+ va_start(ap, fmt);
+ vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+ va_end(ap);
+}
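msm_gem_kernel_put() centralizes the teardown that several callers (ringbuffer and memptrs, below) previously open-coded. A hedged allocation/teardown pairing, with illustrative dev/size/aspace values:

	struct drm_gem_object *bo = NULL;
	uint64_t iova;
	void *vaddr;

	vaddr = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED, aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	msm_gem_object_set_name(bo, "example");

	/* ... use vaddr (CPU) and iova (GPU) ... */

	msm_gem_kernel_put(bo, aspace, false); /* false: struct_mutex not held */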
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5d9bd3e47a8..2064fac871b8 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -41,6 +41,8 @@ struct msm_gem_vma {
uint64_t iova;
struct msm_gem_address_space *aspace;
struct list_head list; /* node in msm_gem_object::vmas */
+ bool mapped;
+ int inuse;
};
struct msm_gem_object {
@@ -91,6 +93,8 @@ struct msm_gem_object {
*/
struct drm_mm_node *vram_node;
struct mutex lock; /* Protects resources associated with bo */
+
+ char name[32]; /* Identifier to print for the debugfs files */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -150,6 +154,7 @@ struct msm_gem_submit {
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
+ u32 ident; /* An "identifier" for the submit for logging */
struct {
uint32_t type;
uint32_t size; /* in dwords */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index a90aedd6883a..12b983fc0b56 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -20,6 +20,7 @@
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
+#include "msm_gpu_trace.h"
/*
* Cmdstream submission:
@@ -48,7 +49,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->dev = dev;
submit->gpu = gpu;
submit->fence = NULL;
- submit->pid = get_pid(task_pid(current));
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
submit->ring = gpu->rb[queue->prio];
@@ -77,7 +77,7 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
static inline unsigned long __must_check
copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
+ if (access_ok(from, n))
return __copy_from_user_inatomic(to, from, n);
return -EFAULT;
}
@@ -114,8 +114,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}
+/* at least one of the READ or WRITE flags must be set: */
+#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
- !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
+ !(submit_bo.flags & MANDATORY_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -167,7 +170,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -270,7 +273,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
uint64_t iova;
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
+ ret = msm_gem_get_and_pin_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
if (ret)
@@ -318,6 +321,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
uint32_t *ptr;
int ret = 0;
+ if (!nr_relocs)
+ return 0;
+
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
@@ -406,19 +412,19 @@ static void submit_cleanup(struct msm_gem_submit *submit)
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
- struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
int out_fence_fd = -1;
+ struct pid *pid = get_pid(task_pid(current));
unsigned i;
- int ret;
-
+ int ret, submitid;
if (!gpu)
return -ENXIO;
@@ -441,9 +447,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
+ /* Get a unique identifier for the submission for logging purposes */
+ submitid = atomic_inc_return(&ident) - 1;
+
ring = gpu->rb[queue->prio];
+ trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
+ args->nr_bos, args->nr_cmds);
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence)
@@ -453,11 +466,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
- if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
+ ret = 0;
+ if (!dma_fence_match_context(in_fence, ring->fctx->context))
ret = dma_fence_wait(in_fence, true);
- if (ret)
- return ret;
- }
+
+ dma_fence_put(in_fence);
+ if (ret)
+ return ret;
}
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -478,6 +493,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
+ submit->pid = pid;
+ submit->ident = submitid;
+
if (args->flags & MSM_SUBMIT_SUDO)
submit->in_rb = true;
@@ -583,8 +601,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
out:
- if (in_fence)
- dma_fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index ffbec224551b..557360788084 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -38,20 +38,72 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt)
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
{
- if (!aspace || !vma->iova)
+ unsigned size = vma->node.size << PAGE_SHIFT;
+
+ /* Warn and bail if we try to purge a vma that is still in use */
+ if (WARN_ON(vma->inuse > 0))
+ return;
+
+ /* Don't do anything if the memory isn't mapped */
+ if (!vma->mapped)
return;
- if (aspace->mmu) {
- unsigned size = vma->node.size << PAGE_SHIFT;
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
- }
+ if (aspace->mmu)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+
+ vma->mapped = false;
+}
+
+/* Remove reference counts for the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (!WARN_ON(!vma->iova))
+ vma->inuse--;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+ unsigned size = npages << PAGE_SHIFT;
+ int ret = 0;
+
+ if (WARN_ON(!vma->iova))
+ return -EINVAL;
+
+ /* Increase the usage counter */
+ vma->inuse++;
+
+ if (vma->mapped)
+ return 0;
+
+ vma->mapped = true;
+
+ if (aspace->mmu)
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, IOMMU_READ | IOMMU_WRITE);
+
+ if (ret)
+ vma->mapped = false;
+
+ return ret;
+}
+
+/* Close an iova. Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (WARN_ON(vma->inuse > 0 || vma->mapped))
+ return;
spin_lock(&aspace->lock);
- drm_mm_remove_node(&vma->node);
+ if (vma->iova)
+ drm_mm_remove_node(&vma->node);
spin_unlock(&aspace->lock);
vma->iova = 0;
@@ -59,18 +111,16 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
msm_gem_address_space_put(aspace);
}
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int npages)
{
int ret;
- spin_lock(&aspace->lock);
- if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
- spin_unlock(&aspace->lock);
- return 0;
- }
+ if (WARN_ON(vma->iova))
+ return -EBUSY;
+ spin_lock(&aspace->lock);
ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
spin_unlock(&aspace->lock);
@@ -78,19 +128,14 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
return ret;
vma->iova = vma->node.start << PAGE_SHIFT;
+ vma->mapped = false;
- if (aspace->mmu) {
- unsigned size = npages << PAGE_SHIFT;
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
- size, IOMMU_READ | IOMMU_WRITE);
- }
-
- /* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);
- return ret;
+ return 0;
}
+
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
@@ -114,3 +159,26 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
return aspace;
}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+ const char *name, uint64_t va_start, uint64_t va_end)
+{
+ struct msm_gem_address_space *aspace;
+ u64 size = va_end - va_start;
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&aspace->lock);
+ aspace->name = name;
+ aspace->mmu = msm_gpummu_new(dev, gpu);
+
+ drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
+ size >> PAGE_SHIFT);
+
+ kref_init(&aspace->kref);
+
+ return aspace;
+}
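A reading of the reworked vma lifecycle implied by the functions above (summary, not text from the patch):

	/*
	 * msm_gem_init_vma()  - allocate an iova from the aspace, mapped = false
	 * msm_gem_map_vma()   - inuse++; map the pages on first use
	 * msm_gem_unmap_vma() - inuse--; the mapping is kept for reuse
	 * msm_gem_purge_vma() - actually unmap, legal only once inuse == 0
	 * msm_gem_close_vma() - release the iova and drop the aspace reference
	 */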
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 11aac8337066..5f3eff304355 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -19,6 +19,8 @@
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
+#include "msm_gpu_trace.h"
+#include "adreno/adreno_gpu.h"
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
@@ -107,7 +109,7 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
&msm_devfreq_profile, "simple_ondemand", NULL);
if (IS_ERR(gpu->devfreq.devfreq)) {
- dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
gpu->devfreq.devfreq = NULL;
}
@@ -122,7 +124,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
if (gpu->gpu_reg) {
ret = regulator_enable(gpu->gpu_reg);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
return ret;
}
}
@@ -130,7 +132,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
if (gpu->gpu_cx) {
ret = regulator_enable(gpu->gpu_cx);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
return ret;
}
}
@@ -315,28 +317,28 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
/* Don't record write only objects */
-
state_bo->size = obj->base.size;
state_bo->iova = iova;
- /* Only store the data for buffer objects marked for read */
- if ((flags & MSM_SUBMIT_BO_READ)) {
+ /* Only store data for non-imported buffer objects marked for read */
+ if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
void *ptr;
state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
if (!state_bo->data)
- return;
+ goto out;
ptr = msm_gem_get_vaddr_active(&obj->base);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
- return;
+ state_bo->data = NULL;
+ goto out;
}
memcpy(state_bo->data, ptr, obj->base.size);
msm_gem_put_vaddr(&obj->base);
}
-
+out:
state->nr_bos++;
}
@@ -345,6 +347,10 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
{
struct msm_gpu_state *state;
+ /* Check if the target supports capturing crash state */
+ if (!gpu->funcs->gpu_state_get)
+ return;
+
/* Only save one crash state at a time */
if (gpu->crashstate)
return;
@@ -360,12 +366,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
if (submit) {
int i;
- state->bos = kcalloc(submit->nr_bos,
+ state->bos = kcalloc(submit->nr_cmds,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
- for (i = 0; state->bos && i < submit->nr_bos; i++)
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova, submit->bos[i].flags);
+ for (i = 0; state->bos && i < submit->nr_cmds; i++) {
+ int idx = submit->cmd[i].idx;
+
+ msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+ submit->bos[idx].iova, submit->bos[idx].flags);
+ }
}
/* Set the active crash state to be dumped on failure */
@@ -428,16 +437,15 @@ static void recover_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
- dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit) {
struct task_struct *task;
- rcu_read_lock();
- task = pid_task(submit->pid, PIDTYPE_PID);
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- comm = kstrdup(task->comm, GFP_ATOMIC);
+ comm = kstrdup(task->comm, GFP_KERNEL);
/*
* So slightly annoying, in other paths like
@@ -450,13 +458,13 @@ static void recover_worker(struct work_struct *work)
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
- cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
+ cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ put_task_struct(task);
mutex_lock(&dev->struct_mutex);
}
- rcu_read_unlock();
if (comm && cmd) {
- dev_err(dev->dev, "%s: offending task: %s (%s)\n",
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
@@ -539,11 +547,11 @@ static void hangcheck_handler(struct timer_list *t)
} else if (fence < ring->seqno) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
gpu->name, ring->id);
- dev_err(dev->dev, "%s: completed fence: %u\n",
+ DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
- dev_err(dev->dev, "%s: submitted fence: %u\n",
+ DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
gpu->name, ring->seqno);
queue_work(priv->wq, &gpu->recover_work);
@@ -659,15 +667,33 @@ out:
* Cmdstream submission/retirement:
*/
-static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ struct msm_gem_submit *submit)
{
+ int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+ volatile struct msm_gpu_submit_stats *stats;
+ u64 elapsed, clock = 0;
int i;
+ stats = &ring->memptrs->stats[index];
+ /* Convert 19.2MHz alwayson ticks to nanoseconds for elapsed time */
+ elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
+ do_div(elapsed, 192);
+
+ /* Calculate the clock frequency from the number of CP cycles */
+ if (elapsed) {
+ clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
+ do_div(clock, elapsed);
+ }
+
+ trace_msm_gpu_submit_retired(submit, elapsed, clock,
+ stats->alwayson_start, stats->alwayson_end);
+
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
- msm_gem_put_iova(&msm_obj->base, gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
drm_gem_object_put(&msm_obj->base);
}
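The fixed-point conversion above: one tick of the 19.2 MHz always-on counter is 1/19.2e6 s = 10000/192 ns (about 52.08 ns), so

	elapsed_ns = ticks * 10000 / 192;        /* ns */
	mhz        = cycles * 1000 / elapsed_ns; /* cycles/ns is GHz; x1000 -> MHz */

which matches the do_div() sequence while staying in 64-bit integer math.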
@@ -690,7 +716,7 @@ static void retire_submits(struct msm_gpu *gpu)
list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
if (dma_fence_is_signaled(submit->fence))
- retire_submit(gpu, submit);
+ retire_submit(gpu, ring, submit);
}
}
}
@@ -751,7 +777,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* submit takes a reference to the bo and iova until retired: */
drm_gem_object_get(&msm_obj->base);
- msm_gem_get_iova(&msm_obj->base,
+ msm_gem_get_and_pin_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -800,7 +826,6 @@ static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
uint64_t va_start, uint64_t va_end)
{
- struct iommu_domain *iommu;
struct msm_gem_address_space *aspace;
int ret;
@@ -809,20 +834,27 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
* and have separate page tables per context. For now, to keep things
* simple and to get something working, just use a single address space:
*/
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (!iommu)
- return NULL;
-
- iommu->geometry.aperture_start = va_start;
- iommu->geometry.aperture_end = va_end;
-
- dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+ if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
+ struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ iommu->geometry.aperture_start = va_start;
+ iommu->geometry.aperture_end = va_end;
+
+ DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+
+ aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+ if (IS_ERR(aspace))
+ iommu_domain_free(iommu);
+ } else {
+ aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
+ va_start, va_end);
+ }
- aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
if (IS_ERR(aspace)) {
- dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
PTR_ERR(aspace));
- iommu_domain_free(iommu);
return ERR_CAST(aspace);
}
@@ -871,14 +903,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->irq = platform_get_irq_byname(pdev, config->irqname);
if (gpu->irq < 0) {
ret = gpu->irq;
- dev_err(drm->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
goto fail;
}
ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
IRQF_TRIGGER_HIGH, gpu->name, gpu);
if (ret) {
- dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+ DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
goto fail;
}
@@ -911,22 +943,25 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
config->va_start, config->va_end);
if (gpu->aspace == NULL)
- dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
else if (IS_ERR(gpu->aspace)) {
ret = PTR_ERR(gpu->aspace);
goto fail;
}
- memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
+ memptrs = msm_gem_kernel_new(drm,
+ sizeof(struct msm_rbmemptrs) * nr_rings,
MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
ret = PTR_ERR(memptrs);
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+ DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
goto fail;
}
+ msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
+
if (nr_rings > ARRAY_SIZE(gpu->rb)) {
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
ARRAY_SIZE(gpu->rb));
@@ -939,7 +974,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
- dev_err(drm->dev,
+ DRM_DEV_ERROR(drm->dev,
"could not create ringbuffer %d: %d\n", i, ret);
goto fail;
}
@@ -958,11 +993,7 @@ fail:
gpu->rb[i] = NULL;
}
- if (gpu->memptrs_bo) {
- msm_gem_put_vaddr(gpu->memptrs_bo);
- msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_put_unlocked(gpu->memptrs_bo);
- }
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -981,11 +1012,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
gpu->rb[i] = NULL;
}
- if (gpu->memptrs_bo) {
- msm_gem_put_vaddr(gpu->memptrs_bo);
- msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_put_unlocked(gpu->memptrs_bo);
- }
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index f82bac086666..efb49bb64191 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -187,6 +187,7 @@ struct msm_gpu_state_bo {
u64 iova;
size_t size;
void *data;
+ bool encoded;
};
struct msm_gpu_state {
@@ -201,6 +202,7 @@ struct msm_gpu_state {
u32 wptr;
void *data;
int data_size;
+ bool encoded;
} ring[MSM_GPU_MAX_RINGS];
int nr_registers;
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
new file mode 100644
index 000000000000..1155118a27a1
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm
+#define TRACE_INCLUDE_FILE msm_gpu_trace
+
+TRACE_EVENT(msm_gpu_submit,
+ TP_PROTO(pid_t pid, u32 ringid, u32 id, u32 nr_bos, u32 nr_cmds),
+ TP_ARGS(pid, ringid, id, nr_bos, nr_cmds),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, nr_cmds)
+ __field(u32, nr_bos)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->id = id;
+ __entry->ringid = ringid;
+ __entry->nr_bos = nr_bos;
+ __entry->nr_cmds = nr_cmds;
+ ),
+ TP_printk("id=%d pid=%d ring=%d bos=%d cmds=%d",
+ __entry->id, __entry->pid, __entry->ringid,
+ __entry->nr_bos, __entry->nr_cmds)
+);
+
+TRACE_EVENT(msm_gpu_submit_flush,
+ TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
+ TP_ARGS(submit, ticks),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->ticks = ticks;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d ticks=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->ticks)
+);
+
+TRACE_EVENT(msm_gpu_submit_retired,
+ TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
+ u64 start, u64 end),
+ TP_ARGS(submit, elapsed, clock, start, end),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, elapsed)
+ __field(u64, clock)
+ __field(u64, start_ticks)
+ __field(u64, end_ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->elapsed = elapsed;
+ __entry->clock = clock;
+ __entry->start_ticks = start;
+ __entry->end_ticks = end;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d elapsed=%lld ns mhz=%lld start=%lld end=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->elapsed, __entry->clock,
+ __entry->start_ticks, __entry->end_ticks)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/msm_gpu_tracepoints.c b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
new file mode 100644
index 000000000000..72c074f8c4f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "msm_gem.h"
+#include "msm_ringbuffer.h"
+
+#define CREATE_TRACE_POINTS
+#include "msm_gpu_trace.h"
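Standard tracepoint boilerplate: exactly one translation unit defines CREATE_TRACE_POINTS before including the header, which makes define_trace.h emit the event bodies; every other includer only gets the inline trace_msm_gpu_*() wrappers. Once merged, the events should be switchable at runtime under the drm_msm group in tracefs (e.g. events/drm_msm/msm_gpu_submit/enable).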
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
new file mode 100644
index 000000000000..27312b553dd8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "adreno/adreno_gpu.h"
+#include "adreno/a2xx.xml.h"
+
+struct msm_gpummu {
+ struct msm_mmu base;
+ struct msm_gpu *gpu;
+ dma_addr_t pt_base;
+ uint32_t *table;
+};
+#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
+
+#define GPUMMU_VA_START SZ_16M
+#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
+#define GPUMMU_PAGE_SIZE SZ_4K
+#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
+
+static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
+{
+ return 0;
+}
+
+static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
+{
+}
+
+static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ struct scatterlist *sg;
+ unsigned prot_bits = 0;
+ unsigned i, j;
+
+ if (prot & IOMMU_WRITE)
+ prot_bits |= 1;
+ if (prot & IOMMU_READ)
+ prot_bits |= 2;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ dma_addr_t addr = sg->dma_address;
+ for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
+ gpummu->table[idx] = addr | prot_bits;
+ addr += GPUMMU_PAGE_SIZE;
+ }
+ }
+
+ /* this could be improved by deferring the flush across multiple map() calls */
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ unsigned i;
+
+ for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
+ gpummu->table[idx] = 0;
+
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static void msm_gpummu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+
+ dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+
+ kfree(gpummu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_gpummu_attach,
+ .detach = msm_gpummu_detach,
+ .map = msm_gpummu_map,
+ .unmap = msm_gpummu_unmap,
+ .destroy = msm_gpummu_destroy,
+};
+
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+{
+ struct msm_gpummu *gpummu;
+
+ gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
+ if (!gpummu)
+ return ERR_PTR(-ENOMEM);
+
+ gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
+ GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!gpummu->table) {
+ kfree(gpummu);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gpummu->gpu = gpu;
+ msm_mmu_init(&gpummu->base, dev, &funcs);
+
+ return &gpummu->base;
+}
+
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error)
+{
+ dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
+
+ *pt_base = base;
+ *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
+}
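The a2xx gpummu uses a single flat table: one 32-bit entry per 4 KB page across the 0xfff * 64 KB (~256 MB) window starting at 16 MB, so the table itself is about 256 KB. A worked index example; the values follow from the #defines above:

	/* iova = GPUMMU_VA_START + 2 pages = 0x01002000 */
	idx = (0x01002000 - 0x01000000) / GPUMMU_PAGE_SIZE;	/* = 2 */
	gpummu->table[idx] = page_dma_addr | prot_bits;		/* 1 = write, 2 = read */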
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index b23d33622f37..4d62790cd425 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -66,13 +66,12 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
// pm_runtime_put_sync(mmu->dev);
- WARN_ON(ret < 0);
+ WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index fd88cebb6adb..2b81b43a4bab 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -67,9 +67,6 @@ struct msm_kms_funcs {
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
- /* pm suspend/resume hooks */
- int (*pm_suspend)(struct device *dev);
- int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index aa2c5d4580c8..d21b26604d0b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -25,8 +25,7 @@ struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- unsigned len);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
void (*destroy)(struct msm_mmu *mmu);
};
@@ -54,4 +53,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error);
+
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index cca933458439..90e9d0a48dc0 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd,
uint64_t iova, uint32_t size)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
+ unsigned offset = 0;
const char *buf;
if (iova) {
- buf += iova - submit->bos[idx].iova;
+ offset = iova - submit->bos[idx].iova;
} else {
iova = submit->bos[idx].iova;
size = obj->base.size;
@@ -340,11 +341,19 @@ static void snapshot_buf(struct msm_rd_state *rd,
if (IS_ERR(buf))
return;
+ buf += offset;
+
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr(&obj->base);
}
+static bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+ return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
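A hedged userspace sketch of opting a buffer into the $debugfs/rd dump with the new flag (field names from the msm uapi; MSM_SUBMIT_BO_DUMP is assumed to be the flag this helper tests):

	struct drm_msm_gem_submit_bo bo = {
		.flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP,
		.handle = handle,
		.presumed = 0,
	};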
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
@@ -386,15 +395,16 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- for (i = 0; rd_full && i < submit->nr_bos; i++)
- snapshot_buf(rd, submit, i, 0, 0);
+ for (i = 0; i < submit->nr_bos; i++)
+ if (should_dump(submit, i))
+ snapshot_buf(rd, submit, i, 0, 0);
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
- if (!rd_full) {
+ if (!should_dump(submit, i)) {
snapshot_buf(rd, submit, submit->cmd[i].idx,
submit->cmd[i].iova, szd * 4);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 6f5295b3f2f6..20a96fe69dcd 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -36,15 +36,18 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->gpu = gpu;
ring->id = id;
- /* Pass NULL for the iova pointer - we will map it later */
+
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
- MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
+ MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
ring->start = 0;
goto fail;
}
+
+ msm_gem_object_set_name(ring->bo, "ring%d", id);
+
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
ring->cur = ring->start;
@@ -73,10 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
msm_fence_context_free(ring->fctx);
- if (ring->bo) {
- msm_gem_put_iova(ring->bo, ring->gpu->aspace);
- msm_gem_put_vaddr(ring->bo);
- drm_gem_object_put_unlocked(ring->bo);
- }
+ msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
+
kfree(ring);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index cffce094aecb..6434ebb13136 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -23,9 +23,25 @@
#define rbmemptr(ring, member) \
((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
+#define rbmemptr_stats(ring, index, member) \
+ (rbmemptr((ring), stats) + \
+ ((index) * sizeof(struct msm_gpu_submit_stats)) + \
+ offsetof(struct msm_gpu_submit_stats, member))
+
+struct msm_gpu_submit_stats {
+ u64 cpcycles_start;
+ u64 cpcycles_end;
+ u64 alwayson_start;
+ u64 alwayson_end;
+};
+
+#define MSM_GPU_SUBMIT_STATS_COUNT 64
+
struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
+
+ volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
};
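A hedged illustration of how rbmemptr_stats() resolves to a GPU-visible address, expanding the macro above for one field (index is submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT in retire_submit()):

	addr = ring->memptrs_iova
	     + offsetof(struct msm_rbmemptrs, stats)
	     + index * sizeof(struct msm_gpu_submit_stats)
	     + offsetof(struct msm_gpu_submit_stats, alwayson_start);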
struct msm_ringbuffer {
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 4b75ad40dd80..432c440223bb 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -4,7 +4,8 @@ config DRM_NOUVEAU
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
- select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
+ select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
+ select BACKLIGHT_LCD_SUPPORT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 00add3ba051f..134701a837c8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
+static void
+evo_flush(struct nv50_dmac *dmac)
+{
+ /* Push buffer fetches are not coherent with BAR1, we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+}
+
u32 *
evo_wait(struct nv50_dmac *evoc, int nr)
{
@@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
if (nvif_msec(device, 2000,
@@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
- /* Push buffer fetches are not coherent with BAR1, we need to ensure
- * writes have been flushed right through to VRAM before writing PUT.
- */
- if (dmac->push.type & NVIF_MEM_VRAM) {
- struct nvif_device *device = dmac->base.device;
- nvif_wr32(&device->object, 0x070000, 0x00000001);
- nvif_msec(device, 2000,
- if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
- break;
- );
- }
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
@@ -1272,6 +1279,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
{
struct nv50_mstm *mstm = *pmstm;
if (mstm) {
+ drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
kfree(*pmstm);
*pmstm = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 232c3f6bc35b..f900e94592f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1173,10 +1173,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
goto err_free;
}
+ err = nouveau_drm_device_init(drm);
+ if (err)
+ goto err_put;
+
platform_set_drvdata(pdev, drm);
return drm;
+err_put:
+ drm_dev_put(drm);
err_free:
nvkm_device_del(pdevice);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 1f8161b041be..465120809eb3 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -177,6 +177,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 0a485c5b982e..00a9c2ab9e6c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5418,9 +5418,15 @@ static int dsi_probe(struct platform_device *pdev)
dsi->num_lanes_supported = 3;
}
+ r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (r) {
+ DSSERR("Failed to populate DSI child devices: %d\n", r);
+ goto err_pm_disable;
+ }
+
r = dsi_init_output(dsi);
if (r)
- goto err_pm_disable;
+ goto err_of_depopulate;
r = dsi_probe_of(dsi);
if (r) {
@@ -5428,22 +5434,16 @@ static int dsi_probe(struct platform_device *pdev)
goto err_uninit_output;
}
- r = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (r) {
- DSSERR("Failed to populate DSI child devices: %d\n", r);
- goto err_uninit_output;
- }
-
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
- goto err_of_depopulate;
+ goto err_uninit_output;
return 0;
-err_of_depopulate:
- of_platform_depopulate(dev);
err_uninit_output:
dsi_uninit_output(dsi);
+err_of_depopulate:
+ of_platform_depopulate(dev);
err_pm_disable:
pm_runtime_disable(dev);
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 1f698a95a94a..33e15cb77efa 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -432,7 +432,7 @@ struct omap_dss_device {
const struct omap_dss_driver *driver;
const struct omap_dss_device_ops *ops;
unsigned long ops_flags;
- unsigned long bus_flags;
+ u32 bus_flags;
/* helper variable for driver suspend/resume */
bool activate_after_resume;
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 452e625f6ce3..933ebc9f9faa 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -52,17 +52,44 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
+static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_connector *connector;
+ bool hdmi_mode;
+
+ hdmi_mode = false;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ hdmi_mode = omap_connector_get_hdmi_mode(connector);
+ break;
+ }
+ }
+
+ if (dssdev->ops->hdmi.set_hdmi_mode)
+ dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
+
+ if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
+ struct hdmi_avi_infoframe avi;
+ int r;
+
+ r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
+ false);
+ if (r == 0)
+ dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
+ }
+}
+
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_connector *connector;
struct omap_dss_device *dssdev;
struct videomode vm = { 0 };
- bool hdmi_mode;
- int r;
drm_display_mode_to_videomode(adjusted_mode, &vm);
@@ -112,27 +139,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
}
/* Set the HDMI mode and HDMI infoframe if applicable. */
- hdmi_mode = false;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
- break;
- }
- }
-
- dssdev = omap_encoder->output;
-
- if (dssdev->ops->hdmi.set_hdmi_mode)
- dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
- if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
- struct hdmi_avi_infoframe avi;
-
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
- false);
- if (r == 0)
- dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
- }
+ if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
+ omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6e828158bcb0..d410e2925162 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -163,8 +163,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
return -EINVAL;
- if (!access_ok(VERIFY_READ,
- u64_to_user_ptr(cmd->command),
+ if (!access_ok(u64_to_user_ptr(cmd->command),
cmd->command_size))
return -EFAULT;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 0a693fede05e..30f85f0130cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -217,7 +217,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
qxl_bo_ref(bo);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = false;
+ entry->tv.num_shared = 0;
list_add_tail(&entry->tv.head, &release->bos);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1ae31dbc61c6..f43305329939 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -178,7 +178,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].tv.shared = !r->write_domain;
+ p->relocs[i].tv.num_shared = !r->write_domain;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority);
@@ -253,7 +253,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
resv = reloc->robj->tbo.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
- reloc->tv.shared);
+ reloc->tv.num_shared);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 27d8e7dd2d06..44617dec8183 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -552,7 +552,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
INIT_LIST_HEAD(&list);
tv.bo = &bo_va->bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index f8b35df44c60..b3019505065a 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -119,40 +119,38 @@ static void radeon_mn_release(struct mmu_notifier *mn,
* unmap them by moving them into the system domain again.
*/
static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end,
- bool blockable)
+ const struct mmu_notifier_range *range)
{
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
struct ttm_operation_ctx ctx = { false, false };
struct interval_tree_node *it;
+ unsigned long end;
int ret = 0;
/* notification is exclusive, but interval is inclusive */
- end -= 1;
+ end = range->end - 1;
/* TODO we should be able to split locking for interval tree and
* the tear down.
*/
- if (blockable)
+ if (range->blockable)
mutex_lock(&rmn->lock);
else if (!mutex_trylock(&rmn->lock))
return -EAGAIN;
- it = interval_tree_iter_first(&rmn->objects, start, end);
+ it = interval_tree_iter_first(&rmn->objects, range->start, end);
while (it) {
struct radeon_mn_node *node;
struct radeon_bo *bo;
long r;
- if (!blockable) {
+ if (!range->blockable) {
ret = -EAGAIN;
goto out_unlock;
}
node = container_of(it, struct radeon_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
+ it = interval_tree_iter_next(it, range->start, end);
list_for_each_entry(bo, &node->bos, mn_list) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index a3d2ca07a058..0d374211661c 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -142,7 +142,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.shared = true;
+ list[0].tv.num_shared = 1;
list[0].tiling_flags = 0;
list_add(&list[0].tv.head, head);
@@ -154,7 +154,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.shared = true;
+ list[idx].tv.num_shared = 1;
list[idx].tiling_flags = 0;
list_add(&list[idx++].tv.head, head);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index d85f0a1c1581..cebf313c6e1f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -202,10 +202,25 @@ void rcar_du_group_put(struct rcar_du_group *rgrp)
static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
{
- struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
+ struct rcar_du_device *rcdu = rgrp->dev;
+
+ /*
+ * Group start/stop is controlled by the DRES and DEN bits of DSYSR0
+ * for the first group and DSYSR2 for the second group. On most DU
+ * instances, this maps to the first CRTC of the group, and we can just
+ * use rcar_du_crtc_dsysr_clr_set() to access the correct DSYSR. On
+ * M3-N, however, DU2 doesn't exist, but DSYSR2 does. We thus need to
+ * access the register directly using group read/write.
+ */
+ if (rcdu->info->channels_mask & BIT(rgrp->index * 2)) {
+ struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
- rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
- start ? DSYSR_DEN : DSYSR_DRES);
+ rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
+ start ? DSYSR_DEN : DSYSR_DRES);
+ } else {
+ rcar_du_group_write(rgrp, DSYSR,
+ start ? DSYSR_DEN : DSYSR_DRES);
+ }
}
void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 37f9a3b651ab..be6c2573039a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
- rockchip_drm_platform_remove(pdev);
-}
-
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
- .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 9d4cd196037a..dbb69063b3d5 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -211,6 +211,62 @@ void drm_sched_fault(struct drm_gpu_scheduler *sched)
}
EXPORT_SYMBOL(drm_sched_fault);
+/**
+ * drm_sched_suspend_timeout - Suspend scheduler job timeout
+ *
+ * @sched: scheduler instance for which to suspend the timeout
+ *
+ * Suspend the delayed work timeout for the scheduler. This is done by
+ * modifying the delayed work timeout to an arbitrarily large value,
+ * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
+ * called from an IRQ context.
+ *
+ * Returns the remaining timeout.
+ *
+ */
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
+{
+ unsigned long sched_timeout, now = jiffies;
+
+ sched_timeout = sched->work_tdr.timer.expires;
+
+ /*
+ * Modify the timeout to an arbitrarily large value. This also prevents
+ * the timeout from being restarted when new submissions arrive
+ */
+ if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
+ && time_after(sched_timeout, now))
+ return sched_timeout - now;
+ else
+ return sched->timeout;
+}
+EXPORT_SYMBOL(drm_sched_suspend_timeout);
+
+/**
+ * drm_sched_resume_timeout - Resume scheduler job timeout
+ *
+ * @sched: scheduler instance for which to resume the timeout
+ * @remaining: remaining timeout
+ *
+ * Resume the delayed work timeout for the scheduler. Note that
+ * this function can be called from an IRQ context.
+ */
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+
+ if (list_empty(&sched->ring_mirror_list))
+ cancel_delayed_work(&sched->work_tdr);
+ else
+ mod_delayed_work(system_wq, &sched->work_tdr, remaining);
+
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+EXPORT_SYMBOL(drm_sched_resume_timeout);
+
/* job_finish is called after the hw fence is signaled
*/
static void drm_sched_job_finish(struct work_struct *work)
@@ -218,6 +274,7 @@ static void drm_sched_job_finish(struct work_struct *work)
struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
finish_work);
struct drm_gpu_scheduler *sched = s_job->sched;
+ unsigned long flags;
/*
* Canceling the timeout without removing our job from the ring mirror
@@ -228,12 +285,12 @@ static void drm_sched_job_finish(struct work_struct *work)
*/
cancel_delayed_work_sync(&sched->work_tdr);
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
/* remove job from ring_mirror_list */
list_del_init(&s_job->node);
/* queue TDR for next job */
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
sched->ops->free_job(s_job);
}
@@ -249,20 +306,22 @@ static void drm_sched_job_finish_cb(struct dma_fence *f,
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
+ unsigned long flags;
dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
drm_sched_job_finish_cb);
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
+ unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
job = list_first_entry_or_null(&sched->ring_mirror_list,
@@ -271,9 +330,9 @@ static void drm_sched_job_timedout(struct work_struct *work)
if (job)
job->sched->ops->timedout_job(job);
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
/**
@@ -287,9 +346,10 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
{
struct drm_sched_job *s_job;
struct drm_sched_entity *entity, *tmp;
+ unsigned long flags;
int i;
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
@@ -299,7 +359,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
atomic_dec(&sched->hw_rq_count);
}
}
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
@@ -337,9 +397,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
bool found_guilty = false;
+ unsigned long flags;
int r;
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *fence;
@@ -353,7 +414,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
@@ -372,10 +433,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
drm_sched_expel_job_unlocked(s_job);
drm_sched_process_job(NULL, &s_fence->cb);
}
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
}
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_job_recovery);
@@ -612,7 +673,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
long timeout,
const char *name)
{
- int i;
+ int i, ret;
sched->ops = ops;
sched->hw_submission_limit = hw_submission;
sched->name = name;
@@ -633,8 +694,10 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(drm_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
+ ret = PTR_ERR(sched->thread);
+ sched->thread = NULL;
DRM_ERROR("Failed to create scheduler for %s.\n", name);
- return PTR_ERR(sched->thread);
+ return ret;
}
sched->ready = true;
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
index a2f753205a3e..9d2bcdf8bc29 100644
--- a/drivers/gpu/drm/selftests/test-drm_damage_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -53,7 +53,7 @@ static bool check_damage_clip(struct drm_plane_state *state, struct drm_rect *r,
int src_y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
if (x1 >= x2 || y1 >= y2) {
- pr_err("Cannot have damage clip with no dimention.\n");
+ pr_err("Cannot have damage clip with no dimension.\n");
return false;
}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index f80e82e16475..607a6ea17ecc 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1978,6 +1978,23 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
+{
+ unsigned int i;
+
+ if (!dc->soc->wgrps)
+ return true;
+
+ for (i = 0; i < dc->soc->num_wgrps; i++) {
+ const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
+
+ if (wgrp->dc == dc->pipe && wgrp->num_windows > 0)
+ return true;
+ }
+
+ return false;
+}
+
static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -1993,22 +2010,8 @@ static int tegra_dc_init(struct host1x_client *client)
* assign a primary plane to them, which in turn will cause KMS to
* crash.
*/
- if (dc->soc->wgrps) {
- bool has_wgrps = false;
- unsigned int i;
-
- for (i = 0; i < dc->soc->num_wgrps; i++) {
- const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
-
- if (wgrp->dc == dc->pipe && wgrp->num_windows > 0) {
- has_wgrps = true;
- break;
- }
- }
-
- if (!has_wgrps)
- return 0;
- }
+ if (!tegra_dc_has_window_groups(dc))
+ return 0;
dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
@@ -2094,6 +2097,9 @@ static int tegra_dc_exit(struct host1x_client *client)
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
+ if (!tegra_dc_has_window_groups(dc))
+ return 0;
+
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 65ea4988b332..4b70ce664c41 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1274,6 +1274,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra194-display", },
{ .compatible = "nvidia,tegra194-dc", },
{ .compatible = "nvidia,tegra194-sor", },
+ { .compatible = "nvidia,tegra194-vic", },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index f685e72949d1..352d05feabb0 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -141,9 +141,9 @@ int falcon_load_firmware(struct falcon *falcon)
/* allocate iova space for the firmware */
falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
&falcon->firmware.paddr);
- if (!falcon->firmware.vaddr) {
- dev_err(falcon->dev, "dma memory mapping failed\n");
- return -ENOMEM;
+ if (IS_ERR(falcon->firmware.vaddr)) {
+ dev_err(falcon->dev, "DMA memory mapping failed\n");
+ return PTR_ERR(falcon->firmware.vaddr);
}
/* copy firmware image into local area. this also ensures endianness */
@@ -197,11 +197,19 @@ void falcon_exit(struct falcon *falcon)
int falcon_boot(struct falcon *falcon)
{
unsigned long offset;
+ u32 value;
int err;
if (!falcon->firmware.vaddr)
return -EINVAL;
+ err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
+ (value & (FALCON_DMACTL_IMEM_SCRUBBING |
+ FALCON_DMACTL_DMEM_SCRUBBING)) == 0,
+ 10, 10000);
+ if (err < 0)
+ return err;
+
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
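The poll added above relies on readl_poll_timeout() from <linux/iopoll.h>: it re-reads the register into `value` until the condition holds, sleeping between reads, and returns 0 on success or -ETIMEDOUT. The generic shape, for reference:

	/* poll *addr into val until cond holds; sleep ~sleep_us between
	 * reads, give up after timeout_us (returns -ETIMEDOUT then) */
	err = readl_poll_timeout(addr, val, cond, sleep_us, timeout_us);
	if (err < 0)
		return err;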
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 6112d9042979..922a48d5a483 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -742,7 +742,9 @@ static const struct host1x_client_ops tegra_display_hub_ops = {
static int tegra_display_hub_probe(struct platform_device *pdev)
{
+ struct device_node *child = NULL;
struct tegra_display_hub *hub;
+ struct clk *clk;
unsigned int i;
int err;
@@ -801,6 +803,34 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
return err;
}
+ hub->num_heads = of_get_child_count(pdev->dev.of_node);
+
+ hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
+ GFP_KERNEL);
+ if (!hub->clk_heads)
+ return -ENOMEM;
+
+ for (i = 0; i < hub->num_heads; i++) {
+ child = of_get_next_child(pdev->dev.of_node, child);
+ if (!child) {
+ dev_err(&pdev->dev, "failed to find node for head %u\n",
+ i);
+ return -ENODEV;
+ }
+
+ clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock for head %u\n",
+ i);
+ of_node_put(child);
+ return PTR_ERR(clk);
+ }
+
+ hub->clk_heads[i] = clk;
+ }
+
+ of_node_put(child);
+
/* XXX: enable clock across reset? */
err = reset_control_assert(hub->rst);
if (err < 0)
@@ -840,12 +870,16 @@ static int tegra_display_hub_remove(struct platform_device *pdev)
static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
{
struct tegra_display_hub *hub = dev_get_drvdata(dev);
+ unsigned int i = hub->num_heads;
int err;
err = reset_control_assert(hub->rst);
if (err < 0)
return err;
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
clk_disable_unprepare(hub->clk_hub);
clk_disable_unprepare(hub->clk_dsc);
clk_disable_unprepare(hub->clk_disp);
@@ -856,6 +890,7 @@ static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
static int __maybe_unused tegra_display_hub_resume(struct device *dev)
{
struct tegra_display_hub *hub = dev_get_drvdata(dev);
+ unsigned int i;
int err;
err = clk_prepare_enable(hub->clk_disp);
@@ -870,13 +905,22 @@ static int __maybe_unused tegra_display_hub_resume(struct device *dev)
if (err < 0)
goto disable_dsc;
+ for (i = 0; i < hub->num_heads; i++) {
+ err = clk_prepare_enable(hub->clk_heads[i]);
+ if (err < 0)
+ goto disable_heads;
+ }
+
err = reset_control_deassert(hub->rst);
if (err < 0)
- goto disable_hub;
+ goto disable_heads;
return 0;
-disable_hub:
+disable_heads:
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
clk_disable_unprepare(hub->clk_hub);
disable_dsc:
clk_disable_unprepare(hub->clk_dsc);
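One subtlety in the head-clock loop added to probe above: of_get_next_child() drops the reference on the node passed as the second argument and returns the next child with a fresh reference, so at most one `child` reference is live at a time; the error path and the trailing of_node_put() release it. The idiom in isolation, assuming a `parent` node:

	struct device_node *child = NULL;

	while ((child = of_get_next_child(parent, child)) != NULL) {
		/* use child; on early exit do of_node_put(child) */
	}
	/* loop ended with child == NULL, nothing left to put */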
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 6696a85fc1f2..479087c0705a 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -49,6 +49,9 @@ struct tegra_display_hub {
struct clk *clk_hub;
struct reset_control *rst;
+ unsigned int num_heads;
+ struct clk **clk_heads;
+
const struct tegra_display_hub_soc *soc;
struct tegra_windowgroup *wgrps;
};
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b129da2e5afd..ef8692b7075a 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -19,6 +19,8 @@
#include <soc/tegra/pmc.h>
+#include <sound/hda_verbs.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
@@ -29,14 +31,6 @@
#include "sor.h"
#include "trace.h"
-/*
- * XXX Remove this after the commit adding it to soc/tegra/pmc.h has been
- * merged. Having this around after the commit is merged should be safe since
- * the preprocessor will effectively replace all occurrences and therefore no
- * duplicate will be defined.
- */
-#define TEGRA_IO_PAD_HDMI_DP0 26
-
#define SOR_REKEY 0x38
struct tegra_sor_hdmi_settings {
@@ -407,6 +401,7 @@ struct tegra_sor {
const struct tegra_sor_soc *soc;
void __iomem *regs;
unsigned int index;
+ unsigned int irq;
struct reset_control *rst;
struct clk *clk_parent;
@@ -433,6 +428,11 @@ struct tegra_sor {
struct delayed_work scdc;
bool scdc_enabled;
+
+ struct {
+ unsigned int sample_rate;
+ unsigned int channels;
+ } audio;
};
struct tegra_sor_state {
@@ -2139,6 +2139,144 @@ tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
return 0;
}
+static void tegra_sor_write_eld(struct tegra_sor *sor)
+{
+ size_t length = drm_eld_size(sor->output.connector.eld), i;
+
+ for (i = 0; i < length; i++)
+ tegra_sor_writel(sor, i << 8 | sor->output.connector.eld[i],
+ SOR_AUDIO_HDA_ELD_BUFWR);
+
+ /*
+ * The HDA codec will always report an ELD buffer size of 96 bytes and
+ * the HDA codec driver will check that each byte read from the buffer
+ * is valid. Therefore every byte must be written, even if fewer than 96
+ * bytes were parsed from the EDID.
+ */
+ for (i = length; i < 96; i++)
+ tegra_sor_writel(sor, i << 8 | 0, SOR_AUDIO_HDA_ELD_BUFWR);
+}
+
+static void tegra_sor_audio_prepare(struct tegra_sor *sor)
+{
+ u32 value;
+
+ tegra_sor_write_eld(sor);
+
+ value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
+ tegra_sor_writel(sor, value, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
+{
+ tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
+{
+ u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
+ struct hdmi_audio_infoframe frame;
+ u32 value;
+ int err;
+
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to setup audio infoframe: %d\n", err);
+ return err;
+ }
+
+ frame.channels = sor->audio.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(sor->dev, "failed to pack audio infoframe: %d\n", err);
+ return err;
+ }
+
+ tegra_sor_hdmi_write_infopack(sor, buffer, err);
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+
+ return 0;
+}
+
+static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+ /* select HDA audio input */
+ value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+ value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+ /* inject null samples */
+ if (sor->audio.channels != 2)
+ value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+ else
+ value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+ value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+ tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+ /* enable advertising HBR capability */
+ tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+
+ tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
+
+ value = SOR_HDMI_SPARE_ACR_PRIORITY_HIGH |
+ SOR_HDMI_SPARE_CTS_RESET(1) |
+ SOR_HDMI_SPARE_HW_CTS_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_SPARE);
+
+ /* enable HW CTS */
+ value = SOR_HDMI_ACR_SUBPACK_LOW_SB1(0);
+ tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_LOW);
+
+ /* allow packet to be sent */
+ value = SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ /* reset N counter and enable lookup */
+ value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+ value = (24000 * 4096) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
+ tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0441);
+ tegra_sor_writel(sor, 4704, SOR_AUDIO_NVAL_0441);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0882);
+ tegra_sor_writel(sor, 9408, SOR_AUDIO_NVAL_0882);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
+ tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
+
+ value = (24000 * 6144) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
+ tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
+
+ value = (24000 * 12288) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
+ tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
+
+ value = (24000 * 24576) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
+ tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_N);
+ value &= ~SOR_HDMI_AUDIO_N_RESET;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+ tegra_sor_hdmi_enable_audio_infoframe(sor);
+}
+
static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
{
u32 value;
@@ -2148,6 +2286,11 @@ static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
}
+static void tegra_sor_hdmi_audio_disable(struct tegra_sor *sor)
+{
+ tegra_sor_hdmi_disable_audio_infoframe(sor);
+}
+
static struct tegra_sor_hdmi_settings *
tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
{
@@ -2243,6 +2386,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
u32 value;
int err;
+ tegra_sor_audio_unprepare(sor);
tegra_sor_hdmi_scdc_stop(sor);
err = tegra_sor_detach(sor);
@@ -2651,6 +2795,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
tegra_sor_hdmi_scdc_start(sor);
+ tegra_sor_audio_prepare(sor);
}
static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
@@ -2666,6 +2811,7 @@ static int tegra_sor_init(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
+ u32 value;
int err;
if (!sor->aux) {
@@ -2759,6 +2905,15 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0)
return err;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
+ * is used for interoperability between the HDA codec driver and the
+ * HDMI/DP driver.
+ */
+ value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+ tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+ tegra_sor_writel(sor, value, SOR_INT_MASK);
+
return 0;
}
@@ -2767,6 +2922,9 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
+ tegra_sor_writel(sor, 0, SOR_INT_MASK);
+ tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+
tegra_output_exit(&sor->output);
if (sor->aux) {
@@ -3037,6 +3195,54 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
return 0;
}
+static void tegra_hda_parse_format(unsigned int format, unsigned int *rate,
+ unsigned int *channels)
+{
+ unsigned int mul, div;
+
+ if (format & AC_FMT_BASE_44K)
+ *rate = 44100;
+ else
+ *rate = 48000;
+
+ mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+ div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+ *rate = *rate * (mul + 1) / (div + 1);
+
+ *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
+static irqreturn_t tegra_sor_irq(int irq, void *data)
+{
+ struct tegra_sor *sor = data;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_INT_STATUS);
+ tegra_sor_writel(sor, value, SOR_INT_STATUS);
+
+ if (value & SOR_INT_CODEC_SCRATCH0) {
+ value = tegra_sor_readl(sor, SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+ if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+ unsigned int format, sample_rate, channels;
+
+ format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+ tegra_hda_parse_format(format, &sample_rate, &channels);
+
+ sor->audio.sample_rate = sample_rate;
+ sor->audio.channels = channels;
+
+ tegra_sor_hdmi_audio_enable(sor);
+ } else {
+ tegra_sor_hdmi_audio_disable(sor);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static int tegra_sor_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -3119,14 +3325,38 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- if (!pdev->dev.pm_domain) {
- sor->rst = devm_reset_control_get(&pdev->dev, "sor");
- if (IS_ERR(sor->rst)) {
- err = PTR_ERR(sor->rst);
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ goto remove;
+ }
+
+ sor->irq = err;
+
+ err = devm_request_irq(sor->dev, sor->irq, tegra_sor_irq, 0,
+ dev_name(sor->dev), sor);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto remove;
+ }
+
+ sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ if (IS_ERR(sor->rst)) {
+ err = PTR_ERR(sor->rst);
+
+ if (err != -EBUSY || WARN_ON(!pdev->dev.pm_domain)) {
dev_err(&pdev->dev, "failed to get reset control: %d\n",
err);
goto remove;
}
+
+ /*
+ * At this point, the reset control is most likely being used
+ * by the generic power domain implementation. With any luck
+ * the power domain will have taken care of resetting the SOR
+ * and we don't have to do anything.
+ */
+ sor->rst = NULL;
}
sor->clk = devm_clk_get(&pdev->dev, NULL);
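A worked example for tegra_hda_parse_format() above, using the AC_FMT_* field macros from <sound/hda_verbs.h> that it decodes (the argument is an HDA stream-format word; the values here are hypothetical):

	unsigned int rate, channels;
	/* 44.1 kHz base, rate multiplier field = 1 (x2), channel field = 1 */
	unsigned int format = AC_FMT_BASE_44K | (1 << AC_FMT_MULT_SHIFT) | 0x1;

	tegra_hda_parse_format(format, &rate, &channels);
	/* rate == 44100 * (1 + 1) / (0 + 1) == 88200;
	 * channels == 1, the raw low-nibble channel field */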
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index fb0854d92a27..13f7e68bec42 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -364,12 +364,28 @@
#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+#define SOR_HDMI_ACR_CTRL 0xb1
+
+#define SOR_HDMI_ACR_0320_SUBPACK_LOW 0xb2
+#define SOR_HDMI_ACR_SUBPACK_LOW_SB1(x) (((x) & 0xff) << 24)
+
+#define SOR_HDMI_ACR_0320_SUBPACK_HIGH 0xb3
+#define SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE (1 << 31)
+
+#define SOR_HDMI_ACR_0441_SUBPACK_LOW 0xb4
+#define SOR_HDMI_ACR_0441_SUBPACK_HIGH 0xb5
+
#define SOR_HDMI_CTRL 0xc0
#define SOR_HDMI_CTRL_ENABLE (1 << 30)
#define SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
#define SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
#define SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define SOR_HDMI_SPARE 0xcb
+#define SOR_HDMI_SPARE_ACR_PRIORITY_HIGH (1 << 31)
+#define SOR_HDMI_SPARE_CTS_RESET(x) (((x) & 0x7) << 16)
+#define SOR_HDMI_SPARE_HW_CTS_ENABLE (1 << 0)
+
#define SOR_REFCLK 0xe6
#define SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
@@ -378,10 +394,62 @@
#define SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
#define SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
+#define SOR_AUDIO_CNTRL 0xfc
+#define SOR_AUDIO_CNTRL_INJECT_NULLSMPL (1 << 29)
+#define SOR_AUDIO_CNTRL_SOURCE_SELECT(x) (((x) & 0x3) << 20)
+#define SOURCE_SELECT_MASK 0x3
+#define SOURCE_SELECT_HDA 0x2
+#define SOURCE_SELECT_SPDIF 0x1
+#define SOURCE_SELECT_AUTO 0x0
+#define SOR_AUDIO_CNTRL_AFIFO_FLUSH (1 << 12)
+
+#define SOR_AUDIO_SPARE 0xfe
+#define SOR_AUDIO_SPARE_HBR_ENABLE (1 << 27)
+
+#define SOR_AUDIO_NVAL_0320 0xff
+#define SOR_AUDIO_NVAL_0441 0x100
+#define SOR_AUDIO_NVAL_0882 0x101
+#define SOR_AUDIO_NVAL_1764 0x102
+#define SOR_AUDIO_NVAL_0480 0x103
+#define SOR_AUDIO_NVAL_0960 0x104
+#define SOR_AUDIO_NVAL_1920 0x105
+
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0 0x10a
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+
+#define SOR_AUDIO_HDA_ELD_BUFWR 0x10c
+#define SOR_AUDIO_HDA_ELD_BUFWR_INDEX(x) (((x) & 0xff) << 8)
+#define SOR_AUDIO_HDA_ELD_BUFWR_DATA(x) (((x) & 0xff) << 0)
+
+#define SOR_AUDIO_HDA_PRESENSE 0x10d
+#define SOR_AUDIO_HDA_PRESENSE_ELDV (1 << 1)
+#define SOR_AUDIO_HDA_PRESENSE_PD (1 << 0)
+
+#define SOR_AUDIO_AVAL_0320 0x10f
+#define SOR_AUDIO_AVAL_0441 0x110
+#define SOR_AUDIO_AVAL_0882 0x111
+#define SOR_AUDIO_AVAL_1764 0x112
+#define SOR_AUDIO_AVAL_0480 0x113
+#define SOR_AUDIO_AVAL_0960 0x114
+#define SOR_AUDIO_AVAL_1920 0x115
+
+#define SOR_INT_STATUS 0x11c
+#define SOR_INT_CODEC_CP_REQUEST (1 << 2)
+#define SOR_INT_CODEC_SCRATCH1 (1 << 1)
+#define SOR_INT_CODEC_SCRATCH0 (1 << 0)
+
+#define SOR_INT_MASK 0x11d
+#define SOR_INT_ENABLE 0x11e
+
#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
+#define SOR_HDMI_AUDIO_N 0x13c
+#define SOR_HDMI_AUDIO_N_LOOKUP (1 << 28)
+#define SOR_HDMI_AUDIO_N_RESET (1 << 20)
+
#define SOR_HDMI2_CTRL 0x13e
#define SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4 (1 << 1)
#define SOR_HDMI2_CTRL_SCRAMBLE (1 << 0)
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 9f657a63b0bb..d47983deb1cf 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -38,6 +38,7 @@ struct vic {
struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
+ struct reset_control *rst;
/* Platform configuration */
const struct vic_config *config;
@@ -56,13 +57,37 @@ static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
static int vic_runtime_resume(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(vic->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = reset_control_deassert(vic->rst);
+ if (err < 0)
+ goto disable;
+
+ usleep_range(10, 20);
+
+ return 0;
- return clk_prepare_enable(vic->clk);
+disable:
+ clk_disable_unprepare(vic->clk);
+ return err;
}
static int vic_runtime_suspend(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(vic->rst);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
clk_disable_unprepare(vic->clk);
@@ -282,10 +307,18 @@ static const struct vic_config vic_t186_config = {
.version = 0x18,
};
+#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"
+
+static const struct vic_config vic_t194_config = {
+ .firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
+ .version = 0x19,
+};
+
static const struct of_device_id vic_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
+ { .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
{ },
};
@@ -323,6 +356,14 @@ static int vic_probe(struct platform_device *pdev)
return PTR_ERR(vic->clk);
}
+ if (!dev->pm_domain) {
+ vic->rst = devm_reset_control_get(dev, "vic");
+ if (IS_ERR(vic->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(vic->rst);
+ }
+ }
+
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
vic->falcon.ops = &vic_falcon_ops;
@@ -418,3 +459,6 @@ MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d87935bf8e30..0ec08394e17a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -77,38 +77,39 @@ static inline int ttm_mem_type_from_place(const struct ttm_place *place,
return 0;
}
-static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
+static void ttm_mem_type_debug(struct ttm_bo_device *bdev, struct drm_printer *p,
+ int mem_type)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
- struct drm_printer p = drm_debug_printer(TTM_PFX);
- pr_err(" has_type: %d\n", man->has_type);
- pr_err(" use_type: %d\n", man->use_type);
- pr_err(" flags: 0x%08X\n", man->flags);
- pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset);
- pr_err(" size: %llu\n", man->size);
- pr_err(" available_caching: 0x%08X\n", man->available_caching);
- pr_err(" default_caching: 0x%08X\n", man->default_caching);
+ drm_printf(p, " has_type: %d\n", man->has_type);
+ drm_printf(p, " use_type: %d\n", man->use_type);
+ drm_printf(p, " flags: 0x%08X\n", man->flags);
+ drm_printf(p, " gpu_offset: 0x%08llX\n", man->gpu_offset);
+ drm_printf(p, " size: %llu\n", man->size);
+ drm_printf(p, " available_caching: 0x%08X\n", man->available_caching);
+ drm_printf(p, " default_caching: 0x%08X\n", man->default_caching);
if (mem_type != TTM_PL_SYSTEM)
- (*man->func->debug)(man, &p);
+ (*man->func->debug)(man, p);
}
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
+ struct drm_printer p = drm_debug_printer(TTM_PFX);
int i, ret, mem_type;
- pr_err("No space for %p (%lu pages, %luK, %luM)\n",
- bo, bo->mem.num_pages, bo->mem.size >> 10,
- bo->mem.size >> 20);
+ drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n",
+ bo, bo->mem.num_pages, bo->mem.size >> 10,
+ bo->mem.size >> 20);
for (i = 0; i < placement->num_placement; i++) {
ret = ttm_mem_type_from_place(&placement->placement[i],
&mem_type);
if (ret)
return;
- pr_err(" placement[%d]=0x%08X (%d)\n",
- i, placement->placement[i].flags, mem_type);
- ttm_mem_type_debug(bo->bdev, mem_type);
+ drm_printf(&p, " placement[%d]=0x%08X (%d)\n",
+ i, placement->placement[i].flags, mem_type);
+ ttm_mem_type_debug(bo->bdev, &p, mem_type);
}
}
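The conversion above threads a single struct drm_printer through both helpers instead of building one per call. drm_debug_printer() (from <drm/drm_print.h>) yields a printer whose drm_printf() output lands in the kernel log under the given prefix; a minimal sketch:

	struct drm_printer p = drm_debug_printer("my_prefix");	/* illustrative prefix */
	u32 value = 0xdeadbeef;					/* illustrative */

	drm_printf(&p, "value: 0x%08x\n", value);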
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index efa005a1c1b7..93860346c426 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -126,10 +126,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
}
if (!ret) {
- if (!entry->shared)
+ if (!entry->num_shared)
continue;
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ ret = reservation_object_reserve_shared(bo->resv,
+ entry->num_shared);
if (!ret)
continue;
}
@@ -150,8 +151,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
}
}
- if (!ret && entry->shared)
- ret = reservation_object_reserve_shared(bo->resv, 1);
+ if (!ret && entry->num_shared)
+ ret = reservation_object_reserve_shared(bo->resv,
+ entry->num_shared);
if (unlikely(ret != 0)) {
if (ret == -EINTR)
@@ -199,7 +201,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
list_for_each_entry(entry, list, head) {
bo = entry->bo;
- if (entry->shared)
+ if (entry->num_shared)
reservation_object_add_shared_fence(bo->resv, fence);
else
reservation_object_add_excl_fence(bo->resv, fence);
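The bool-to-count switch lets callers pre-reserve room for several shared fence slots in one reservation pass; num_shared == 0 now plays the role the old shared == false did (the buffer gets an exclusive fence). A hedged caller-side sketch:

	struct ttm_validate_buffer entry;

	entry.bo = &bo->tbo;
	entry.num_shared = 0;	/* writer: exclusive fence on signal */
	/* entry.num_shared = 1;   reader: reserve one shared fence slot */
	list_add_tail(&entry.head, &list);

	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);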
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9fd8b4e75a8c..25afb1d594e3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -34,7 +34,7 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
-#include <linux/dma_remapping.h>
+#include <linux/intel-iommu.h>
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
@@ -49,6 +49,8 @@
#define VMWGFX_REPO "In Tree"
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
/**
* Fully encoded drm commands. Might move to vmw_drm.h
@@ -581,7 +583,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
dev_priv->map_mode = vmw_dma_map_populate;
- if (dma_ops->sync_single_for_cpu)
+ if (dma_ops && dma_ops->sync_single_for_cpu)
dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl() == 0)
@@ -911,7 +913,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_unlock(&dev_priv->cap_lock);
}
-
+ vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d7f6cb9331de..cd607ba9c2fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -593,6 +593,9 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+ /* Validation memory reservation */
+ struct vmw_validation_mem vvm;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -831,6 +834,8 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+ size_t gran);
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 260650bb5560..f2d13a72c05d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3835,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
+ vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8a029bade32a..3025bfc001a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -85,7 +85,7 @@ static void vmw_resource_release(struct kref *kref)
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
@@ -462,7 +462,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
- val_buf->shared = false;
+ val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -565,7 +565,7 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -614,7 +614,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
return 0;
val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
if (res->backup)
val_buf.bo = &res->backup->base;
do {
@@ -685,7 +685,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
struct vmw_resource *res, *next;
struct ttm_validate_buffer val_buf = {
.bo = &vbo->base,
- .shared = false
+ .num_shared = 0
};
lockdep_assert_held(&vbo->base.resv->lock.base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 154eb09aa91e..e6d75e377dd8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -42,3 +42,39 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
dev_priv = vmw_priv(file_priv->minor->dev);
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
+
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+ static struct ttm_operation_ctx ctx = {.interruptible = false,
+ .no_wait_gpu = false};
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
+ * rather than a struct vmw_validation_mem is to make sure the assumption in
+ * the callbacks, that the struct vmw_validation_mem is embedded in a struct
+ * vmw_private, holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+ struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+ vvm->reserve_mem = vmw_vmt_reserve;
+ vvm->unreserve_mem = vmw_vmt_unreserve;
+ vvm->gran = gran;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 184025fa938e..b3f547fc5d3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ struct page *page;
+ if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+ int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+ if (ret)
+ return NULL;
+
+ ctx->vm_size_left += ctx->vm->gran;
+ ctx->total_mem += ctx->vm->gran;
+ }
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
+ if (ctx->vm)
+ ctx->vm_size_left -= PAGE_SIZE;
+
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
+ if (ctx->vm && ctx->total_mem) {
+ ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+ ctx->total_mem = 0;
+ ctx->vm_size_left = 0;
+ }
}
/**
@@ -266,7 +285,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
if (!val_buf->bo)
return -ESRCH;
- val_buf->shared = false;
+ val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
bo_node->as_mob = as_mob;
bo_node->cpu_blit = cpu_blit;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index b57e3292c386..3b396fea40d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -34,6 +34,21 @@
#include <drm/ttm/ttm_execbuf_util.h>
/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint of how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservations to be done in large batches.
+ */
+struct vmw_validation_mem {
+ int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+ void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+ size_t gran;
+};
+
+/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
@@ -47,6 +62,10 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that has not yet been allocated.
+ * @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
+ struct vmw_validation_mem *vm;
+ size_t vm_size_left;
+ size_t total_mem;
};
struct vmw_buffer_object;
@@ -102,6 +124,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+ struct vmw_validation_mem *vm)
+{
+ ctx->vm = vm;
+}
+
+/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding
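Wiring this up matches the execbuf hunk earlier in this patch: the device-global tracker is attached to each per-submission context before the first allocation against it:

	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);

	/* must precede the first vmw_validation_mem_alloc() on val_ctx */
	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);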
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
index 4cca160782ab..f969d486855d 100644
--- a/drivers/gpu/drm/xen/Kconfig
+++ b/drivers/gpu/drm/xen/Kconfig
@@ -12,6 +12,7 @@ config DRM_XEN_FRONTEND
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select XEN_XENBUS_FRONTEND
+ select XEN_FRONT_PGDIR_SHBUF
help
Choose this option if you want to enable a para-virtualized
frontend DRM/KMS driver for Xen guest OSes.
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile
index 712afff5ffc3..825905f67faa 100644
--- a/drivers/gpu/drm/xen/Makefile
+++ b/drivers/gpu/drm/xen/Makefile
@@ -4,7 +4,6 @@ drm_xen_front-objs := xen_drm_front.o \
xen_drm_front_kms.o \
xen_drm_front_conn.o \
xen_drm_front_evtchnl.o \
- xen_drm_front_shbuf.o \
xen_drm_front_cfg.o \
xen_drm_front_gem.o
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 6b6d5ab82ec3..4d3d36fc3a5d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -19,6 +19,7 @@
#include <xen/xen.h>
#include <xen/xenbus.h>
+#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>
#include "xen_drm_front.h"
@@ -26,28 +27,20 @@
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"
-#include "xen_drm_front_shbuf.h"
struct xen_drm_front_dbuf {
struct list_head list;
u64 dbuf_cookie;
u64 fb_cookie;
- struct xen_drm_front_shbuf *shbuf;
+
+ struct xen_front_pgdir_shbuf shbuf;
};
-static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
- struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
+static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
+ struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
- struct xen_drm_front_dbuf *dbuf;
-
- dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
- if (!dbuf)
- return -ENOMEM;
-
dbuf->dbuf_cookie = dbuf_cookie;
- dbuf->shbuf = shbuf;
list_add(&dbuf->list, &front_info->dbuf_list);
- return 0;
}
static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
@@ -62,15 +55,6 @@ static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
return NULL;
}
-static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
-{
- struct xen_drm_front_dbuf *buf, *q;
-
- list_for_each_entry_safe(buf, q, dbuf_list, list)
- if (buf->fb_cookie == fb_cookie)
- xen_drm_front_shbuf_flush(buf->shbuf);
-}
-
static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
struct xen_drm_front_dbuf *buf, *q;
@@ -78,8 +62,8 @@ static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
list_for_each_entry_safe(buf, q, dbuf_list, list)
if (buf->dbuf_cookie == dbuf_cookie) {
list_del(&buf->list);
- xen_drm_front_shbuf_unmap(buf->shbuf);
- xen_drm_front_shbuf_free(buf->shbuf);
+ xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+ xen_front_pgdir_shbuf_free(&buf->shbuf);
kfree(buf);
break;
}
@@ -91,8 +75,8 @@ static void dbuf_free_all(struct list_head *dbuf_list)
list_for_each_entry_safe(buf, q, dbuf_list, list) {
list_del(&buf->list);
- xen_drm_front_shbuf_unmap(buf->shbuf);
- xen_drm_front_shbuf_free(buf->shbuf);
+ xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+ xen_front_pgdir_shbuf_free(&buf->shbuf);
kfree(buf);
}
}
@@ -171,9 +155,9 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
u32 bpp, u64 size, struct page **pages)
{
struct xen_drm_front_evtchnl *evtchnl;
- struct xen_drm_front_shbuf *shbuf;
+ struct xen_drm_front_dbuf *dbuf;
struct xendispl_req *req;
- struct xen_drm_front_shbuf_cfg buf_cfg;
+ struct xen_front_pgdir_shbuf_cfg buf_cfg;
unsigned long flags;
int ret;
@@ -181,28 +165,29 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
if (unlikely(!evtchnl))
return -EIO;
+ dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
+ if (!dbuf)
+ return -ENOMEM;
+
+ dbuf_add_to_list(front_info, dbuf, dbuf_cookie);
+
memset(&buf_cfg, 0, sizeof(buf_cfg));
buf_cfg.xb_dev = front_info->xb_dev;
+ buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
buf_cfg.pages = pages;
- buf_cfg.size = size;
+ buf_cfg.pgdir = &dbuf->shbuf;
buf_cfg.be_alloc = front_info->cfg.be_alloc;
- shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
- if (IS_ERR(shbuf))
- return PTR_ERR(shbuf);
-
- ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
- if (ret < 0) {
- xen_drm_front_shbuf_free(shbuf);
- return ret;
- }
+ ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
+ if (ret < 0)
+ goto fail_shbuf_alloc;
mutex_lock(&evtchnl->u.req.req_io_lock);
spin_lock_irqsave(&front_info->io_lock, flags);
req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
req->op.dbuf_create.gref_directory =
- xen_drm_front_shbuf_get_dir_start(shbuf);
+ xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
req->op.dbuf_create.buffer_sz = size;
req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
req->op.dbuf_create.width = width;
@@ -221,7 +206,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
if (ret < 0)
goto fail;
- ret = xen_drm_front_shbuf_map(shbuf);
+ ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
if (ret < 0)
goto fail;
@@ -230,6 +215,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
fail:
mutex_unlock(&evtchnl->u.req.req_io_lock);
+fail_shbuf_alloc:
dbuf_free(&front_info->dbuf_list, dbuf_cookie);
return ret;
}
@@ -358,7 +344,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
if (unlikely(conn_idx >= front_info->num_evt_pairs))
return -EINVAL;
- dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
evtchnl = &front_info->evt_pairs[conn_idx].req;
mutex_lock(&evtchnl->u.req.req_io_lock);
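
The hunks above replace the driver-private shbuf implementation with the generic
xen-front-pgdir-shbuf helper: the shbuf object is now embedded in the dbuf, the
ops-table indirection disappears, and allocation failures funnel through one error
label. A minimal usage sketch of the helper as this conversion exercises it; the
function name and standalone flow are illustrative, and for backend-allocated
buffers the map step belongs only after the backend has filled the page directory
(i.e. after the DBUF_CREATE response, as in the code above).

    /* Sketch: sharing a buffer through the generic pgdir shbuf helper. */
    #include <linux/kernel.h>
    #include <xen/xen-front-pgdir-shbuf.h>

    static int example_share(struct xenbus_device *xb_dev, struct page **pages,
                             u64 size, bool be_alloc)
    {
            struct xen_front_pgdir_shbuf shbuf = {};
            struct xen_front_pgdir_shbuf_cfg buf_cfg = {
                    .xb_dev = xb_dev,
                    .num_pages = DIV_ROUND_UP(size, PAGE_SIZE),
                    .pages = pages,
                    .pgdir = &shbuf,        /* filled in by the helper */
                    .be_alloc = be_alloc,
            };
            int ret;

            /* Grants the buffer pages, or only the page directory if be_alloc. */
            ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
            if (ret < 0)
                    return ret;

            /* First directory gref is what dbuf_create.gref_directory carries. */
            pr_info("gref_directory %u\n",
                    xen_front_pgdir_shbuf_get_dir_start(&shbuf));

            /* No-op for locally granted buffers; maps backend grants otherwise. */
            ret = xen_front_pgdir_shbuf_map(&shbuf);
            if (ret < 0) {
                    xen_front_pgdir_shbuf_free(&shbuf);
                    return ret;
            }

            /* Teardown mirrors dbuf_free() above. */
            xen_front_pgdir_shbuf_unmap(&shbuf);
            xen_front_pgdir_shbuf_free(&shbuf);
            return 0;
    }
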
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 47ff019d3aef..28bc501af450 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -22,7 +22,6 @@
#include <xen/balloon.h>
#include "xen_drm_front.h"
-#include "xen_drm_front_shbuf.h"
struct xen_gem_object {
struct drm_gem_object base;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
deleted file mode 100644
index d333b67cc1a0..000000000000
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ /dev/null
@@ -1,414 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-
-/*
- * Xen para-virtual DRM device
- *
- * Copyright (C) 2016-2018 EPAM Systems Inc.
- *
- * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
- */
-
-#include <drm/drmP.h>
-
-#if defined(CONFIG_X86)
-#include <drm/drm_cache.h>
-#endif
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <asm/xen/hypervisor.h>
-#include <xen/balloon.h>
-#include <xen/xen.h>
-#include <xen/xenbus.h>
-#include <xen/interface/io/ring.h>
-#include <xen/interface/io/displif.h>
-
-#include "xen_drm_front.h"
-#include "xen_drm_front_shbuf.h"
-
-struct xen_drm_front_shbuf_ops {
- /*
- * Calculate the number of grefs required to handle this buffer,
- * i.e. whether grefs are needed for the page directory only or for
- * the buffer pages as well.
- */
- void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
- /* Fill page directory according to para-virtual display protocol. */
- void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
- /* Claim grant references for the pages of the buffer. */
- int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
- grant_ref_t *priv_gref_head, int gref_idx);
- /* Map grant references of the buffer. */
- int (*map)(struct xen_drm_front_shbuf *buf);
- /* Unmap grant references of the buffer. */
- int (*unmap)(struct xen_drm_front_shbuf *buf);
-};
-
-grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
-{
- if (!buf->grefs)
- return GRANT_INVALID_REF;
-
- return buf->grefs[0];
-}
-
-int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
-{
- if (buf->ops->map)
- return buf->ops->map(buf);
-
- /* no need to map own grant references */
- return 0;
-}
-
-int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
-{
- if (buf->ops->unmap)
- return buf->ops->unmap(buf);
-
- /* no need to unmap own grant references */
- return 0;
-}
-
-void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
-{
-#if defined(CONFIG_X86)
- drm_clflush_pages(buf->pages, buf->num_pages);
-#endif
-}
-
-void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
-{
- if (buf->grefs) {
- int i;
-
- for (i = 0; i < buf->num_grefs; i++)
- if (buf->grefs[i] != GRANT_INVALID_REF)
- gnttab_end_foreign_access(buf->grefs[i],
- 0, 0UL);
- }
- kfree(buf->grefs);
- kfree(buf->directory);
- kfree(buf);
-}
-
-/*
- * number of grefs a page can hold with respect to the
- * struct xendispl_page_directory header
- */
-#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
- offsetof(struct xendispl_page_directory, gref)) / \
- sizeof(grant_ref_t))
-
-static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
-{
- /* number of pages consumed by the page directory itself */
- return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
-}
-
-static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
-{
- /* grefs only for the pages consumed by the page directory itself */
- buf->num_grefs = get_num_pages_dir(buf);
-}
-
-static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
-{
- /*
- * grefs for the pages consumed by the page directory itself,
- * plus grefs for the buffer pages
- */
- buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
-}
-
-#define xen_page_to_vaddr(page) \
- ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
-
-static int backend_unmap(struct xen_drm_front_shbuf *buf)
-{
- struct gnttab_unmap_grant_ref *unmap_ops;
- int i, ret;
-
- if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
- return 0;
-
- unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
- GFP_KERNEL);
- if (!unmap_ops) {
- DRM_ERROR("Failed to get memory while unmapping\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < buf->num_pages; i++) {
- phys_addr_t addr;
-
- addr = xen_page_to_vaddr(buf->pages[i]);
- gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
- buf->backend_map_handles[i]);
- }
-
- ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
- buf->num_pages);
-
- for (i = 0; i < buf->num_pages; i++) {
- if (unlikely(unmap_ops[i].status != GNTST_okay))
- DRM_ERROR("Failed to unmap page %d: %d\n",
- i, unmap_ops[i].status);
- }
-
- if (ret)
- DRM_ERROR("Failed to unmap grant references, ret %d", ret);
-
- kfree(unmap_ops);
- kfree(buf->backend_map_handles);
- buf->backend_map_handles = NULL;
- return ret;
-}
-
-static int backend_map(struct xen_drm_front_shbuf *buf)
-{
- struct gnttab_map_grant_ref *map_ops = NULL;
- unsigned char *ptr;
- int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
-
- map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
- if (!map_ops)
- return -ENOMEM;
-
- buf->backend_map_handles = kcalloc(buf->num_pages,
- sizeof(*buf->backend_map_handles),
- GFP_KERNEL);
- if (!buf->backend_map_handles) {
- kfree(map_ops);
- return -ENOMEM;
- }
-
- /*
- * read the page directory to get grefs from the backend: for an
- * external buffer we only allocate buf->grefs for the page directory,
- * so buf->num_grefs holds the number of pages in the page directory itself
- */
- ptr = buf->directory;
- grefs_left = buf->num_pages;
- cur_page = 0;
- for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
- struct xendispl_page_directory *page_dir =
- (struct xendispl_page_directory *)ptr;
- int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
-
- if (to_copy > grefs_left)
- to_copy = grefs_left;
-
- for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
- phys_addr_t addr;
-
- addr = xen_page_to_vaddr(buf->pages[cur_page]);
- gnttab_set_map_op(&map_ops[cur_page], addr,
- GNTMAP_host_map,
- page_dir->gref[cur_gref],
- buf->xb_dev->otherend_id);
- cur_page++;
- }
-
- grefs_left -= to_copy;
- ptr += PAGE_SIZE;
- }
- ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
-
- /* save handles even on error, so we can unmap */
- for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
- buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
- if (unlikely(map_ops[cur_page].status != GNTST_okay))
- DRM_ERROR("Failed to map page %d: %d\n",
- cur_page, map_ops[cur_page].status);
- }
-
- if (ret) {
- DRM_ERROR("Failed to map grant references, ret %d", ret);
- backend_unmap(buf);
- }
-
- kfree(map_ops);
- return ret;
-}
-
-static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
-{
- struct xendispl_page_directory *page_dir;
- unsigned char *ptr;
- int i, num_pages_dir;
-
- ptr = buf->directory;
- num_pages_dir = get_num_pages_dir(buf);
-
- /* fill only grefs for the page directory itself */
- for (i = 0; i < num_pages_dir - 1; i++) {
- page_dir = (struct xendispl_page_directory *)ptr;
-
- page_dir->gref_dir_next_page = buf->grefs[i + 1];
- ptr += PAGE_SIZE;
- }
- /* the last page must say there are no more pages */
- page_dir = (struct xendispl_page_directory *)ptr;
- page_dir->gref_dir_next_page = GRANT_INVALID_REF;
-}
-
-static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
-{
- unsigned char *ptr;
- int cur_gref, grefs_left, to_copy, i, num_pages_dir;
-
- ptr = buf->directory;
- num_pages_dir = get_num_pages_dir(buf);
-
- /*
- * while copying, skip the grefs at the start; they are for the pages
- * granted for the page directory itself
- */
- cur_gref = num_pages_dir;
- grefs_left = buf->num_pages;
- for (i = 0; i < num_pages_dir; i++) {
- struct xendispl_page_directory *page_dir =
- (struct xendispl_page_directory *)ptr;
-
- if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
- to_copy = grefs_left;
- page_dir->gref_dir_next_page = GRANT_INVALID_REF;
- } else {
- to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
- page_dir->gref_dir_next_page = buf->grefs[i + 1];
- }
- memcpy(&page_dir->gref, &buf->grefs[cur_gref],
- to_copy * sizeof(grant_ref_t));
- ptr += PAGE_SIZE;
- grefs_left -= to_copy;
- cur_gref += to_copy;
- }
-}
-
-static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
- grant_ref_t *priv_gref_head,
- int gref_idx)
-{
- int i, cur_ref, otherend_id;
-
- otherend_id = buf->xb_dev->otherend_id;
- for (i = 0; i < buf->num_pages; i++) {
- cur_ref = gnttab_claim_grant_reference(priv_gref_head);
- if (cur_ref < 0)
- return cur_ref;
-
- gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
- xen_page_to_gfn(buf->pages[i]),
- 0);
- buf->grefs[gref_idx++] = cur_ref;
- }
- return 0;
-}
-
-static int grant_references(struct xen_drm_front_shbuf *buf)
-{
- grant_ref_t priv_gref_head;
- int ret, i, j, cur_ref;
- int otherend_id, num_pages_dir;
-
- ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
- if (ret < 0) {
- DRM_ERROR("Cannot allocate grant references\n");
- return ret;
- }
-
- otherend_id = buf->xb_dev->otherend_id;
- j = 0;
- num_pages_dir = get_num_pages_dir(buf);
- for (i = 0; i < num_pages_dir; i++) {
- unsigned long frame;
-
- cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
- if (cur_ref < 0)
- return cur_ref;
-
- frame = xen_page_to_gfn(virt_to_page(buf->directory +
- PAGE_SIZE * i));
- gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
- buf->grefs[j++] = cur_ref;
- }
-
- if (buf->ops->grant_refs_for_buffer) {
- ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
- if (ret)
- return ret;
- }
-
- gnttab_free_grant_references(priv_gref_head);
- return 0;
-}
-
-static int alloc_storage(struct xen_drm_front_shbuf *buf)
-{
- buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
- if (!buf->grefs)
- return -ENOMEM;
-
- buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
- if (!buf->directory)
- return -ENOMEM;
-
- return 0;
-}
-
-/*
- * For backend-allocated buffers we don't need grant_refs_for_buffer as
- * those grant references are allocated at the backend side
- */
-static const struct xen_drm_front_shbuf_ops backend_ops = {
- .calc_num_grefs = backend_calc_num_grefs,
- .fill_page_dir = backend_fill_page_dir,
- .map = backend_map,
- .unmap = backend_unmap
-};
-
-/* For locally granted references we do not need to map/unmap the references */
-static const struct xen_drm_front_shbuf_ops local_ops = {
- .calc_num_grefs = guest_calc_num_grefs,
- .fill_page_dir = guest_fill_page_dir,
- .grant_refs_for_buffer = guest_grant_refs_for_buffer,
-};
-
-struct xen_drm_front_shbuf *
-xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
-{
- struct xen_drm_front_shbuf *buf;
- int ret;
-
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- if (cfg->be_alloc)
- buf->ops = &backend_ops;
- else
- buf->ops = &local_ops;
-
- buf->xb_dev = cfg->xb_dev;
- buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
- buf->pages = cfg->pages;
-
- buf->ops->calc_num_grefs(buf);
-
- ret = alloc_storage(buf);
- if (ret)
- goto fail;
-
- ret = grant_references(buf);
- if (ret)
- goto fail;
-
- buf->ops->fill_page_dir(buf);
-
- return buf;
-
-fail:
- xen_drm_front_shbuf_free(buf);
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
deleted file mode 100644
index 7545c692539e..000000000000
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-
-/*
- * Xen para-virtual DRM device
- *
- * Copyright (C) 2016-2018 EPAM Systems Inc.
- *
- * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
- */
-
-#ifndef __XEN_DRM_FRONT_SHBUF_H_
-#define __XEN_DRM_FRONT_SHBUF_H_
-
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-
-#include <xen/grant_table.h>
-
-struct xen_drm_front_shbuf {
- /*
- * number of references granted for the backend's use:
- * - for allocated/imported dma-bufs this holds the number of grant
- * references for the page directory and the pages of the buffer
- * - for a buffer provided by the backend this holds the number of
- * grant references for the page directory only, as grant references
- * for the buffer itself are provided by the backend
- */
- int num_grefs;
- grant_ref_t *grefs;
- unsigned char *directory;
-
- int num_pages;
- struct page **pages;
-
- struct xenbus_device *xb_dev;
-
- /* these are the ops used internally depending on be_alloc mode */
- const struct xen_drm_front_shbuf_ops *ops;
-
- /* Xen map handles for the buffer allocated by the backend */
- grant_handle_t *backend_map_handles;
-};
-
-struct xen_drm_front_shbuf_cfg {
- struct xenbus_device *xb_dev;
- size_t size;
- struct page **pages;
- bool be_alloc;
-};
-
-struct xen_drm_front_shbuf *
-xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);
-
-grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);
-
-int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);
-
-int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);
-
-void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);
-
-void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);
-
-#endif /* __XEN_DRM_FRONT_SHBUF_H_ */
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index b92016ce09b7..096017b8789d 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -13,6 +13,7 @@ host1x-y = \
hw/host1x02.o \
hw/host1x04.o \
hw/host1x05.o \
- hw/host1x06.o
+ hw/host1x06.o \
+ hw/host1x07.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index de6bc4e7fa23..419d8929a98f 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -44,6 +44,7 @@
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
+#include "hw/host1x07.h"
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
@@ -130,7 +131,19 @@ static const struct host1x_info host1x06_info = {
.has_hypervisor = true,
};
+static const struct host1x_info host1x07_info = {
+ .nb_channels = 63,
+ .nb_pts = 704,
+ .nb_mlocks = 32,
+ .nb_bases = 0,
+ .init = host1x07_init,
+ .sync_offset = 0x0,
+ .dma_mask = DMA_BIT_MASK(40),
+ .has_hypervisor = true,
+};
+
static const struct of_device_id host1x_of_match[] = {
+ { .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index d188f9068b91..95ea81172a83 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -26,7 +26,6 @@
#include "../intr.h"
#include "../job.h"
-#define HOST1X_CHANNEL_SIZE 16384
#define TRACE_MAX_LENGTH 128U
static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
@@ -203,7 +202,11 @@ static void enable_gather_filter(struct host1x *host,
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
unsigned int index)
{
- ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
+#if HOST1X_HW < 6
+ ch->regs = dev->regs + index * 0x4000;
+#else
+ ch->regs = dev->regs + index * 0x100;
+#endif
enable_gather_filter(dev, ch);
return 0;
}
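
The removed HOST1X_CHANNEL_SIZE constant (16384 == 0x4000) only described the
pre-host1x06 layout: there, channel registers sit in one flat aperture at a
16 KiB stride, while host1x06 and later expose a small per-channel window every
0x100 bytes. A sketch of the address math, with the generation as a runtime
parameter purely for illustration; the driver resolves it at compile time
through HOST1X_HW.

    /* Sketch: channel register base per hardware generation. */
    static void __iomem *channel_regs(void __iomem *regs, unsigned int index,
                                      unsigned int hw)
    {
            if (hw < 6)
                    return regs + index * 0x4000;   /* flat 16 KiB stride */

            return regs + index * 0x100;            /* per-channel VM window */
    }
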
diff --git a/drivers/gpu/host1x/hw/debug_hw_1x06.c b/drivers/gpu/host1x/hw/debug_hw_1x06.c
index b503c740c022..8b749516c051 100644
--- a/drivers/gpu/host1x/hw/debug_hw_1x06.c
+++ b/drivers/gpu/host1x/hw/debug_hw_1x06.c
@@ -62,9 +62,12 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
struct host1x_channel *ch,
struct output *o)
{
- u32 val, rd_ptr, wr_ptr, start, end;
+#if HOST1X_HW <= 6
+ u32 rd_ptr, wr_ptr, start, end;
u32 payload = INVALID_PAYLOAD;
unsigned int data_count = 0;
+#endif
+ u32 val;
host1x_debug_output(o, "%u: fifo:\n", ch->id);
@@ -78,6 +81,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_RDATA);
host1x_debug_output(o, "CMDFIFO_RDATA %08x\n", val);
+#if HOST1X_HW <= 6
/* Peek pointer values are invalid during SLCG, so disable it */
host1x_hypervisor_writel(host, 0x1, HOST1X_HV_ICG_EN_OVERRIDE);
@@ -127,6 +131,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
host1x_hypervisor_writel(host, 0x0, HOST1X_HV_CMDFIFO_PEEK_CTRL);
host1x_hypervisor_writel(host, 0x0, HOST1X_HV_ICG_EN_OVERRIDE);
+#endif
}
static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
diff --git a/drivers/gpu/host1x/hw/host1x07.c b/drivers/gpu/host1x/hw/host1x07.c
new file mode 100644
index 000000000000..04b779a53f08
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x07.c
@@ -0,0 +1,44 @@
+/*
+ * Host1x init for Tegra194 SoCs
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x07.h"
+#include "host1x07_hardware.h"
+
+/* include code */
+#define HOST1X_HW 7
+
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x07_init(struct host1x *host)
+{
+ host->channel_op = &host1x_channel_ops;
+ host->cdma_op = &host1x_cdma_ops;
+ host->cdma_pb_op = &host1x_pushbuffer_ops;
+ host->syncpt_op = &host1x_syncpt_ops;
+ host->intr_op = &host1x_intr_ops;
+ host->debug_op = &host1x_debug_ops;
+
+ return 0;
+}
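
host1x07.c uses the driver's template-by-preprocessor pattern: HOST1X_HW is
defined before #including the shared *_hw.c sources, so every #if HOST1X_HW
guard in them (see channel_hw.c and syncpt_hw.c elsewhere in this diff)
specializes for this generation, and the ops tables installed above are the
Tegra194 variants. A minimal sketch of the idea, with hypothetical file and
function names.

    /* shared_impl.c: hypothetical shared source, included once per generation. */
    static unsigned int syncpt_count(void)
    {
    #if HOST1X_HW < 7
            return 576;     /* host1x06_info.nb_pts */
    #else
            return 704;     /* host1x07_info.nb_pts, per the table added above */
    #endif
    }

    /* gen7.c: hypothetical per-generation wrapper, mirroring host1x07.c. */
    #define HOST1X_HW 7
    #include "shared_impl.c"        /* syncpt_count() now compiles to return 704 */
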
diff --git a/drivers/gpu/host1x/hw/host1x07.h b/drivers/gpu/host1x/hw/host1x07.h
new file mode 100644
index 000000000000..57b19f354274
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x07.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra194 SoCs
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X07_H
+#define HOST1X_HOST1X07_H
+
+struct host1x;
+
+int host1x07_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x07_hardware.h b/drivers/gpu/host1x/hw/host1x07_hardware.h
new file mode 100644
index 000000000000..1353e7ab71dd
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x07_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra194
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X07_HARDWARE_H
+#define __HOST1X_HOST1X07_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x07_uclass.h"
+#include "hw_host1x07_vm.h"
+#include "hw_host1x07_hypervisor.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_wait_syncpt_indx_f(indx)
+ | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
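
Every CDMA word encodes its opcode in bits 31:28, so the helpers above are
simple shift-and-or constructors. A worked sketch; class 0x1 is
HOST1X_CLASS_HOST1X in the in-tree headers, and the OP_DONE condition value is
an assumption.

    /* Sketch: composing a few CDMA words with the helpers above. */
    static void example_stream(u32 *words)
    {
            /* SETCLASS: (0 << 28) | (0 << 16) | (0x1 << 6) | 0x1 == 0x00000041 */
            words[0] = host1x_opcode_setclass(0x1, 0, 1);

            /* IMM write to INCR_SYNCPT: condition in bits 17:10 (host1x07),
             * index in bits 7:0: (4 << 28) | (1 << 10) | 42 == 0x4000042a. */
            words[1] = host1x_opcode_imm_incr_syncpt(1 /* OP_DONE, assumed */, 42);

            /* INCR header: (1 << 28) | (0x10 << 16) | 3 == 0x10100003;
             * three data words for offsets 0x10..0x12 would follow. */
            words[2] = host1x_opcode_incr(0x10, 3);
    }
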
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
index 4457486c72b0..e599e15bf999 100644
--- a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
@@ -59,7 +59,7 @@ static inline u32 host1x_uclass_incr_syncpt_r(void)
host1x_uclass_incr_syncpt_r()
static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
{
- return (v & 0xff) << 8;
+ return (v & 0xff) << 10;
}
#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
host1x_uclass_incr_syncpt_cond_f(v)
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h b/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h
new file mode 100644
index 000000000000..2b99d68d3040
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_HV_SYNCPT_PROT_EN 0x1ac4
+#define HOST1X_HV_SYNCPT_PROT_EN_CH_EN BIT(1)
+#define HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(x) (0x2020 + (x * 4))
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL 0x233c
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(x) (x)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(x) ((x) << 16)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE BIT(31)
+#define HOST1X_HV_CMDFIFO_PEEK_READ 0x2340
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS 0x2344
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(x) (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(x) ((x) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP(x) (0x2588 + (x * 4))
+#define HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(x) (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP_BASE_V(x) ((x) & 0xfff)
+#define HOST1X_HV_ICG_EN_OVERRIDE 0x2aa8
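
These are the hypervisor-space debug registers that debug_hw_1x06.c drives; the
PEEK_CTRL fields compose into a single doorbell word. A sketch of one FIFO word
peek, assuming the host1x_hypervisor_writel()/_readl() accessors shown earlier
in this diff; the real debug code additionally disables SLCG via
HOST1X_HV_ICG_EN_OVERRIDE around the peek.

    /* Sketch: peek one word of a channel's command FIFO. */
    static u32 example_cmdfifo_peek(struct host1x *host, unsigned int channel,
                                    unsigned int addr)
    {
            host1x_hypervisor_writel(host,
                                     HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(addr) |
                                     HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(channel) |
                                     HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE,
                                     HOST1X_HV_CMDFIFO_PEEK_CTRL);

            return host1x_hypervisor_readl(host, HOST1X_HV_CMDFIFO_PEEK_READ);
    }
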
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
new file mode 100644
index 000000000000..7e4e3b377f91
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
+
+#ifndef HOST1X_HW_HOST1X07_UCLASS_H
+#define HOST1X_HW_HOST1X07_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+ host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 10;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+ host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+ host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+ host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+ host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+ host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+ host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+ host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+ host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+ host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+ host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+	host1x_uclass_indoff_rwn_read_v()
+
+#endif
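
Per the naming convention documented at the top of this header, the _f()
helpers shift a value into its field, so full register words are built by
OR-ing them together. A worked example for the WAIT_SYNCPT method payload:

    /* Sketch: index 5 -> bits 31:24, threshold 0x1234 -> bits 23:0, so the
     * composed payload is (5 << 24) | 0x1234 == 0x05001234. */
    static u32 example_wait_payload(void)
    {
            return host1x_uclass_wait_syncpt_indx_f(5) |
                   host1x_uclass_wait_syncpt_thresh_f(0x1234);
    }
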
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_vm.h b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
new file mode 100644
index 000000000000..7e4629e77a2a
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_CHANNEL_DMASTART 0x0000
+#define HOST1X_CHANNEL_DMASTART_HI 0x0004
+#define HOST1X_CHANNEL_DMAPUT 0x0008
+#define HOST1X_CHANNEL_DMAPUT_HI 0x000c
+#define HOST1X_CHANNEL_DMAGET 0x0010
+#define HOST1X_CHANNEL_DMAGET_HI 0x0014
+#define HOST1X_CHANNEL_DMAEND 0x0018
+#define HOST1X_CHANNEL_DMAEND_HI 0x001c
+#define HOST1X_CHANNEL_DMACTRL 0x0020
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP BIT(0)
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST BIT(1)
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET BIT(2)
+#define HOST1X_CHANNEL_CMDFIFO_STAT 0x0024
+#define HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY BIT(13)
+#define HOST1X_CHANNEL_CMDFIFO_RDATA 0x0028
+#define HOST1X_CHANNEL_CMDP_OFFSET 0x0030
+#define HOST1X_CHANNEL_CMDP_CLASS 0x0034
+#define HOST1X_CHANNEL_CHANNELSTAT 0x0038
+#define HOST1X_CHANNEL_CMDPROC_STOP 0x0048
+#define HOST1X_CHANNEL_TEARDOWN 0x004c
+
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(x) (0x6400 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(x) (0x6464 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x) (0x652c + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x) (0x6590 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT(x) (0x8080 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x8d00 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_CH_APP(x) (0xa604 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_CH_APP_CH(v) (((v) & 0x3f) << 8)
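
These VM-space offsets back the syncpt_hw.c accessors: CPU_INCR is a bank of
32-bit mask registers (bit id % 32 of word id / 32 increments syncpoint id),
and SYNCPT(x) holds the live counter. A sketch using the
host1x_sync_writel()/_readl() accessors from dev.c:

    /* Sketch: CPU-side increment and readback of a syncpoint on host1x07. */
    static u32 example_syncpt_incr(struct host1x *host, unsigned int id)
    {
            /* Each CPU_INCR word covers 32 syncpoints; set the matching bit. */
            host1x_sync_writel(host, BIT(id % 32),
                               HOST1X_SYNC_SYNCPT_CPU_INCR(id / 32));

            /* The live counter lives at 0x8080 + 4 * id. */
            return host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(id));
    }
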
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index a23bb3352d02..d946660d47f8 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -37,10 +37,12 @@ static void syncpt_restore(struct host1x_syncpt *sp)
*/
static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
{
+#if HOST1X_HW < 7
struct host1x *host = sp->host;
host1x_sync_writel(host, sp->base_val,
HOST1X_SYNC_SYNCPT_BASE(sp->id));
+#endif
}
/*
@@ -48,10 +50,12 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
*/
static void syncpt_read_wait_base(struct host1x_syncpt *sp)
{
+#if HOST1X_HW < 7
struct host1x *host = sp->host;
sp->base_val =
host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
+#endif
}
/*