Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 21
-rw-r--r-- drivers/gpu/drm/msm/Makefile | 82
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx.xml.h | 57
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx.xml.h | 24
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 39
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx.xml.h | 193
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 31
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx.xml.h | 483
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_debugfs.c | 187
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 352
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_power.c | 26
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx.xml.h | 4562
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 1207
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 162
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | 382
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 818
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 60
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 435
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.h | 127
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 38
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_device.c | 100
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 287
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 28
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 497
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c | 479
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 153
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 637
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 133
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 2138
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 423
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c | 2393
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h | 103
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 2500
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 177
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 430
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 905
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 922
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 1173
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 88
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c | 155
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h | 53
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 511
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 804
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h | 168
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 323
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h | 139
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 540
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 218
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 1183
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 257
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 349
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 128
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 261
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 122
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 465
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 250
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 136
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 753
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 424
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 398
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 202
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 368
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 348
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 275
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h | 128
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h | 56
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c | 203
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h | 57
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c | 66
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h | 59
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 1345
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 290
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 245
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 1963
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 175
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c | 249
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h | 225
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 1079
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 199
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 1007
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 384
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h | 94
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h | 1376
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h) | 26
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c) | 10
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c) | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h) | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c) | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c) | 7
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c) | 21
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h) | 26
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c) | 4
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c) | 75
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c) | 60
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h) | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c) | 15
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c) | 107
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h) | 37
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c) | 154
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c) | 12
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c) | 20
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c) | 38
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c) | 17
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_common.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp_common.xml.h) | 26
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_format.c (renamed from drivers/gpu/drm/msm/mdp/mdp_format.c) | 3
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp_kms.c) | 0
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp_kms.h) | 2
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.c | 28
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.h | 24
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.xml.h | 196
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.c | 73
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_cfg.h | 13
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_host.c | 478
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi_manager.c | 141
-rw-r--r-- drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 26
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 119
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy.h | 3
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c | 223
-rw-r--r-- drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 5
-rw-r--r-- drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 9
-rw-r--r-- drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 824
-rw-r--r-- drivers/gpu/drm/msm/dsi/sfpb.xml.h | 26
-rw-r--r-- drivers/gpu/drm/msm/edp/edp.xml.h | 26
-rw-r--r-- drivers/gpu/drm/msm/edp/edp_connector.c | 4
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.c | 24
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 26
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 4
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c | 2
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy.c | 4
-rw-r--r-- drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 26
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic.c | 259
-rw-r--r-- drivers/gpu/drm/msm/msm_debugfs.c | 101
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 306
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 113
-rw-r--r-- drivers/gpu/drm/msm/msm_fb.c | 61
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 11
-rw-r--r-- drivers/gpu/drm/msm/msm_fence.h | 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 66
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.h | 1
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c | 9
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_vma.c | 4
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c | 213
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.h | 72
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.h | 48
-rw-r--r-- drivers/gpu/drm/msm/msm_ringbuffer.c | 2
156 files changed, 43612 insertions, 1518 deletions
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 99d39b2aefa6..843a9d40c05e 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -12,6 +12,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
select PM_OPP
@@ -28,6 +29,19 @@ config DRM_MSM_REGISTER_LOGGING
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+config DRM_MSM_GPU_SUDO
+ bool "Enable SUDO flag on submits"
+ depends on DRM_MSM && EXPERT
+ default n
+ help
+ Enable userspace that has CAP_SYS_RAWIO to submit GPU commands
+ that are run from RB instead of IB1. This essentially gives
+ userspace kernel level access, but is useful for firmware
+ debugging.
+
+ Only use this if you are a driver developer. This should *not*
+ be enabled for production kernels. If unsure, say N.
+
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"
depends on DRM_MSM && QCOM_SCM
@@ -81,3 +95,10 @@ config DRM_MSM_DSI_14NM_PHY
default y
help
Choose this option if DSI PHY on 8996 is used on the platform.
+
+config DRM_MSM_DSI_10NM_PHY
+ bool "Enable DSI 10nm PHY driver in MSM DRM (used by SDM845)"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if DSI PHY on SDM845 is used on the platform.
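[Note: as an aside on the DRM_MSM_GPU_SUDO option above, a minimal sketch of how a submit path could gate such a flag behind both the Kconfig option and CAP_SYS_RAWIO. The flag name MSM_SUBMIT_SUDO and the placement of this check are assumptions, not taken from this patch:

/* Illustrative sketch only: reject a raw-RB ("sudo") submit unless the
 * kernel was built with the option and the caller holds CAP_SYS_RAWIO.
 * MSM_SUBMIT_SUDO is assumed here for illustration. */
static int check_sudo_submit(u32 flags)
{
	if (!(flags & MSM_SUBMIT_SUDO))
		return 0;
	if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO))
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	return 0;
}
]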
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 92b3844202d2..261fa79d456d 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/gpu/drm/msm
+ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
@@ -10,6 +11,9 @@ msm-y := \
adreno/a5xx_gpu.o \
adreno/a5xx_power.o \
adreno/a5xx_preempt.o \
+ adreno/a6xx_gpu.o \
+ adreno/a6xx_gmu.o \
+ adreno/a6xx_hfi.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -25,26 +29,53 @@ msm-y := \
edp/edp_connector.o \
edp/edp_ctrl.o \
edp/edp_phy.o \
- mdp/mdp_format.o \
- mdp/mdp_kms.o \
- mdp/mdp4/mdp4_crtc.o \
- mdp/mdp4/mdp4_dtv_encoder.o \
- mdp/mdp4/mdp4_lcdc_encoder.o \
- mdp/mdp4/mdp4_lvds_connector.o \
- mdp/mdp4/mdp4_irq.o \
- mdp/mdp4/mdp4_kms.o \
- mdp/mdp4/mdp4_plane.o \
- mdp/mdp5/mdp5_cfg.o \
- mdp/mdp5/mdp5_ctl.o \
- mdp/mdp5/mdp5_crtc.o \
- mdp/mdp5/mdp5_encoder.o \
- mdp/mdp5/mdp5_irq.o \
- mdp/mdp5/mdp5_mdss.o \
- mdp/mdp5/mdp5_kms.o \
- mdp/mdp5/mdp5_pipe.o \
- mdp/mdp5/mdp5_mixer.o \
- mdp/mdp5/mdp5_plane.o \
- mdp/mdp5/mdp5_smp.o \
+ disp/mdp_format.o \
+ disp/mdp_kms.o \
+ disp/mdp4/mdp4_crtc.o \
+ disp/mdp4/mdp4_dtv_encoder.o \
+ disp/mdp4/mdp4_lcdc_encoder.o \
+ disp/mdp4/mdp4_lvds_connector.o \
+ disp/mdp4/mdp4_irq.o \
+ disp/mdp4/mdp4_kms.o \
+ disp/mdp4/mdp4_plane.o \
+ disp/mdp5/mdp5_cfg.o \
+ disp/mdp5/mdp5_ctl.o \
+ disp/mdp5/mdp5_crtc.o \
+ disp/mdp5/mdp5_encoder.o \
+ disp/mdp5/mdp5_irq.o \
+ disp/mdp5/mdp5_mdss.o \
+ disp/mdp5/mdp5_kms.o \
+ disp/mdp5/mdp5_pipe.o \
+ disp/mdp5/mdp5_mixer.o \
+ disp/mdp5/mdp5_plane.o \
+ disp/mdp5/mdp5_smp.o \
+ disp/dpu1/dpu_core_irq.o \
+ disp/dpu1/dpu_core_perf.o \
+ disp/dpu1/dpu_crtc.o \
+ disp/dpu1/dpu_encoder.o \
+ disp/dpu1/dpu_encoder_phys_cmd.o \
+ disp/dpu1/dpu_encoder_phys_vid.o \
+ disp/dpu1/dpu_formats.o \
+ disp/dpu1/dpu_hw_blk.o \
+ disp/dpu1/dpu_hw_catalog.o \
+ disp/dpu1/dpu_hw_cdm.o \
+ disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_interrupts.o \
+ disp/dpu1/dpu_hw_intf.o \
+ disp/dpu1/dpu_hw_lm.o \
+ disp/dpu1/dpu_hw_pingpong.o \
+ disp/dpu1/dpu_hw_sspp.o \
+ disp/dpu1/dpu_hw_top.o \
+ disp/dpu1/dpu_hw_util.o \
+ disp/dpu1/dpu_hw_vbif.o \
+ disp/dpu1/dpu_io_util.o \
+ disp/dpu1/dpu_irq.o \
+ disp/dpu1/dpu_kms.o \
+ disp/dpu1/dpu_mdss.o \
+ disp/dpu1/dpu_plane.o \
+ disp/dpu1/dpu_power_handle.o \
+ disp/dpu1/dpu_rm.o \
+ disp/dpu1/dpu_vbif.o \
msm_atomic.o \
msm_debugfs.o \
msm_drv.o \
@@ -62,31 +93,36 @@ msm-y := \
msm_ringbuffer.o \
msm_submitqueue.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+ disp/dpu1/dpu_dbg.o
+
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
- mdp/mdp4/mdp4_dsi_encoder.o \
+ disp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
dsi/phy/dsi_phy.o \
- mdp/mdp5/mdp5_cmd_encoder.o
+ disp/mdp5/mdp5_cmd_encoder.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 644374c7b3e0..4bff0a740c7d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -84,13 +86,12 @@ enum a2xx_sq_surfaceformat {
FMT_5_5_5_1 = 13,
FMT_8_8_8_8_A = 14,
FMT_4_4_4_4 = 15,
- FMT_10_11_11 = 16,
- FMT_11_11_10 = 17,
+ FMT_8_8_8 = 16,
FMT_DXT1 = 18,
FMT_DXT2_3 = 19,
FMT_DXT4_5 = 20,
+ FMT_10_10_10_2 = 21,
FMT_24_8 = 22,
- FMT_24_8_FLOAT = 23,
FMT_16 = 24,
FMT_16_16 = 25,
FMT_16_16_16_16 = 26,
@@ -106,29 +107,23 @@ enum a2xx_sq_surfaceformat {
FMT_32_FLOAT = 36,
FMT_32_32_FLOAT = 37,
FMT_32_32_32_32_FLOAT = 38,
- FMT_32_AS_8 = 39,
- FMT_32_AS_8_8 = 40,
- FMT_16_MPEG = 41,
- FMT_16_16_MPEG = 42,
- FMT_8_INTERLACED = 43,
- FMT_32_AS_8_INTERLACED = 44,
- FMT_32_AS_8_8_INTERLACED = 45,
- FMT_16_INTERLACED = 46,
- FMT_16_MPEG_INTERLACED = 47,
- FMT_16_16_MPEG_INTERLACED = 48,
+ FMT_ATI_TC_RGB = 39,
+ FMT_ATI_TC_RGBA = 40,
+ FMT_ATI_TC_555_565_RGB = 41,
+ FMT_ATI_TC_555_565_RGBA = 42,
+ FMT_ATI_TC_RGBA_INTERP = 43,
+ FMT_ATI_TC_555_565_RGBA_INTERP = 44,
+ FMT_ETC1_RGBA_INTERP = 46,
+ FMT_ETC1_RGB = 47,
+ FMT_ETC1_RGBA = 48,
FMT_DXN = 49,
- FMT_8_8_8_8_AS_16_16_16_16 = 50,
- FMT_DXT1_AS_16_16_16_16 = 51,
- FMT_DXT2_3_AS_16_16_16_16 = 52,
- FMT_DXT4_5_AS_16_16_16_16 = 53,
+ FMT_2_3_3 = 51,
FMT_2_10_10_10_AS_16_16_16_16 = 54,
- FMT_10_11_11_AS_16_16_16_16 = 55,
- FMT_11_11_10_AS_16_16_16_16 = 56,
+ FMT_10_10_10_2_AS_16_16_16_16 = 55,
FMT_32_32_32_FLOAT = 57,
FMT_DXT3A = 58,
FMT_DXT5A = 59,
FMT_CTX1 = 60,
- FMT_DXT3A_AS_1_1_1_1 = 61,
};
enum a2xx_sq_ps_vtx_mode {
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 663a73216926..645a19aef399 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 4baef2738178..669c2d4b070d 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -35,6 +35,7 @@
A3XX_INT0_CP_RB_INT | \
A3XX_INT0_CP_REG_PROTECT_FAULT | \
A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_CACHE_FLUSH_TS | \
A3XX_INT0_UCHE_OOB_ACCESS)
extern bool hang_debug;
@@ -256,8 +257,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
*/
/* Load PM4: */
- ptr = (uint32_t *)(adreno_gpu->pm4->data);
- len = adreno_gpu->pm4->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %x", ptr[1]);
gpu_write(gpu, REG_AXXX_CP_DEBUG,
@@ -268,8 +269,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
- ptr = (uint32_t *)(adreno_gpu->pfp->data);
- len = adreno_gpu->pfp->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %x", ptr[5]);
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
@@ -410,15 +411,6 @@ static const unsigned int a3xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
-{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- adreno_show(gpu, m);
-}
-#endif
-
/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
@@ -426,6 +418,21 @@ static void a3xx_dump(struct msm_gpu *gpu)
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
+
+static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+
+ return state;
+}
+
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -449,9 +456,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a3xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
};
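[Note: the gpu_state_get()/gpu_state_put() pair added above replaces the debugfs-only show() hook with a state-capture API usable by both debugfs and devcoredump. A minimal sketch of a caller, assuming the funcs table is reachable as gpu->funcs; the actual recover/devcoredump plumbing is outside this hunk:

/* Hypothetical caller: capture a snapshot, print the one field this
 * patch records (rbbm_status), then release it via the paired put(). */
static void example_snapshot(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = gpu->funcs->gpu_state_get(gpu);

	if (IS_ERR(state))
		return;

	pr_info("status: %08x\n", state->rbbm_status);
	gpu->funcs->gpu_state_put(state);
}
]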
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 1a14f4a40b9c..19565e87aa7b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -263,12 +265,6 @@ enum a4xx_depth_format {
DEPTH4_32 = 3,
};
-enum a4xx_tess_spacing {
- EQUAL_SPACING = 0,
- ODD_SPACING = 2,
- EVEN_SPACING = 3,
-};
-
enum a4xx_ccu_perfcounter_select {
CCU_BUSY_CYCLES = 0,
CCU_RB_DEPTH_RETURN_STALL = 2,
@@ -3544,12 +3540,13 @@ static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_VS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3571,12 +3568,13 @@ static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_FS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3598,12 +3596,13 @@ static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_HS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3625,12 +3624,13 @@ static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_DS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3652,12 +3652,13 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_GS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3672,23 +3673,103 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
}
-#define REG_A4XX_HLSQ_CS_CONTROL 0x000023ca
+#define REG_A4XX_HLSQ_CS_CONTROL_REG 0x000023ca
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_CS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT) & A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf
#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT) & A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1
#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT) & A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3
#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK 0x000000ff
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK;
+}
#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5
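[Note: each generated helper above masks and shifts its field, so a complete register value is simply the OR of its fields. A sketch with placeholder x/y/z values, written back with the driver's gpu_write() as seen elsewhere in this patch:

/* Sketch with arbitrary values: the __MASK applied inside each helper
 * clips the argument, so an out-of-range value cannot spill into a
 * neighboring field. */
uint32_t ndrange0 = A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(3) |
		    A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(x) |
		    A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(y) |
		    A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(z);
gpu_write(gpu, REG_A4XX_HLSQ_CL_NDRANGE_0, ndrange0);
]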
@@ -4087,5 +4168,71 @@ static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
#define REG_A4XX_TEX_CONST_7 0x00000007
+#define REG_A4XX_SSBO_0_0 0x00000000
+#define A4XX_SSBO_0_0_BASE__MASK 0xffffffe0
+#define A4XX_SSBO_0_0_BASE__SHIFT 5
+static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK;
+}
+
+#define REG_A4XX_SSBO_0_1 0x00000001
+#define A4XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A4XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_1_PITCH__SHIFT) & A4XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_2 0x00000002
+#define A4XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_3 0x00000003
+#define A4XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A4XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_3_CPP__SHIFT) & A4XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A4XX_SSBO_1_0 0x00000000
+#define A4XX_SSBO_1_0_CPP__MASK 0x0000001f
+#define A4XX_SSBO_1_0_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_0_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_CPP__SHIFT) & A4XX_SSBO_1_0_CPP__MASK;
+}
+#define A4XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A4XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A4XX_SSBO_1_0_FMT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SSBO_1_0_FMT__SHIFT) & A4XX_SSBO_1_0_FMT__MASK;
+}
+#define A4XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A4XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_WIDTH__SHIFT) & A4XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A4XX_SSBO_1_1 0x00000001
+#define A4XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A4XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_HEIGHT__SHIFT) & A4XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A4XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A4XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK;
+}
+
#endif /* A4XX_XML */
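[Note: the SSBO helpers above are asymmetric: A4XX_SSBO_0_0_BASE() shifts the argument right by 5 before placing it at bit 5, i.e. it stores bits [31:5] of a 32-byte-aligned address, and A4XX_SSBO_0_2_ARRAY_PITCH() does the same at 4 KiB granularity. A one-line sketch, with iova as a placeholder buffer address:

/* Sketch: the helper drops the low 5 bits, so the result equals
 * iova & 0xffffffe0; iova is assumed to be 32-byte aligned. */
uint32_t ssbo_dword0 = A4XX_SSBO_0_0_BASE(iova);
]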
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 8199a4b9f2fa..7c4e6dc1ed59 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -27,6 +27,7 @@
A4XX_INT0_CP_RB_INT | \
A4XX_INT0_CP_REG_PROTECT_FAULT | \
A4XX_INT0_CP_AHB_ERROR_HALT | \
+ A4XX_INT0_CACHE_FLUSH_TS | \
A4XX_INT0_UCHE_OOB_ACCESS)
extern bool hang_debug;
@@ -274,16 +275,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
return ret;
/* Load PM4: */
- ptr = (uint32_t *)(adreno_gpu->pm4->data);
- len = adreno_gpu->pm4->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
- ptr = (uint32_t *)(adreno_gpu->pfp->data);
- len = adreno_gpu->pfp->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
@@ -454,15 +455,19 @@ static const unsigned int a4xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
+static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- adreno_show(gpu, m);
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+ state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
+
+ return state;
}
-#endif
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
@@ -537,9 +542,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a4xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
.get_timestamp = a4xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index e0e6711f4f78..182d37ff3794 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -119,6 +121,11 @@ enum a5xx_vtx_fmt {
VFMT5_8_8_8_8_SNORM = 50,
VFMT5_8_8_8_8_UINT = 51,
VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_10_10_10_2_UNORM = 54,
+ VFMT5_10_10_10_2_SNORM = 57,
+ VFMT5_10_10_10_2_UINT = 58,
+ VFMT5_10_10_10_2_SINT = 59,
+ VFMT5_11_11_10_FLOAT = 66,
VFMT5_16_16_UNORM = 67,
VFMT5_16_16_SNORM = 68,
VFMT5_16_16_FLOAT = 69,
@@ -204,14 +211,45 @@ enum a5xx_tex_fmt {
TFMT5_32_32_FLOAT = 103,
TFMT5_32_32_UINT = 104,
TFMT5_32_32_SINT = 105,
+ TFMT5_32_32_32_UINT = 114,
+ TFMT5_32_32_32_SINT = 115,
+ TFMT5_32_32_32_FLOAT = 116,
TFMT5_32_32_32_32_FLOAT = 130,
TFMT5_32_32_32_32_UINT = 131,
TFMT5_32_32_32_32_SINT = 132,
TFMT5_X8Z24_UNORM = 160,
+ TFMT5_ETC2_RG11_UNORM = 171,
+ TFMT5_ETC2_RG11_SNORM = 172,
+ TFMT5_ETC2_R11_UNORM = 173,
+ TFMT5_ETC2_R11_SNORM = 174,
+ TFMT5_ETC1 = 175,
+ TFMT5_ETC2_RGB8 = 176,
+ TFMT5_ETC2_RGBA8 = 177,
+ TFMT5_ETC2_RGB8A1 = 178,
+ TFMT5_DXT1 = 179,
+ TFMT5_DXT3 = 180,
+ TFMT5_DXT5 = 181,
TFMT5_RGTC1_UNORM = 183,
TFMT5_RGTC1_SNORM = 184,
TFMT5_RGTC2_UNORM = 187,
TFMT5_RGTC2_SNORM = 188,
+ TFMT5_BPTC_UFLOAT = 190,
+ TFMT5_BPTC_FLOAT = 191,
+ TFMT5_BPTC = 192,
+ TFMT5_ASTC_4x4 = 193,
+ TFMT5_ASTC_5x4 = 194,
+ TFMT5_ASTC_5x5 = 195,
+ TFMT5_ASTC_6x5 = 196,
+ TFMT5_ASTC_6x6 = 197,
+ TFMT5_ASTC_8x5 = 198,
+ TFMT5_ASTC_8x6 = 199,
+ TFMT5_ASTC_8x8 = 200,
+ TFMT5_ASTC_10x5 = 201,
+ TFMT5_ASTC_10x6 = 202,
+ TFMT5_ASTC_10x8 = 203,
+ TFMT5_ASTC_10x10 = 204,
+ TFMT5_ASTC_12x10 = 205,
+ TFMT5_ASTC_12x12 = 206,
};
enum a5xx_tex_fetchsize {
@@ -239,7 +277,7 @@ enum a5xx_blit_buf {
BLIT_MRT6 = 6,
BLIT_MRT7 = 7,
BLIT_ZS = 8,
- BLIT_Z32 = 9,
+ BLIT_S = 9,
};
enum a5xx_cp_perfcounter_select {
@@ -899,6 +937,12 @@ enum a5xx_tex_type {
#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+#define REG_A5XX_CP_ME_NRT_ADDR_LO 0x0000080d
+
+#define REG_A5XX_CP_ME_NRT_ADDR_HI 0x0000080e
+
+#define REG_A5XX_CP_ME_NRT_DATA 0x00000810
+
#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
@@ -2072,9 +2116,17 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_PC_MODE_CNTL 0x00000d02
-#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+#define REG_A5XX_PC_INDEX_BUF_LO 0x00000d04
+
+#define REG_A5XX_PC_INDEX_BUF_HI 0x00000d05
+
+#define REG_A5XX_PC_START_INDEX 0x00000d06
-#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+#define REG_A5XX_PC_MAX_INDEX 0x00000d07
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_LO 0x00000d08
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_HI 0x00000d09
#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
@@ -2327,6 +2379,14 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3
+#define REG_A5XX_VBIF_PERF_CNT_CLR0 0x000030c8
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR1 0x000030c9
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR2 0x000030ca
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR3 0x000030cb
+
#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
@@ -2590,6 +2650,7 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
#define REG_A5XX_UNKNOWN_E001 0x0000e001
@@ -2700,7 +2761,7 @@ static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
}
-#define REG_A5XX_UNKNOWN_E093 0x0000e093
+#define REG_A5XX_GRAS_SU_LAYERED 0x0000e093
#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
@@ -2936,7 +2997,9 @@ static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000004
#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
@@ -3002,6 +3065,13 @@ static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0
static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A5XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A5XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -3060,6 +3130,12 @@ static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode
{
return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
}
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00001800
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 11
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
@@ -3223,6 +3299,7 @@ static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3369,7 +3446,25 @@ static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
}
-#define REG_A5XX_UNKNOWN_E1C7 0x0000e1c7
+#define REG_A5XX_RB_STENCILREFMASK_BF 0x0000e1c7
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
@@ -3428,6 +3523,7 @@ static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
}
#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+#define A5XX_RB_RESOLVE_CNTL_3_TILED 0x00000001
#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
@@ -3459,6 +3555,7 @@ static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE 0x00000004
#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
@@ -3627,22 +3724,69 @@ static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
{
return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
}
+#define A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART 0x00000100
+#define A5XX_PC_PRIMITIVE_CNTL_COUNT_PRIMITIVES 0x00000200
#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400
#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040
#define REG_A5XX_UNKNOWN_E389 0x0000e389
#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
-#define REG_A5XX_UNKNOWN_E38D 0x0000e38d
+#define REG_A5XX_PC_GS_LAYERED 0x0000e38d
#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A5XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A5XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A5XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A5XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A5XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A5XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A5XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A5XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A5XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A5XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A5XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A5XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A5XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_SPACING__SHIFT) & A5XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A5XX_PC_HS_PARAM_CW 0x00800000
+#define A5XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
@@ -3667,10 +3811,40 @@ static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
{
return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
}
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
#define REG_A5XX_VFD_CONTROL_4 0x0000e404
@@ -3700,12 +3874,18 @@ static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
}
#define A5XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
-#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
{
return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
#define A5XX_VFD_DECODE_INSTR_UNK30 0x40000000
#define A5XX_VFD_DECODE_INSTR_FLOAT 0x80000000
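The narrowed FORMAT field and the new SWAP field compose the same way; a sketch of one vertex-attribute decode word, again with illustrative values only:

	/* Illustrative only: describe a vec4 float attribute, no swizzle. */
	uint32_t decode = A5XX_VFD_DECODE_INSTR_IDX(0) |
			  A5XX_VFD_DECODE_INSTR_FORMAT(VFMT5_32_32_32_32_FLOAT) |
			  A5XX_VFD_DECODE_INSTR_SWAP(XYZW) |
			  A5XX_VFD_DECODE_INSTR_FLOAT;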
@@ -3960,6 +4140,7 @@ static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
#define A5XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
@@ -4001,16 +4182,12 @@ static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
}
+#define A5XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A5XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
-#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
-
-#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
-
-#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
-
#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0
#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008
#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3
@@ -4039,7 +4216,39 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define REG_A5XX_UNKNOWN_E600 0x0000e600
+#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
+
+#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
+
+#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
+
+#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
#define REG_A5XX_UNKNOWN_E602 0x0000e602
@@ -4047,13 +4256,67 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604
+#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
#define REG_A5XX_UNKNOWN_E62B 0x0000e62b
#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c
#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d
-#define REG_A5XX_UNKNOWN_E640 0x0000e640
+#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
#define REG_A5XX_UNKNOWN_E65B 0x0000e65b
@@ -4173,6 +4436,18 @@ static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
{
return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
@@ -4375,34 +4650,52 @@ static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
}
#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
-#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_SIZE_X(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
-#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
-#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
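With the renamed fields, NDRANGE_1..6 alternate per-axis global size and offset. A sketch of a hypothetical 64x64x1 dispatch; these are context registers normally emitted from the command stream, written here with gpu_write() purely for illustration:

	gpu_write(gpu, REG_A5XX_HLSQ_CS_NDRANGE_1, A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(64));
	gpu_write(gpu, REG_A5XX_HLSQ_CS_NDRANGE_2, A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(0));
	gpu_write(gpu, REG_A5XX_HLSQ_CS_NDRANGE_3, A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(64));
	gpu_write(gpu, REG_A5XX_HLSQ_CS_NDRANGE_4, A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(0));
	gpu_write(gpu, REG_A5XX_HLSQ_CS_NDRANGE_5, A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(1));
	gpu_write(gpu, REG_A5XX_HLSQ_CS_NDRANGE_6, A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(0));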
#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
@@ -4468,6 +4761,8 @@ static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
#define REG_A5XX_HLSQ_CS_INSTRLEN 0x0000e7dd
+#define REG_A5XX_RB_2D_BLIT_CNTL 0x00002100
+
#define REG_A5XX_RB_2D_SRC_SOLID_DW0 0x00002101
#define REG_A5XX_RB_2D_SRC_SOLID_DW1 0x00002102
@@ -4483,12 +4778,19 @@ static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK;
+}
#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_RB_2D_SRC_INFO_FLAGS 0x00001000
#define REG_A5XX_RB_2D_SRC_LO 0x00002108
@@ -4515,12 +4817,19 @@ static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_RB_2D_DST_INFO_FLAGS 0x00001000
#define REG_A5XX_RB_2D_DST_LO 0x00002111
@@ -4548,6 +4857,8 @@ static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180
+
#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
@@ -4555,12 +4866,19 @@ static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt va
{
return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK;
+}
#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_GRAS_2D_SRC_INFO_FLAGS 0x00001000
#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -4569,12 +4887,19 @@ static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt va
{
return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK;
+}
#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_GRAS_2D_DST_INFO_FLAGS 0x00001000
#define REG_A5XX_UNKNOWN_2100 0x00002100
@@ -4698,6 +5023,12 @@ static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A5XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A5XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A5XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SAMPLES__SHIFT) & A5XX_TEX_CONST_0_SAMPLES__MASK;
+}
#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A5XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
@@ -4788,5 +5119,81 @@ static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
#define REG_A5XX_TEX_CONST_11 0x0000000b
+#define REG_A5XX_SSBO_0_0 0x00000000
+#define A5XX_SSBO_0_0_BASE_LO__MASK 0xffffffe0
+#define A5XX_SSBO_0_0_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_SSBO_0_0_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_0_1 0x00000001
+#define A5XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A5XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_1_PITCH__SHIFT) & A5XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_2 0x00000002
+#define A5XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A5XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_3 0x00000003
+#define A5XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A5XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_3_CPP__SHIFT) & A5XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A5XX_SSBO_1_0 0x00000000
+#define A5XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A5XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A5XX_SSBO_1_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_SSBO_1_0_FMT__SHIFT) & A5XX_SSBO_1_0_FMT__MASK;
+}
+#define A5XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A5XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_0_WIDTH__SHIFT) & A5XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A5XX_SSBO_1_1 0x00000001
+#define A5XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A5XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A5XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_HEIGHT__SHIFT) & A5XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A5XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A5XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_DEPTH__SHIFT) & A5XX_SSBO_1_1_DEPTH__MASK;
+}
+
+#define REG_A5XX_SSBO_2_0 0x00000000
+#define A5XX_SSBO_2_0_BASE_LO__MASK 0xffffffff
+#define A5XX_SSBO_2_0_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_0_BASE_LO__SHIFT) & A5XX_SSBO_2_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_2_1 0x00000001
+#define A5XX_SSBO_2_1_BASE_HI__MASK 0xffffffff
+#define A5XX_SSBO_2_1_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
+}
+
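Like the TEX_CONST dwords above, the SSBO_n_m offsets appear to be relative dword indices within a descriptor written to memory rather than MMIO registers. A sketch of filling the first descriptor group, with hypothetical iova/pitch/array_pitch/cpp variables:

	uint32_t desc[4];
	desc[0] = A5XX_SSBO_0_0_BASE_LO(lower_32_bits(iova)); /* builder masks to 32-byte alignment */
	desc[1] = A5XX_SSBO_0_1_PITCH(pitch);
	desc[2] = A5XX_SSBO_0_2_ARRAY_PITCH(array_pitch);
	desc[3] = A5XX_SSBO_0_3_CPP(cpp);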
#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
new file mode 100644
index 000000000000..059ec7d394d0
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -0,0 +1,187 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
+#include "a5xx_gpu.h"
+
+static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "PFP state:\n");
+
+ for (i = 0; i < 36; i++) {
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
+ }
+
+ return 0;
+}
+
+static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ME state:\n");
+
+ for (i = 0; i < 29; i++) {
+ gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
+ }
+
+ return 0;
+}
+
+static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "MEQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+
+ for (i = 0; i < 64; i++) {
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+ }
+
+ return 0;
+}
+
+static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ROQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+
+ for (i = 0; i < 512 / 4; i++) {
+ uint32_t val[4];
+ int j;
+ for (j = 0; j < 4; j++)
+ val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA);
+ drm_printf(p, " %02x: %08x %08x %08x %08x\n", i,
+ val[0], val[1], val[2], val[3]);
+ }
+
+ return 0;
+}
+
+static int show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ int (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
+ node->info_ent->data;
+
+ return show(priv->gpu, &p);
+}
+
+#define ENT(n) { .name = #n, .show = show, .data = n ##_print }
+static struct drm_info_list a5xx_debugfs_list[] = {
+ ENT(pfp),
+ ENT(me),
+ ENT(meq),
+ ENT(roq),
+};
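For reference, ENT(pfp) expands to the equivalent of:

	{ .name = "pfp", .show = show, .data = pfp_print },

so each debugfs entry routes through the common show() wrapper, which recovers the matching *_print() helper from node->info_ent->data.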
+
+/* for debugfs files that can be written to, we can't use drm helper: */
+static int
+reset_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EINVAL;
+
+ /* TODO do we care about trying to make sure the GPU is idle?
+ * Since this is just a debug feature limited to CAP_SYS_ADMIN,
+ * maybe it is fine to let the user keep both pieces if they
+ * try to reset an active GPU.
+ */
+
+ mutex_lock(&dev->struct_mutex);
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
+ adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]);
+ adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
+
+ if (a5xx_gpu->pm4_bo) {
+ if (a5xx_gpu->pm4_iova)
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_unreference(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ if (a5xx_gpu->pfp_iova)
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_unreference(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ }
+
+ gpu->needs_hw_init = true;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ gpu->funcs->recover(gpu);
+
+ pm_runtime_put_sync(&gpu->pdev->dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
+
+
+int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+{
+ struct drm_device *dev;
+ struct dentry *ent;
+ int ret;
+
+ if (!minor)
+ return 0;
+
+ dev = minor->dev;
+
+ ret = drm_debugfs_create_files(a5xx_debugfs_list,
+ ARRAY_SIZE(a5xx_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+ return ret;
+ }
+
+ ent = debugfs_create_file("reset", S_IWUGO,
+ minor->debugfs_root,
+ dev, &reset_fops);
+ if (!ent)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7e09d44e4a15..ab1d9308c311 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
@@ -19,6 +20,8 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
@@ -89,14 +92,15 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
*/
if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size);
+ mem_region, mem_phys, mem_size, NULL);
} else {
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size);
+ mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
}
if (ret)
goto out;
@@ -140,6 +144,65 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
+static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ struct msm_gem_object *obj;
+ uint32_t *ptr, dwords;
+ unsigned int i, j;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
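+ /* fall-thru */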
+ case MSM_SUBMIT_CMD_BUF:
+ /* copy commands into RB: */
+ obj = submit->bos[submit->cmd[i].idx].obj;
+ dwords = submit->cmd[i].size;
+
+ ptr = msm_gem_get_vaddr(&obj->base);
+
+ /* _get_vaddr() shouldn't fail at this point,
+ * since we've already mapped it once in
+ * submit_reloc()
+ */
+ if (WARN_ON(!ptr))
+ return;
+
+ for (j = 0; j < dwords; j++) {
+ /* normally the OUT_PKTn() would wait
+ * for space for the packet. But since
+ * we just OUT_RING() the whole thing,
+ * need to call adreno_wait_ring()
+ * ourself:
+ */
+ adreno_wait_ring(ring, 1);
+ OUT_RING(ring, ptr[j]);
+ }
+
+ msm_gem_put_vaddr(&obj->base);
+
+ break;
+ }
+ }
+
+ a5xx_flush(gpu, ring);
+ a5xx_preempt_trigger(gpu);
+
+ /* we might not necessarily have a cmd from userspace to
+ * trigger an event to know that submit has completed, so
+ * do this manually:
+ */
+ a5xx_idle(gpu, ring);
+ ring->memptrs->fence = submit->seqno;
+ msm_gpu_retire(gpu);
+}
+
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
@@ -149,6 +212,12 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
+ priv->lastctx = NULL;
+ a5xx_submit_in_rb(gpu, submit, ctx);
+ return;
+ }
+
OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x02);
@@ -432,25 +501,6 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
-
-static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
- const struct firmware *fw, u64 *iova)
-{
- struct drm_gem_object *bo;
- void *ptr;
-
- ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
- MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
-
- if (IS_ERR(ptr))
- return ERR_CAST(ptr);
-
- memcpy(ptr, &fw->data[4], fw->size - 4);
-
- msm_gem_put_vaddr(bo);
- return bo;
-}
-
static int a5xx_ucode_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -458,8 +508,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
int ret;
if (!a5xx_gpu->pm4_bo) {
- a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
- &a5xx_gpu->pm4_iova);
+ a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
@@ -471,8 +521,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
}
if (!a5xx_gpu->pfp_bo) {
- a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
- &a5xx_gpu->pfp_iova);
+ a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);
if (IS_ERR(a5xx_gpu->pfp_bo)) {
ret = PTR_ERR(a5xx_gpu->pfp_bo);
@@ -793,19 +843,19 @@ static void a5xx_destroy(struct msm_gpu *gpu)
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+ drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova)
msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+ drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+ drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
}
adreno_gpu_cleanup(adreno_gpu);
@@ -1077,8 +1127,9 @@ static const u32 a5xx_registers[] = {
0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
- 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
- 0xB9A0, 0xB9BF, ~0
+ 0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
+ 0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
+ 0xAC60, 0xAC60, ~0,
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1149,19 +1200,231 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+struct a5xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a5xx_gpu_state {
+ struct msm_gpu_state base;
+ u32 *hlsqregs;
+};
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+static int a5xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ &dumper->bo, &dumper->iova);
+
+ if (IS_ERR(dumper->ptr))
+ return PTR_ERR(dumper->ptr);
+
+ return 0;
+}
+
+static void a5xx_crashdumper_free(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ msm_gem_put_iova(dumper->bo, gpu->aspace);
+ msm_gem_put_vaddr(dumper->bo);
- /*
- * Temporarily disable hardware clock gating before going into
- * adreno_show to avoid issues while reading the registers
- */
+ drm_gem_object_unreference(dumper->bo);
+}
+
+static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ u32 val;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+ REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
+ val & 0x04, 100, 10000);
+}
+
+/*
+ * This is a list of the registers that need to be read through the HLSQ
+ * aperture with the crashdumper. They are not nominally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+ { 0x35, 0xe00, 0x32 }, /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
+ { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
+ { 0x3a, 0x0f00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */
+};
+
+static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
+ struct a5xx_gpu_state *a5xx_state)
+{
+ struct a5xx_crashdumper dumper = { 0 };
+ u32 offset, count = 0;
+ u64 *ptr;
+ int i;
+
+ if (a5xx_crashdumper_init(gpu, &dumper))
+ return;
+
+ /* The script will be written at offset 0 */
+ ptr = dumper.ptr;
+
+ /* Start writing the data at offset 256k */
+ offset = dumper.iova + (256 * SZ_1K);
+
+ /* Count how many additional registers to get from the HLSQ aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ count += a5xx_hlsq_aperture_regs[i].count;
+
+ a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ /* Build the crashdump script */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 type = a5xx_hlsq_aperture_regs[i].type;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ /* Write the register to select the desired bank */
+ *ptr++ = ((u64) type << 8);
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
+ (1 << 21) | 1;
+
+ *ptr++ = offset;
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
+ | c;
+
+ offset += c * sizeof(u32);
+ }
+
+ /* Write two zeros to close off the script */
+ *ptr++ = 0;
+ *ptr++ = 0;
+
+ if (a5xx_crashdumper_run(gpu, &dumper)) {
+ kfree(a5xx_state->hlsqregs);
+ a5xx_state->hlsqregs = NULL;
+ a5xx_crashdumper_free(gpu, &dumper);
+ return;
+ }
+
+ /* Copy the data from the crashdumper to the state */
+ memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
+ count * sizeof(u32));
+
+ a5xx_crashdumper_free(gpu, &dumper);
+}
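Read off the script-building loop above, each crashdumper operation appears to be a pair of u64 words: word 0 carries the payload (an immediate register value for a write, or the destination iova for a read-to-memory), and word 1 packs the register offset in its top bits with a dword count in the low bits. Inferred layout, not from documentation:

	/*
	 * word 0: value to write, or target iova to dump into
	 * word 1: [63:44] register offset
	 *         [21]    appears to mark a write (vs. read-to-memory)
	 *         [20:0]  dword count (1 for a write)
	 * Two zero words terminate the script.
	 */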
+
+static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
+ GFP_KERNEL);
+
+ if (!a5xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Temporarily disable hardware clock gating before reading the hw */
a5xx_set_hwcg(gpu, false);
- adreno_show(gpu, m);
+
+ /* First get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &(a5xx_state->base));
+
+ a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+ /* Get the HLSQ regs with the help of the crashdumper */
+ a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
+
a5xx_set_hwcg(gpu, true);
+
+ return &a5xx_state->base;
+}
+
+static void a5xx_gpu_state_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ kfree(a5xx_state->hlsqregs);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a5xx_state);
+}
+
+int a5xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a5xx_gpu_state_destroy);
+}
+
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ int i, j;
+ u32 pos = 0;
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ /* Dump the additional a5xx HLSQ registers */
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ drm_printf(p, "registers-hlsq:\n");
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ for (j = 0; j < c; j++, pos++, o++) {
+ /*
+ * To keep the crashdump simple we pull the entire range
+ * for each register type, but not all of the registers
+ * in the range are valid. Fortunately invalid registers
+ * stick out like a sore thumb with a value of
+ * 0xdeadbeef.
+ */
+ if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ o << 2, a5xx_state->hlsqregs[pos]);
+ }
+ }
}
#endif
@@ -1193,10 +1456,15 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = a5xx_show,
#endif
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = a5xx_debugfs_init,
+#endif
.gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
},
.get_timestamp = a5xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6fb8c2f9b9e4..7d71860c4bee 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -49,6 +49,10 @@ struct a5xx_gpu {
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+#ifdef CONFIG_DEBUG_FS
+int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+#endif
+
/*
* In order to do lockless preemption we use a simple state machine to progress
* through the process.
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 4e4d965fd9ab..e9c0e56dbec0 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -261,7 +261,6 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct drm_device *drm = gpu->dev;
- const struct firmware *fw;
uint32_t dwords = 0, offset = 0, bosize;
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
@@ -269,15 +268,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
if (a5xx_gpu->gpmu_bo)
return;
- /* Get the firmware */
- fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw);
- if (IS_ERR(fw)) {
- DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
- gpu->name);
- return;
- }
-
- data = (unsigned int *) fw->data;
+ data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;
/*
* The first dword is the size of the remaining data in dwords. Use it
@@ -285,12 +276,14 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
* the firmware that we read
*/
- if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
- goto out;
+ if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
+ (data[0] < 2) || (data[0] >=
+ (adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
+ return;
/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
if (data[1] != 2)
- goto out;
+ return;
cmds = data + data[2] + 3;
cmds_size = data[0] - data[2] - 2;
@@ -325,8 +318,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;
- goto out;
-
+ return;
err:
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
@@ -336,8 +328,4 @@ err:
a5xx_gpu->gpmu_bo = NULL;
a5xx_gpu->gpmu_iova = 0;
a5xx_gpu->gpmu_dwords = 0;
-
-out:
- /* No need to keep that firmware laying around anymore */
- release_firmware(fw);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
new file mode 100644
index 000000000000..87eab51f7000
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -0,0 +1,4562 @@
+#ifndef A6XX_XML
+#define A6XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a6xx_color_fmt {
+ RB6_A8_UNORM = 2,
+ RB6_R8_UNORM = 3,
+ RB6_R8_SNORM = 4,
+ RB6_R8_UINT = 5,
+ RB6_R8_SINT = 6,
+ RB6_R4G4B4A4_UNORM = 8,
+ RB6_R5G5B5A1_UNORM = 10,
+ RB6_R5G6B5_UNORM = 14,
+ RB6_R8G8_UNORM = 15,
+ RB6_R8G8_SNORM = 16,
+ RB6_R8G8_UINT = 17,
+ RB6_R8G8_SINT = 18,
+ RB6_R16_UNORM = 21,
+ RB6_R16_SNORM = 22,
+ RB6_R16_FLOAT = 23,
+ RB6_R16_UINT = 24,
+ RB6_R16_SINT = 25,
+ RB6_R8G8B8A8_UNORM = 48,
+ RB6_R8G8B8_UNORM = 49,
+ RB6_R8G8B8A8_SNORM = 50,
+ RB6_R8G8B8A8_UINT = 51,
+ RB6_R8G8B8A8_SINT = 52,
+ RB6_R10G10B10A2_UNORM = 55,
+ RB6_R10G10B10A2_UINT = 58,
+ RB6_R11G11B10_FLOAT = 66,
+ RB6_R16G16_UNORM = 67,
+ RB6_R16G16_SNORM = 68,
+ RB6_R16G16_FLOAT = 69,
+ RB6_R16G16_UINT = 70,
+ RB6_R16G16_SINT = 71,
+ RB6_R32_FLOAT = 74,
+ RB6_R32_UINT = 75,
+ RB6_R32_SINT = 76,
+ RB6_R16G16B16A16_UNORM = 96,
+ RB6_R16G16B16A16_SNORM = 97,
+ RB6_R16G16B16A16_FLOAT = 98,
+ RB6_R16G16B16A16_UINT = 99,
+ RB6_R16G16B16A16_SINT = 100,
+ RB6_R32G32_FLOAT = 103,
+ RB6_R32G32_UINT = 104,
+ RB6_R32G32_SINT = 105,
+ RB6_R32G32B32A32_FLOAT = 130,
+ RB6_R32G32B32A32_UINT = 131,
+ RB6_R32G32B32A32_SINT = 132,
+ RB6_X8Z24_UNORM = 160,
+};
+
+enum a6xx_tile_mode {
+ TILE6_LINEAR = 0,
+ TILE6_2 = 2,
+ TILE6_3 = 3,
+};
+
+enum a6xx_vtx_fmt {
+ VFMT6_8_UNORM = 3,
+ VFMT6_8_SNORM = 4,
+ VFMT6_8_UINT = 5,
+ VFMT6_8_SINT = 6,
+ VFMT6_8_8_UNORM = 15,
+ VFMT6_8_8_SNORM = 16,
+ VFMT6_8_8_UINT = 17,
+ VFMT6_8_8_SINT = 18,
+ VFMT6_16_UNORM = 21,
+ VFMT6_16_SNORM = 22,
+ VFMT6_16_FLOAT = 23,
+ VFMT6_16_UINT = 24,
+ VFMT6_16_SINT = 25,
+ VFMT6_8_8_8_UNORM = 33,
+ VFMT6_8_8_8_SNORM = 34,
+ VFMT6_8_8_8_UINT = 35,
+ VFMT6_8_8_8_SINT = 36,
+ VFMT6_8_8_8_8_UNORM = 48,
+ VFMT6_8_8_8_8_SNORM = 50,
+ VFMT6_8_8_8_8_UINT = 51,
+ VFMT6_8_8_8_8_SINT = 52,
+ VFMT6_10_10_10_2_UNORM = 54,
+ VFMT6_10_10_10_2_SNORM = 57,
+ VFMT6_10_10_10_2_UINT = 58,
+ VFMT6_10_10_10_2_SINT = 59,
+ VFMT6_11_11_10_FLOAT = 66,
+ VFMT6_16_16_UNORM = 67,
+ VFMT6_16_16_SNORM = 68,
+ VFMT6_16_16_FLOAT = 69,
+ VFMT6_16_16_UINT = 70,
+ VFMT6_16_16_SINT = 71,
+ VFMT6_32_UNORM = 72,
+ VFMT6_32_SNORM = 73,
+ VFMT6_32_FLOAT = 74,
+ VFMT6_32_UINT = 75,
+ VFMT6_32_SINT = 76,
+ VFMT6_32_FIXED = 77,
+ VFMT6_16_16_16_UNORM = 88,
+ VFMT6_16_16_16_SNORM = 89,
+ VFMT6_16_16_16_FLOAT = 90,
+ VFMT6_16_16_16_UINT = 91,
+ VFMT6_16_16_16_SINT = 92,
+ VFMT6_16_16_16_16_UNORM = 96,
+ VFMT6_16_16_16_16_SNORM = 97,
+ VFMT6_16_16_16_16_FLOAT = 98,
+ VFMT6_16_16_16_16_UINT = 99,
+ VFMT6_16_16_16_16_SINT = 100,
+ VFMT6_32_32_UNORM = 101,
+ VFMT6_32_32_SNORM = 102,
+ VFMT6_32_32_FLOAT = 103,
+ VFMT6_32_32_UINT = 104,
+ VFMT6_32_32_SINT = 105,
+ VFMT6_32_32_FIXED = 106,
+ VFMT6_32_32_32_UNORM = 112,
+ VFMT6_32_32_32_SNORM = 113,
+ VFMT6_32_32_32_UINT = 114,
+ VFMT6_32_32_32_SINT = 115,
+ VFMT6_32_32_32_FLOAT = 116,
+ VFMT6_32_32_32_FIXED = 117,
+ VFMT6_32_32_32_32_UNORM = 128,
+ VFMT6_32_32_32_32_SNORM = 129,
+ VFMT6_32_32_32_32_FLOAT = 130,
+ VFMT6_32_32_32_32_UINT = 131,
+ VFMT6_32_32_32_32_SINT = 132,
+ VFMT6_32_32_32_32_FIXED = 133,
+};
+
+enum a6xx_tex_fmt {
+ TFMT6_A8_UNORM = 2,
+ TFMT6_8_UNORM = 3,
+ TFMT6_8_SNORM = 4,
+ TFMT6_8_UINT = 5,
+ TFMT6_8_SINT = 6,
+ TFMT6_4_4_4_4_UNORM = 8,
+ TFMT6_5_5_5_1_UNORM = 10,
+ TFMT6_5_6_5_UNORM = 14,
+ TFMT6_8_8_UNORM = 15,
+ TFMT6_8_8_SNORM = 16,
+ TFMT6_8_8_UINT = 17,
+ TFMT6_8_8_SINT = 18,
+ TFMT6_L8_A8_UNORM = 19,
+ TFMT6_16_UNORM = 21,
+ TFMT6_16_SNORM = 22,
+ TFMT6_16_FLOAT = 23,
+ TFMT6_16_UINT = 24,
+ TFMT6_16_SINT = 25,
+ TFMT6_8_8_8_8_UNORM = 48,
+ TFMT6_8_8_8_UNORM = 49,
+ TFMT6_8_8_8_8_SNORM = 50,
+ TFMT6_8_8_8_8_UINT = 51,
+ TFMT6_8_8_8_8_SINT = 52,
+ TFMT6_9_9_9_E5_FLOAT = 53,
+ TFMT6_10_10_10_2_UNORM = 54,
+ TFMT6_10_10_10_2_UINT = 58,
+ TFMT6_11_11_10_FLOAT = 66,
+ TFMT6_16_16_UNORM = 67,
+ TFMT6_16_16_SNORM = 68,
+ TFMT6_16_16_FLOAT = 69,
+ TFMT6_16_16_UINT = 70,
+ TFMT6_16_16_SINT = 71,
+ TFMT6_32_FLOAT = 74,
+ TFMT6_32_UINT = 75,
+ TFMT6_32_SINT = 76,
+ TFMT6_16_16_16_16_UNORM = 96,
+ TFMT6_16_16_16_16_SNORM = 97,
+ TFMT6_16_16_16_16_FLOAT = 98,
+ TFMT6_16_16_16_16_UINT = 99,
+ TFMT6_16_16_16_16_SINT = 100,
+ TFMT6_32_32_FLOAT = 103,
+ TFMT6_32_32_UINT = 104,
+ TFMT6_32_32_SINT = 105,
+ TFMT6_32_32_32_UINT = 114,
+ TFMT6_32_32_32_SINT = 115,
+ TFMT6_32_32_32_FLOAT = 116,
+ TFMT6_32_32_32_32_FLOAT = 130,
+ TFMT6_32_32_32_32_UINT = 131,
+ TFMT6_32_32_32_32_SINT = 132,
+ TFMT6_X8Z24_UNORM = 160,
+ TFMT6_ETC2_RG11_UNORM = 171,
+ TFMT6_ETC2_RG11_SNORM = 172,
+ TFMT6_ETC2_R11_UNORM = 173,
+ TFMT6_ETC2_R11_SNORM = 174,
+ TFMT6_ETC1 = 175,
+ TFMT6_ETC2_RGB8 = 176,
+ TFMT6_ETC2_RGBA8 = 177,
+ TFMT6_ETC2_RGB8A1 = 178,
+ TFMT6_DXT1 = 179,
+ TFMT6_DXT3 = 180,
+ TFMT6_DXT5 = 181,
+ TFMT6_RGTC1_UNORM = 183,
+ TFMT6_RGTC1_SNORM = 184,
+ TFMT6_RGTC2_UNORM = 187,
+ TFMT6_RGTC2_SNORM = 188,
+ TFMT6_BPTC_UFLOAT = 190,
+ TFMT6_BPTC_FLOAT = 191,
+ TFMT6_BPTC = 192,
+ TFMT6_ASTC_4x4 = 193,
+ TFMT6_ASTC_5x4 = 194,
+ TFMT6_ASTC_5x5 = 195,
+ TFMT6_ASTC_6x5 = 196,
+ TFMT6_ASTC_6x6 = 197,
+ TFMT6_ASTC_8x5 = 198,
+ TFMT6_ASTC_8x6 = 199,
+ TFMT6_ASTC_8x8 = 200,
+ TFMT6_ASTC_10x5 = 201,
+ TFMT6_ASTC_10x6 = 202,
+ TFMT6_ASTC_10x8 = 203,
+ TFMT6_ASTC_10x10 = 204,
+ TFMT6_ASTC_12x10 = 205,
+ TFMT6_ASTC_12x12 = 206,
+};
+
+enum a6xx_tex_fetchsize {
+ TFETCH6_1_BYTE = 0,
+ TFETCH6_2_BYTE = 1,
+ TFETCH6_4_BYTE = 2,
+ TFETCH6_8_BYTE = 3,
+ TFETCH6_16_BYTE = 4,
+};
+
+enum a6xx_depth_format {
+ DEPTH6_NONE = 0,
+ DEPTH6_16 = 1,
+ DEPTH6_24_8 = 2,
+ DEPTH6_32 = 4,
+};
+
+enum a6xx_cp_perfcounter_select {
+ PERF_CP_ALWAYS_COUNT = 0,
+};
+
+enum a6xx_tex_filter {
+ A6XX_TEX_NEAREST = 0,
+ A6XX_TEX_LINEAR = 1,
+ A6XX_TEX_ANISO = 2,
+};
+
+enum a6xx_tex_clamp {
+ A6XX_TEX_REPEAT = 0,
+ A6XX_TEX_CLAMP_TO_EDGE = 1,
+ A6XX_TEX_MIRROR_REPEAT = 2,
+ A6XX_TEX_CLAMP_TO_BORDER = 3,
+ A6XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a6xx_tex_aniso {
+ A6XX_TEX_ANISO_1 = 0,
+ A6XX_TEX_ANISO_2 = 1,
+ A6XX_TEX_ANISO_4 = 2,
+ A6XX_TEX_ANISO_8 = 3,
+ A6XX_TEX_ANISO_16 = 4,
+};
+
+enum a6xx_tex_swiz {
+ A6XX_TEX_X = 0,
+ A6XX_TEX_Y = 1,
+ A6XX_TEX_Z = 2,
+ A6XX_TEX_W = 3,
+ A6XX_TEX_ZERO = 4,
+ A6XX_TEX_ONE = 5,
+};
+
+enum a6xx_tex_type {
+ A6XX_TEX_1D = 0,
+ A6XX_TEX_2D = 1,
+ A6XX_TEX_CUBE = 2,
+ A6XX_TEX_3D = 3,
+};
+
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A6XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000
+#define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002
+#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A6XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040
+#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080
+#define REG_A6XX_CP_RB_BASE 0x00000800
+
+#define REG_A6XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A6XX_CP_RB_CNTL 0x00000802
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_LO 0x00000804
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A6XX_CP_RB_RPTR 0x00000806
+
+#define REG_A6XX_CP_RB_WPTR 0x00000807
+
+#define REG_A6XX_CP_SQE_CNTL 0x00000808
+
+#define REG_A6XX_CP_HW_FAULT 0x00000821
+
+#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
+
+#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
+
+#define REG_A6XX_CP_SQE_INSTR_BASE_LO 0x00000830
+
+#define REG_A6XX_CP_SQE_INSTR_BASE_HI 0x00000831
+
+#define REG_A6XX_CP_MISC_CNTL 0x00000840
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
+
+#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
+
+#define REG_A6XX_CP_CHICKEN_DBG 0x00000841
+
+#define REG_A6XX_CP_ADDR_MODE_CNTL 0x00000842
+
+#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843
+
+#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f
+
+static inline uint32_t REG_A6XX_CP_SCRATCH(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK 0x7ffc0000
+#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT 18
+static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A6XX_CP_PROTECT_REG_READ 0x80000000
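A sketch of composing one protect entry from these builders; the base/length values are invented, and the semantics of the READ bit and the length encoding are taken from the field names only:

	uint32_t prot = A6XX_CP_PROTECT_REG_BASE_ADDR(0x800) |
			A6XX_CP_PROTECT_REG_MASK_LEN(0x40) |
			A6XX_CP_PROTECT_REG_READ;
	gpu_write(gpu, REG_A6XX_CP_PROTECT(0), prot);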
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x000008a1
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x000008a2
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO 0x000008a3
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI 0x000008a4
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO 0x000008a5
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x000008a6
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x000008a7
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x000008a8
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_0 0x000008d0
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_1 0x000008d1
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_2 0x000008d2
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_3 0x000008d3
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_4 0x000008d4
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_5 0x000008d5
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_6 0x000008d6
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_7 0x000008d7
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_8 0x000008d8
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_9 0x000008d9
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_10 0x000008da
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_11 0x000008db
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_12 0x000008dc
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_13 0x000008dd
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_LO 0x00000900
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_HI 0x00000901
+
+#define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902
+
+#define REG_A6XX_CP_CRASH_DUMP_STATUS 0x00000903
+
+#define REG_A6XX_CP_SQE_STAT_ADDR 0x00000908
+
+#define REG_A6XX_CP_SQE_STAT_DATA 0x00000909
+
+#define REG_A6XX_CP_DRAW_STATE_ADDR 0x0000090a
+
+#define REG_A6XX_CP_DRAW_STATE_DATA 0x0000090b
+
+#define REG_A6XX_CP_ROQ_DBG_ADDR 0x0000090c
+
+#define REG_A6XX_CP_ROQ_DBG_DATA 0x0000090d
+
+#define REG_A6XX_CP_MEM_POOL_DBG_ADDR 0x0000090e
+
+#define REG_A6XX_CP_MEM_POOL_DBG_DATA 0x0000090f
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_ADDR 0x00000910
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_DATA 0x00000911
+
+#define REG_A6XX_CP_IB1_BASE 0x00000928
+
+#define REG_A6XX_CP_IB1_BASE_HI 0x00000929
+
+#define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a
+
+#define REG_A6XX_CP_IB2_BASE 0x0000092b
+
+#define REG_A6XX_CP_IB2_BASE_HI 0x0000092c
+
+#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO 0x00000980
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI 0x00000981
+
+#define REG_A6XX_CP_AHB_CNTL 0x0000098d
+
+#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00
+
+#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
+
+#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01
+
+#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
+
+#define REG_A6XX_RBBM_STATUS 0x00000210
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000
+#define A6XX_RBBM_STATUS_HLSQ_BUSY 0x00200000
+#define A6XX_RBBM_STATUS_VSC_BUSY 0x00100000
+#define A6XX_RBBM_STATUS_TPL1_BUSY 0x00080000
+#define A6XX_RBBM_STATUS_SP_BUSY 0x00040000
+#define A6XX_RBBM_STATUS_UCHE_BUSY 0x00020000
+#define A6XX_RBBM_STATUS_VPC_BUSY 0x00010000
+#define A6XX_RBBM_STATUS_VFD_BUSY 0x00008000
+#define A6XX_RBBM_STATUS_TESS_BUSY 0x00004000
+#define A6XX_RBBM_STATUS_PC_VSD_BUSY 0x00002000
+#define A6XX_RBBM_STATUS_PC_DCALL_BUSY 0x00001000
+#define A6XX_RBBM_STATUS_COM_DCOM_BUSY 0x00000800
+#define A6XX_RBBM_STATUS_LRZ_BUSY 0x00000400
+#define A6XX_RBBM_STATUS_A2D_BUSY 0x00000200
+#define A6XX_RBBM_STATUS_CCU_BUSY 0x00000100
+#define A6XX_RBBM_STATUS_RB_BUSY 0x00000080
+#define A6XX_RBBM_STATUS_RAS_BUSY 0x00000040
+#define A6XX_RBBM_STATUS_TSE_BUSY 0x00000020
+#define A6XX_RBBM_STATUS_VBIF_BUSY 0x00000010
+#define A6XX_RBBM_STATUS_GFX_DBGC_BUSY 0x00000008
+#define A6XX_RBBM_STATUS_CP_BUSY 0x00000004
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001
+
+#define REG_A6XX_RBBM_STATUS3 0x00000213
+
+#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
+
+#define REG_A6XX_RBBM_PERFCTR_CP_0_LO 0x00000400
+
+#define REG_A6XX_RBBM_PERFCTR_CP_0_HI 0x00000401
+
+#define REG_A6XX_RBBM_PERFCTR_CP_1_LO 0x00000402
+
+#define REG_A6XX_RBBM_PERFCTR_CP_1_HI 0x00000403
+
+#define REG_A6XX_RBBM_PERFCTR_CP_2_LO 0x00000404
+
+#define REG_A6XX_RBBM_PERFCTR_CP_2_HI 0x00000405
+
+#define REG_A6XX_RBBM_PERFCTR_CP_3_LO 0x00000406
+
+#define REG_A6XX_RBBM_PERFCTR_CP_3_HI 0x00000407
+
+#define REG_A6XX_RBBM_PERFCTR_CP_4_LO 0x00000408
+
+#define REG_A6XX_RBBM_PERFCTR_CP_4_HI 0x00000409
+
+#define REG_A6XX_RBBM_PERFCTR_CP_5_LO 0x0000040a
+
+#define REG_A6XX_RBBM_PERFCTR_CP_5_HI 0x0000040b
+
+#define REG_A6XX_RBBM_PERFCTR_CP_6_LO 0x0000040c
+
+#define REG_A6XX_RBBM_PERFCTR_CP_6_HI 0x0000040d
+
+#define REG_A6XX_RBBM_PERFCTR_CP_7_LO 0x0000040e
+
+#define REG_A6XX_RBBM_PERFCTR_CP_7_HI 0x0000040f
+
+#define REG_A6XX_RBBM_PERFCTR_CP_8_LO 0x00000410
+
+#define REG_A6XX_RBBM_PERFCTR_CP_8_HI 0x00000411
+
+#define REG_A6XX_RBBM_PERFCTR_CP_9_LO 0x00000412
+
+#define REG_A6XX_RBBM_PERFCTR_CP_9_HI 0x00000413
+
+#define REG_A6XX_RBBM_PERFCTR_CP_10_LO 0x00000414
+
+#define REG_A6XX_RBBM_PERFCTR_CP_10_HI 0x00000415
+
+#define REG_A6XX_RBBM_PERFCTR_CP_11_LO 0x00000416
+
+#define REG_A6XX_RBBM_PERFCTR_CP_11_HI 0x00000417
+
+#define REG_A6XX_RBBM_PERFCTR_CP_12_LO 0x00000418
+
+#define REG_A6XX_RBBM_PERFCTR_CP_12_HI 0x00000419
+
+#define REG_A6XX_RBBM_PERFCTR_CP_13_LO 0x0000041a
+
+#define REG_A6XX_RBBM_PERFCTR_CP_13_HI 0x0000041b
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_0_LO 0x0000041c
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_0_HI 0x0000041d
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_1_LO 0x0000041e
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_1_HI 0x0000041f
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_2_LO 0x00000420
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_2_HI 0x00000421
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_3_LO 0x00000422
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_3_HI 0x00000423
+
+#define REG_A6XX_RBBM_PERFCTR_PC_0_LO 0x00000424
+
+#define REG_A6XX_RBBM_PERFCTR_PC_0_HI 0x00000425
+
+#define REG_A6XX_RBBM_PERFCTR_PC_1_LO 0x00000426
+
+#define REG_A6XX_RBBM_PERFCTR_PC_1_HI 0x00000427
+
+#define REG_A6XX_RBBM_PERFCTR_PC_2_LO 0x00000428
+
+#define REG_A6XX_RBBM_PERFCTR_PC_2_HI 0x00000429
+
+#define REG_A6XX_RBBM_PERFCTR_PC_3_LO 0x0000042a
+
+#define REG_A6XX_RBBM_PERFCTR_PC_3_HI 0x0000042b
+
+#define REG_A6XX_RBBM_PERFCTR_PC_4_LO 0x0000042c
+
+#define REG_A6XX_RBBM_PERFCTR_PC_4_HI 0x0000042d
+
+#define REG_A6XX_RBBM_PERFCTR_PC_5_LO 0x0000042e
+
+#define REG_A6XX_RBBM_PERFCTR_PC_5_HI 0x0000042f
+
+#define REG_A6XX_RBBM_PERFCTR_PC_6_LO 0x00000430
+
+#define REG_A6XX_RBBM_PERFCTR_PC_6_HI 0x00000431
+
+#define REG_A6XX_RBBM_PERFCTR_PC_7_LO 0x00000432
+
+#define REG_A6XX_RBBM_PERFCTR_PC_7_HI 0x00000433
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_0_LO 0x00000434
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_0_HI 0x00000435
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_1_LO 0x00000436
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_1_HI 0x00000437
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_2_LO 0x00000438
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_2_HI 0x00000439
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_3_LO 0x0000043a
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_3_HI 0x0000043b
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_4_LO 0x0000043c
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_4_HI 0x0000043d
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_5_LO 0x0000043e
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_5_HI 0x0000043f
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_6_LO 0x00000440
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_6_HI 0x00000441
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_7_LO 0x00000442
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_7_HI 0x00000443
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_LO 0x00000444
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_HI 0x00000445
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_LO 0x00000446
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_HI 0x00000447
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_LO 0x00000448
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_HI 0x00000449
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_LO 0x0000044a
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_HI 0x0000044b
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_LO 0x0000044c
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_HI 0x0000044d
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_LO 0x0000044e
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_HI 0x0000044f
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_0_LO 0x00000450
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_0_HI 0x00000451
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_1_LO 0x00000452
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_1_HI 0x00000453
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_2_LO 0x00000454
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_2_HI 0x00000455
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_3_LO 0x00000456
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_3_HI 0x00000457
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_4_LO 0x00000458
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_4_HI 0x00000459
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_5_LO 0x0000045a
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_5_HI 0x0000045b
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_0_LO 0x0000045c
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_0_HI 0x0000045d
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_1_LO 0x0000045e
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_1_HI 0x0000045f
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_2_LO 0x00000460
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_2_HI 0x00000461
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_3_LO 0x00000462
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_3_HI 0x00000463
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_4_LO 0x00000464
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_4_HI 0x00000465
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_0_LO 0x00000466
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_0_HI 0x00000467
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_1_LO 0x00000468
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_1_HI 0x00000469
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_2_LO 0x0000046a
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_2_HI 0x0000046b
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_3_LO 0x0000046c
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_3_HI 0x0000046d
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_0_LO 0x0000046e
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_0_HI 0x0000046f
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_1_LO 0x00000470
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_1_HI 0x00000471
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_2_LO 0x00000472
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_2_HI 0x00000473
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_3_LO 0x00000474
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_3_HI 0x00000475
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_0_LO 0x00000476
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_0_HI 0x00000477
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_1_LO 0x00000478
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_1_HI 0x00000479
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_2_LO 0x0000047a
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_2_HI 0x0000047b
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_3_LO 0x0000047c
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_3_HI 0x0000047d
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_4_LO 0x0000047e
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_4_HI 0x0000047f
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_5_LO 0x00000480
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_5_HI 0x00000481
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_6_LO 0x00000482
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_6_HI 0x00000483
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_7_LO 0x00000484
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_7_HI 0x00000485
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_8_LO 0x00000486
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_8_HI 0x00000487
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_9_LO 0x00000488
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_9_HI 0x00000489
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_10_LO 0x0000048a
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_10_HI 0x0000048b
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_11_LO 0x0000048c
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_11_HI 0x0000048d
+
+#define REG_A6XX_RBBM_PERFCTR_TP_0_LO 0x0000048e
+
+#define REG_A6XX_RBBM_PERFCTR_TP_0_HI 0x0000048f
+
+#define REG_A6XX_RBBM_PERFCTR_TP_1_LO 0x00000490
+
+#define REG_A6XX_RBBM_PERFCTR_TP_1_HI 0x00000491
+
+#define REG_A6XX_RBBM_PERFCTR_TP_2_LO 0x00000492
+
+#define REG_A6XX_RBBM_PERFCTR_TP_2_HI 0x00000493
+
+#define REG_A6XX_RBBM_PERFCTR_TP_3_LO 0x00000494
+
+#define REG_A6XX_RBBM_PERFCTR_TP_3_HI 0x00000495
+
+#define REG_A6XX_RBBM_PERFCTR_TP_4_LO 0x00000496
+
+#define REG_A6XX_RBBM_PERFCTR_TP_4_HI 0x00000497
+
+#define REG_A6XX_RBBM_PERFCTR_TP_5_LO 0x00000498
+
+#define REG_A6XX_RBBM_PERFCTR_TP_5_HI 0x00000499
+
+#define REG_A6XX_RBBM_PERFCTR_TP_6_LO 0x0000049a
+
+#define REG_A6XX_RBBM_PERFCTR_TP_6_HI 0x0000049b
+
+#define REG_A6XX_RBBM_PERFCTR_TP_7_LO 0x0000049c
+
+#define REG_A6XX_RBBM_PERFCTR_TP_7_HI 0x0000049d
+
+#define REG_A6XX_RBBM_PERFCTR_TP_8_LO 0x0000049e
+
+#define REG_A6XX_RBBM_PERFCTR_TP_8_HI 0x0000049f
+
+#define REG_A6XX_RBBM_PERFCTR_TP_9_LO 0x000004a0
+
+#define REG_A6XX_RBBM_PERFCTR_TP_9_HI 0x000004a1
+
+#define REG_A6XX_RBBM_PERFCTR_TP_10_LO 0x000004a2
+
+#define REG_A6XX_RBBM_PERFCTR_TP_10_HI 0x000004a3
+
+#define REG_A6XX_RBBM_PERFCTR_TP_11_LO 0x000004a4
+
+#define REG_A6XX_RBBM_PERFCTR_TP_11_HI 0x000004a5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_0_LO 0x000004a6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_0_HI 0x000004a7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_1_LO 0x000004a8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_1_HI 0x000004a9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_2_LO 0x000004aa
+
+#define REG_A6XX_RBBM_PERFCTR_SP_2_HI 0x000004ab
+
+#define REG_A6XX_RBBM_PERFCTR_SP_3_LO 0x000004ac
+
+#define REG_A6XX_RBBM_PERFCTR_SP_3_HI 0x000004ad
+
+#define REG_A6XX_RBBM_PERFCTR_SP_4_LO 0x000004ae
+
+#define REG_A6XX_RBBM_PERFCTR_SP_4_HI 0x000004af
+
+#define REG_A6XX_RBBM_PERFCTR_SP_5_LO 0x000004b0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_5_HI 0x000004b1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_6_LO 0x000004b2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_6_HI 0x000004b3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_7_LO 0x000004b4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_7_HI 0x000004b5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_8_LO 0x000004b6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_8_HI 0x000004b7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_9_LO 0x000004b8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_9_HI 0x000004b9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_10_LO 0x000004ba
+
+#define REG_A6XX_RBBM_PERFCTR_SP_10_HI 0x000004bb
+
+#define REG_A6XX_RBBM_PERFCTR_SP_11_LO 0x000004bc
+
+#define REG_A6XX_RBBM_PERFCTR_SP_11_HI 0x000004bd
+
+#define REG_A6XX_RBBM_PERFCTR_SP_12_LO 0x000004be
+
+#define REG_A6XX_RBBM_PERFCTR_SP_12_HI 0x000004bf
+
+#define REG_A6XX_RBBM_PERFCTR_SP_13_LO 0x000004c0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_13_HI 0x000004c1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_14_LO 0x000004c2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_14_HI 0x000004c3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_15_LO 0x000004c4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_15_HI 0x000004c5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_16_LO 0x000004c6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_16_HI 0x000004c7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_17_LO 0x000004c8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_17_HI 0x000004c9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_18_LO 0x000004ca
+
+#define REG_A6XX_RBBM_PERFCTR_SP_18_HI 0x000004cb
+
+#define REG_A6XX_RBBM_PERFCTR_SP_19_LO 0x000004cc
+
+#define REG_A6XX_RBBM_PERFCTR_SP_19_HI 0x000004cd
+
+#define REG_A6XX_RBBM_PERFCTR_SP_20_LO 0x000004ce
+
+#define REG_A6XX_RBBM_PERFCTR_SP_20_HI 0x000004cf
+
+#define REG_A6XX_RBBM_PERFCTR_SP_21_LO 0x000004d0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_21_HI 0x000004d1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_22_LO 0x000004d2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_22_HI 0x000004d3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_23_LO 0x000004d4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_23_HI 0x000004d5
+
+#define REG_A6XX_RBBM_PERFCTR_RB_0_LO 0x000004d6
+
+#define REG_A6XX_RBBM_PERFCTR_RB_0_HI 0x000004d7
+
+#define REG_A6XX_RBBM_PERFCTR_RB_1_LO 0x000004d8
+
+#define REG_A6XX_RBBM_PERFCTR_RB_1_HI 0x000004d9
+
+#define REG_A6XX_RBBM_PERFCTR_RB_2_LO 0x000004da
+
+#define REG_A6XX_RBBM_PERFCTR_RB_2_HI 0x000004db
+
+#define REG_A6XX_RBBM_PERFCTR_RB_3_LO 0x000004dc
+
+#define REG_A6XX_RBBM_PERFCTR_RB_3_HI 0x000004dd
+
+#define REG_A6XX_RBBM_PERFCTR_RB_4_LO 0x000004de
+
+#define REG_A6XX_RBBM_PERFCTR_RB_4_HI 0x000004df
+
+#define REG_A6XX_RBBM_PERFCTR_RB_5_LO 0x000004e0
+
+#define REG_A6XX_RBBM_PERFCTR_RB_5_HI 0x000004e1
+
+#define REG_A6XX_RBBM_PERFCTR_RB_6_LO 0x000004e2
+
+#define REG_A6XX_RBBM_PERFCTR_RB_6_HI 0x000004e3
+
+#define REG_A6XX_RBBM_PERFCTR_RB_7_LO 0x000004e4
+
+#define REG_A6XX_RBBM_PERFCTR_RB_7_HI 0x000004e5
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_0_LO 0x000004e6
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_0_HI 0x000004e7
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_1_LO 0x000004e8
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_1_HI 0x000004e9
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_0_LO 0x000004ea
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_0_HI 0x000004eb
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_1_LO 0x000004ec
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_1_HI 0x000004ed
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_2_LO 0x000004ee
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_2_HI 0x000004ef
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_3_LO 0x000004f0
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_3_HI 0x000004f1
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_0_LO 0x000004f2
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_0_HI 0x000004f3
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_1_LO 0x000004f4
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_1_HI 0x000004f5
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_2_LO 0x000004f6
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_2_HI 0x000004f7
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_3_LO 0x000004f8
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_3_HI 0x000004f9
+
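+/* Illustrative sketch (not part of the generated header): each RBBM
+ * performance counter above is a free-running 64-bit value split across
+ * a _LO/_HI register pair. A carry-safe read re-checks HI after LO:
+ *
+ *   uint32_t lo, hi;
+ *   do {
+ *           hi = gpu_read(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+ *           lo = gpu_read(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO);
+ *   } while (hi != gpu_read(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_HI));
+ *   uint64_t count = ((uint64_t)hi << 32) | lo;
+ *
+ * gpu_read() is a placeholder for the driver's register accessor.
+ */
+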
+#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 0x00000502
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 0x00000503
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 0x00000504
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000505
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000507
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000508
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000509
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000050a
+
+#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
+
+#define REG_A6XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A6XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+
+#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
+
+#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A6XX_RBBM_INT_0_MASK 0x00000038
+
+#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042
+
+#define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A6XX_RBBM_RAC_THRESHOLD_CNT 0x00000044
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP1 0x000000b1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP2 0x000000b2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP3 0x000000b3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP0 0x000000b4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP1 0x000000b5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP2 0x000000b6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP3 0x000000b7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP0 0x000000b8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP1 0x000000b9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP2 0x000000ba
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP3 0x000000bb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP0 0x000000bc
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP1 0x000000bd
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP2 0x000000be
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP3 0x000000bf
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP0 0x000000c0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP1 0x000000c1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP2 0x000000c2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP3 0x000000c3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP0 0x000000c4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP1 0x000000c5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP2 0x000000c6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP3 0x000000c7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP0 0x000000c8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP1 0x000000c9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP2 0x000000ca
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP3 0x000000cb
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP0 0x000000cc
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP1 0x000000cd
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP2 0x000000ce
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP3 0x000000cf
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP0 0x000000d0
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP1 0x000000d1
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP2 0x000000d2
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP3 0x000000d3
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP0 0x000000d4
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP1 0x000000d5
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP2 0x000000d6
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP3 0x000000d7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP0 0x000000d8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP1 0x000000d9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP2 0x000000da
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP3 0x000000db
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP0 0x000000dc
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP1 0x000000dd
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP2 0x000000de
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP3 0x000000df
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP0 0x000000e0
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP1 0x000000e1
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP2 0x000000e2
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP3 0x000000e3
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP0 0x000000e4
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP1 0x000000e5
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP2 0x000000e6
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP3 0x000000e7
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP0 0x000000e8
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP1 0x000000e9
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP2 0x000000ea
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP3 0x000000eb
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP0 0x000000ec
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP1 0x000000ed
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP2 0x000000ee
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP3 0x000000ef
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB0 0x000000f0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB1 0x000000f1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB2 0x000000f2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB3 0x000000f3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB0 0x000000f4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB1 0x000000f5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB2 0x000000f6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB3 0x000000f7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU0 0x000000f8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU1 0x000000f9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU2 0x000000fa
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU3 0x000000fb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000100
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000101
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000102
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000103
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RAC 0x00000104
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RAC 0x00000105
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_RAC 0x00000106
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RAC 0x00000107
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000108
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000109
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000010a
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_UCHE 0x0000010b
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0000010c
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0000010d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0000010e
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_UCHE 0x0000010f
+
+#define REG_A6XX_RBBM_CLOCK_HYST_UCHE 0x00000110
+
+#define REG_A6XX_RBBM_CLOCK_MODE_VFD 0x00000111
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_VFD 0x00000112
+
+#define REG_A6XX_RBBM_CLOCK_HYST_VFD 0x00000113
+
+#define REG_A6XX_RBBM_CLOCK_MODE_GPC 0x00000114
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GPC 0x00000115
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GPC 0x00000116
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00000117
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00000118
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00000119
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0000011a
+
+#define REG_A6XX_RBBM_CLOCK_MODE_HLSQ 0x0000011b
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_C 0x00000602
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_D 0x00000603
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
+
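+/* Illustrative sketch (not part of the generated header): the field
+ * helpers above only shift-and-mask their argument, so a debug-bus
+ * select value is composed by OR-ing them together:
+ *
+ *   uint32_t sel = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(idx) |
+ *                  A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(blk);
+ *   gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, sel);
+ *
+ * idx, blk and gpu_write() are placeholders for the caller's values and
+ * the driver's register accessor.
+ */
+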
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLT 0x00000604
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLM 0x00000605
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 0x00000608
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 0x00000609
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 0x0000060a
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 0x0000060b
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 0x0000060c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 0x0000060d
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 0x0000060e
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 0x0000060f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000610
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000611
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000062f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
+
+#define REG_A6XX_VSC_PERFCTR_VSC_SEL_0 0x00000cd8
+
+#define REG_A6XX_VSC_PERFCTR_VSC_SEL_1 0x00000cd9
+
+#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_0 0x00008610
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_1 0x00008611
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_2 0x00008612
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_3 0x00008613
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_0 0x00008614
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_1 0x00008615
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_2 0x00008616
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_3 0x00008617
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_0 0x00008618
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_1 0x00008619
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_2 0x0000861a
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_3 0x0000861b
+
+#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
+
+#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_0 0x00008e10
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_1 0x00008e11
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_2 0x00008e12
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_3 0x00008e13
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_4 0x00008e14
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_5 0x00008e15
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_6 0x00008e16
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_7 0x00008e17
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_0 0x00008e18
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_1 0x00008e19
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_2 0x00008e1a
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_3 0x00008e1b
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_4 0x00008e1c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_0 0x00008e2c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_1 0x00008e2d
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_2 0x00008e2e
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_3 0x00008e2f
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d
+
+#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
+
+#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00
+
+#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_0 0x00009e34
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_1 0x00009e35
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_2 0x00009e36
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_3 0x00009e37
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_4 0x00009e38
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_5 0x00009e39
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_6 0x00009e3a
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_7 0x00009e3b
+
+#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x0000be10
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x0000be11
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x0000be12
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x0000be13
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x0000be14
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x0000be15
+
+#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
+
+#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000
+
+#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_0 0x0000a610
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_1 0x0000a611
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_2 0x0000a612
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_3 0x0000a613
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_4 0x0000a614
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_5 0x0000a615
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_6 0x0000a616
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_7 0x0000a617
+
+#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_0 0x00009604
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_1 0x00009605
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_2 0x00009606
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_3 0x00009607
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_4 0x00009608
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_5 0x00009609
+
+#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
+
+#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_LO 0x00000e05
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_HI 0x00000e06
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_LO 0x00000e07
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_HI 0x00000e08
+
+#define REG_A6XX_UCHE_TRAP_BASE_LO 0x00000e09
+
+#define REG_A6XX_UCHE_TRAP_BASE_HI 0x00000e0a
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e0b
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e0c
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e0d
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e0e
+
+#define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17
+
+#define REG_A6XX_UCHE_FILTER_CNTL 0x00000e18
+
+#define REG_A6XX_UCHE_CLIENT_PF 0x00000e19
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__MASK 0x000000ff
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT 0
+static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
+{
+ return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
+}
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e1c
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e1d
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e1e
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e1f
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e20
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e21
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e22
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e23
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_8 0x00000e24
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_9 0x00000e25
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_10 0x00000e26
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_11 0x00000e27
+
+#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
+
+#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_0 0x0000ae10
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_1 0x0000ae11
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_2 0x0000ae12
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_3 0x0000ae13
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_4 0x0000ae14
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_5 0x0000ae15
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_6 0x0000ae16
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_7 0x0000ae17
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_8 0x0000ae18
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_9 0x0000ae19
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_10 0x0000ae1a
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_11 0x0000ae1b
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_12 0x0000ae1c
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_13 0x0000ae1d
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_14 0x0000ae1e
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_15 0x0000ae1f
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_16 0x0000ae20
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_17 0x0000ae21
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_18 0x0000ae22
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_19 0x0000ae23
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_20 0x0000ae24
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_21 0x0000ae25
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_22 0x0000ae26
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_23 0x0000ae27
+
+#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
+
+#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_0 0x0000b610
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_1 0x0000b611
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_2 0x0000b612
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_3 0x0000b613
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_4 0x0000b614
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_5 0x0000b615
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_6 0x0000b616
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_7 0x0000b617
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_8 0x0000b618
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_9 0x0000b619
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_10 0x0000b61a
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_11 0x0000b61b
+
+#define REG_A6XX_VBIF_VERSION 0x00003000
+
+#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00018400
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00018401
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00018402
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00018403
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00018404
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00018405
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00018408
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00018409
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0001840a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0001840b
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0001840c
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0001840d
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0001840e
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0001840f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00018410
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00018411
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0001842f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00018430
+
+#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00021140
+
+#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00021148
+
+#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00021540
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00021541
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00021542
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00021543
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00021544
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00021545
+
+#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00021572
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00021573
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00021574
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00021575
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00021576
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00021577
+
+#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000215a4
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000215a5
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000215a6
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000215a7
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000215a8
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000215a9
+
+#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000215d6
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000215d7
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000215d8
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000215d9
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000215da
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000215db
+
+#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x000a0000
+
+#define REG_A6XX_X1_WINDOW_OFFSET 0x000088d4
+#define A6XX_X1_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X1_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X1_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X1_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X1_WINDOW_OFFSET_X__SHIFT) & A6XX_X1_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X1_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X1_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X1_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X1_WINDOW_OFFSET_Y__SHIFT) & A6XX_X1_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_X2_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_X2_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X2_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X2_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X2_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X2_WINDOW_OFFSET_X__SHIFT) & A6XX_X2_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X2_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X2_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X2_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X2_WINDOW_OFFSET_Y__SHIFT) & A6XX_X2_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_X3_WINDOW_OFFSET 0x0000b307
+#define A6XX_X3_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X3_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X3_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X3_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X3_WINDOW_OFFSET_X__SHIFT) & A6XX_X3_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X3_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X3_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X3_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X3_WINDOW_OFFSET_Y__SHIFT) & A6XX_X3_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_X1_BIN_SIZE 0x000080a1
+#define A6XX_X1_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X1_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X1_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X1_BIN_SIZE_WIDTH__SHIFT) & A6XX_X1_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X1_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X1_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X1_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X1_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X1_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_X2_BIN_SIZE 0x00008800
+#define A6XX_X2_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X2_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X2_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X2_BIN_SIZE_WIDTH__SHIFT) & A6XX_X2_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X2_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X2_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X2_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X2_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X2_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_X3_BIN_SIZE 0x000088d3
+#define A6XX_X3_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X3_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X3_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X3_BIN_SIZE_WIDTH__SHIFT) & A6XX_X3_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X3_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X3_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X3_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X3_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X3_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
+#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
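+/* Illustrative sketch (not part of the generated header): the BIN_SIZE
+ * helpers take pixel dimensions and encode them in hardware units --
+ * width in multiples of 32 and height in multiples of 16, hence the
+ * (val >> 5) and (val >> 4) above. Programming a 64x32 bin would be:
+ *
+ *   gpu_write(gpu, REG_A6XX_VSC_BIN_SIZE,
+ *             A6XX_VSC_BIN_SIZE_WIDTH(64) | A6XX_VSC_BIN_SIZE_HEIGHT(32));
+ *
+ * gpu_write() is a placeholder for the driver's register accessor.
+ */
+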
+#define REG_A6XX_VSC_SIZE_ADDRESS_LO 0x00000c03
+
+#define REG_A6XX_VSC_SIZE_ADDRESS_HI 0x00000c04
+
+#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
+#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe
+#define A6XX_VSC_BIN_COUNT_NX__SHIFT 1
+static inline uint32_t A6XX_VSC_BIN_COUNT_NX(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NX__SHIFT) & A6XX_VSC_BIN_COUNT_NX__MASK;
+}
+#define A6XX_VSC_BIN_COUNT_NY__MASK 0x001ff800
+#define A6XX_VSC_BIN_COUNT_NY__SHIFT 11
+static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK;
+}
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_W__MASK 0x03f00000
+#define A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_H__MASK 0xfc000000
+#define A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT 26
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
+
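+/* Illustrative sketch (not part of the generated header): the VSC pipe
+ * config registers form an array, which is why
+ * REG_A6XX_VSC_PIPE_CONFIG_REG() takes an index. A loop over
+ * hypothetical per-pipe x/y/w/h values might look like:
+ *
+ *   for (i = 0; i < npipes; i++)
+ *           gpu_write(gpu, REG_A6XX_VSC_PIPE_CONFIG_REG(i),
+ *                     A6XX_VSC_PIPE_CONFIG_REG_X(pipe[i].x) |
+ *                     A6XX_VSC_PIPE_CONFIG_REG_Y(pipe[i].y) |
+ *                     A6XX_VSC_PIPE_CONFIG_REG_W(pipe[i].w) |
+ *                     A6XX_VSC_PIPE_CONFIG_REG_H(pipe[i].h));
+ */
+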
+#define REG_A6XX_VSC_XXX_ADDRESS_LO 0x00000c30
+
+#define REG_A6XX_VSC_XXX_ADDRESS_HI 0x00000c31
+
+#define REG_A6XX_VSC_XXX_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34
+
+#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_HI 0x00000c35
+
+#define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36
+
+static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+
+#define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001
+
+#define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004
+
+#define REG_A6XX_GRAS_CNTL 0x00008005
+#define A6XX_GRAS_CNTL_VARYING 0x00000001
+#define A6XX_GRAS_CNTL_XCOORD 0x00000040
+#define A6XX_GRAS_CNTL_YCOORD 0x00000080
+#define A6XX_GRAS_CNTL_ZCOORD 0x00000100
+#define A6XX_GRAS_CNTL_WCOORD 0x00000200
+
+#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_XOFFSET_0 0x00008010
+#define A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_XSCALE_0 0x00008011
+#define A6XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_YOFFSET_0 0x00008012
+#define A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_YSCALE_0 0x00008013
+#define A6XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_ZOFFSET_0 0x00008014
+#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_ZSCALE_0 0x00008015
+#define A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
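+/* Illustrative note (not part of the generated header): the viewport
+ * helpers above pack the raw IEEE-754 bits of a float via fui(), so the
+ * register holds the float bit pattern rather than an integer
+ * conversion, e.g.:
+ *
+ *   gpu_write(gpu, REG_A6XX_GRAS_CL_VPORT_XSCALE_0,
+ *             A6XX_GRAS_CL_VPORT_XSCALE_0(width / 2.0f));
+ *
+ * fui() is assumed to be the driver's float-to-u32 bit-cast helper and
+ * gpu_write() a placeholder accessor.
+ */
+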
+#define REG_A6XX_GRAS_SU_CNTL 0x00008090
+#define A6XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A6XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A6XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A6XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
+
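+/* Illustrative note (not part of the generated header): LINEHALFWIDTH
+ * is an 8-bit fixed-point field with two fractional bits (the val * 4.0
+ * above), and the name suggests it takes half the line width. A
+ * 3-pixel-wide line with back-face culling might then be:
+ *
+ *   uint32_t su = A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(3.0f / 2.0f) |
+ *                 A6XX_GRAS_SU_CNTL_CULL_BACK;
+ *
+ * The half-width convention is inferred from the field name; treat it
+ * as an assumption.
+ */
+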
+#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092
+#define A6XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00008096
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x00008097
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO 0x00008098
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8099 0x00008099
+
+#define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b
+
+#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_GRAS_UNKNOWN_80A4 0x000080a4
+
+#define REG_A6XX_GRAS_UNKNOWN_80A5 0x000080a5
+
+#define REG_A6XX_GRAS_UNKNOWN_80A6 0x000080a6
+
+#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af
+
+#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x000080b0
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x000080b1
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x000080d0
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x000080d1
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
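+
+/* Each scissor register above packs x in bits [14:0] and y in bits [30:16],
+ * and the TL/BR pairs sit at consecutive offsets, so both halves fit in one
+ * write burst. Illustrative, assuming the driver's ring helpers:
+ *
+ *   OUT_PKT4(ring, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL, 2);
+ *   OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
+ *                  A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
+ *   OUT_RING(ring, A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
+ *                  A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));
+ */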
+
+#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100
+#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
+#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
+#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+
+#define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102
+#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO 0x00008103
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_HI 0x00008104
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
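+
+/* Note the LRZ pitch helpers above pre-shift by 5: callers pass byte
+ * pitches, which are assumed to be 32-byte aligned, and the register
+ * stores them in 32-byte units. */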
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x00008106
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107
+
+#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+
+#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
+#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_TL_X_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_X_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_X_X__SHIFT) & A6XX_GRAS_2D_SRC_TL_X_X__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
+#define A6XX_GRAS_2D_SRC_BR_X_X__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_BR_X_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_X_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_X_X__SHIFT) & A6XX_GRAS_2D_SRC_BR_X_X__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
+#define A6XX_GRAS_2D_SRC_TL_Y_Y__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_Y_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_TL_Y_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
+#define A6XX_GRAS_2D_SRC_BR_Y_Y__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_Y_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_BR_Y_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_TL 0x00008405
+#define A6XX_GRAS_2D_DST_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_Y__SHIFT) & A6XX_GRAS_2D_DST_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_BR 0x00008406
+#define A6XX_GRAS_2D_DST_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_RESOLVE_CNTL_1 0x0000840a
+#define A6XX_GRAS_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_X__MASK;
+}
+#define A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_RESOLVE_CNTL_2 0x0000840b
+#define A6XX_GRAS_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_X__MASK;
+}
+#define A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600
+
+#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_RB_UNKNOWN_8804 0x00008804
+
+#define REG_A6XX_RB_UNKNOWN_8805 0x00008805
+
+#define REG_A6XX_RB_UNKNOWN_8806 0x00008806
+
+#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
+#define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A6XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400
+
+#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
+#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_RB_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+#define REG_A6XX_RB_RENDER_COMPONENTS 0x0000880d
+#define A6XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
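+
+/* Each RTn field above is a 4-bit component-enable mask (presumably one bit
+ * per color component) for the corresponding render target, e.g.:
+ *
+ *   A6XX_RB_RENDER_COMPONENTS_RT0(0xf) | A6XX_RB_RENDER_COMPONENTS_RT1(0xf)
+ */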
+
+#define REG_A6XX_RB_DITHER_CNTL 0x0000880e
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK 0x00000003
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT 0
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK 0x0000000c
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT 2
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK 0x00000030
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT 4
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK 0x000000c0
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT 6
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK 0x00000300
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT 8
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK 0x00000c00
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT 10
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00003000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK 0x0000c000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT 14
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK;
+}
+
+#define REG_A6XX_RB_SRGB_CNTL 0x0000880f
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
+
+#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
+
+#define REG_A6XX_RB_UNKNOWN_881A 0x0000881a
+
+#define REG_A6XX_RB_UNKNOWN_881B 0x0000881b
+
+#define REG_A6XX_RB_UNKNOWN_881C 0x0000881c
+
+#define REG_A6XX_RB_UNKNOWN_881D 0x0000881d
+
+#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e
+
+static inline uint32_t REG_A6XX_RB_MRT(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A6XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A6XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A6XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A6XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x00008821 + 0x8*i0; }
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
+#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
+#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x00008825 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x00008826 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
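+
+/* REG_A6XX_RB_MRT() and the per-field variants above index one block of
+ * eight registers per render target (the 0x8*i0 stride), and the
+ * PITCH/ARRAY_PITCH helpers pre-shift by 6, i.e. callers pass byte pitches
+ * assumed to be 64-byte aligned. Illustrative, assuming the driver's ring
+ * helpers:
+ *
+ *   OUT_PKT4(ring, REG_A6XX_RB_MRT_PITCH(i), 1);
+ *   OUT_RING(ring, A6XX_RB_MRT_PITCH(stride_bytes));
+ */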
+
+#define REG_A6XX_RB_BLEND_RED_F32 0x00008860
+#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_RED_F32__SHIFT) & A6XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_GREEN_F32 0x00008861
+#define A6XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_GREEN_F32__SHIFT) & A6XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_BLUE_F32 0x00008862
+#define A6XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_BLUE_F32__SHIFT) & A6XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_ALPHA_F32 0x00008863
+#define A6XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_ALPHA_F32__SHIFT) & A6XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A6XX_RB_ALPHA_CONTROL 0x00008864
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_CNTL 0x00008865
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
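+
+/* ENABLE_BLEND above is presumably a per-MRT bitmask (bit n enables
+ * blending on MRT n), and SAMPLE_MASK covers up to 16 MSAA samples. */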
+
+#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
+#define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873
+#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_LO 0x00008875
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_HI 0x00008876
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877
+
+#define REG_A6XX_RB_UNKNOWN_8878 0x00008878
+
+#define REG_A6XX_RB_UNKNOWN_8879 0x00008879
+
+#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A6XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
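+
+/* Front-face stencil state sits in the low fields, back-face ("_BF") state
+ * in the high fields. An illustrative always-pass setup (enum values
+ * assumed from adreno_common.xml.h):
+ *
+ *   A6XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
+ *   A6XX_RB_STENCIL_CONTROL_FUNC_BF(FUNC_ALWAYS)
+ */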
+
+#define REG_A6XX_RB_STENCIL_INFO 0x00008881
+#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+
+#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882
+#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_LO 0x00008884
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_HI 0x00008885
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886
+
+#define REG_A6XX_RB_STENCILREF 0x00008887
+#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff
+#define A6XX_RB_STENCILREF_REF__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
+}
+
+#define REG_A6XX_RB_STENCILMASK 0x00008888
+#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
+#define A6XX_RB_STENCILMASK_MASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
+}
+
+#define REG_A6XX_RB_STENCILWRMASK 0x00008889
+#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
+#define A6XX_RB_STENCILWRMASK_WRMASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
+#define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
+
+#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
+#define A6XX_RB_BLIT_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2
+#define A6XX_RB_BLIT_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
+
+#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_LO 0x000088d8
+
+#define REG_A6XX_RB_BLIT_DST_HI 0x000088d9
+
+#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
+#define A6XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_LO 0x000088dc
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_HI 0x000088dd
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW2 0x000088e1
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW3 0x000088e2
+
+#define REG_A6XX_RB_BLIT_INFO 0x000088e3
+#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
+#define A6XX_RB_BLIT_INFO_FAST_CLEAR 0x00000002
+#define A6XX_RB_BLIT_INFO_INTEGER 0x00000004
+#define A6XX_RB_BLIT_INFO_UNK3 0x00000008
+#define A6XX_RB_BLIT_INFO_MASK__MASK 0x000000f0
+#define A6XX_RB_BLIT_INFO_MASK__SHIFT 4
+static inline uint32_t A6XX_RB_BLIT_INFO_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_MASK__SHIFT) & A6XX_RB_BLIT_INFO_MASK__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x00008900
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x00008901
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x00008904 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_LO 0x00008927
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_HI 0x00008928
+
+#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_2D_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000
+
+#define REG_A6XX_RB_2D_DST_LO 0x00008c18
+
+#define REG_A6XX_RB_2D_DST_HI 0x00008c19
+
+#define REG_A6XX_RB_2D_DST_SIZE 0x00008c1a
+#define A6XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A6XX_RB_2D_DST_SIZE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_LO 0x00008c20
+
+#define REG_A6XX_RB_2D_DST_FLAGS_HI 0x00008c21
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C2 0x00008c2e
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C3 0x00008c2f
+
+#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
+
+#define REG_A6XX_RB_CCU_CNTL 0x00008e07
+
+#define REG_A6XX_VPC_UNKNOWN_9101 0x00009101
+
+#define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104
+
+#define REG_A6XX_VPC_UNKNOWN_9108 0x00009108
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9210 0x00009210
+
+#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211
+
+static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+#define REG_A6XX_VPC_SO_CNTL 0x00009216
+#define A6XX_VPC_SO_CNTL_ENABLE 0x00010000
+
+#define REG_A6XX_VPC_SO_PROG 0x00009217
+#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
+#define A6XX_VPC_SO_PROG_A_BUF__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_A_BUF__SHIFT) & A6XX_VPC_SO_PROG_A_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
+#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_EN 0x00000800
+#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
+#define A6XX_VPC_SO_PROG_B_BUF__SHIFT 12
+static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_B_BUF__SHIFT) & A6XX_VPC_SO_PROG_B_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
+#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14
+static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_EN 0x00800000
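+
+/* The A_OFF/B_OFF stream-out helpers above pre-shift by 2: callers pass
+ * byte offsets that are assumed to be dword-aligned. */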
+
+static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000921b + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000921d + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000921f + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x00009220 + 0x7*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9236 0x00009236
+
+#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300
+
+#define REG_A6XX_VPC_PACK 0x00009301
+#define A6XX_VPC_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_PACK_NUMNONPOSVAR__MASK 0x0000ff00
+#define A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT 8
+static inline uint32_t A6XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A6XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_PSIZELOC__SHIFT) & A6XX_VPC_PACK_PSIZELOC__MASK;
+}
+
+#define REG_A6XX_VPC_CNTL_0 0x00009304
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK 0x000000ff
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_CNTL_0_VARYING 0x00010000
+
+#define REG_A6XX_VPC_SO_BUF_CNTL 0x00009305
+#define A6XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
+#define A6XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
+#define A6XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
+#define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
+#define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+
+#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600
+
+#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
+
+#define REG_A6XX_PC_UNKNOWN_9801 0x00009801
+
+#define REG_A6XX_PC_RESTART_INDEX 0x00009803
+
+#define REG_A6XX_PC_MODE_CNTL 0x00009804
+
+#define REG_A6XX_PC_UNKNOWN_9805 0x00009805
+
+#define REG_A6XX_PC_UNKNOWN_9981 0x00009981
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
+#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
+#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_1 0x00009b01
+#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK 0x0000007f
+#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06
+
+#define REG_A6XX_PC_UNKNOWN_9B07 0x00009b07
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR_LO 0x00009e08
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR_HI 0x00009e09
+
+#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
+
+#define REG_A6XX_VFD_CONTROL_0 0x0000a000
+#define A6XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A6XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A6XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_1 0x0000a001
+#define A6XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A6XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_2 0x0000a002
+#define A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_3 0x0000a003
+#define A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_4 0x0000a004
+
+#define REG_A6XX_VFD_CONTROL_5 0x0000a005
+
+#define REG_A6XX_VFD_CONTROL_6 0x0000a006
+
+#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
+#define A6XX_VFD_MODE_CNTL_BINNING_PASS 0x00000001
+
+#define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008
+
+#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
+
+#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
+
+static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000a011 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A6XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_vtx_fmt val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A6XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A6XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_UNK30 0x40000000
+#define A6XX_VFD_DECODE_INSTR_FLOAT 0x80000000
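+
+/* A minimal, illustrative vertex-fetch decode entry ("fmt" standing in for
+ * an enum a6xx_vtx_fmt value; ring helpers assumed from the driver):
+ *
+ *   OUT_PKT4(ring, REG_A6XX_VFD_DECODE_INSTR(i), 1);
+ *   OUT_RING(ring, A6XX_VFD_DECODE_INSTR_IDX(i) |
+ *                  A6XX_VFD_DECODE_INSTR_FORMAT(fmt) |
+ *                  A6XX_VFD_DECODE_INSTR_SWAP(XYZW) |
+ *                  A6XX_VFD_DECODE_INSTR_FLOAT);
+ */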
+
+static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_A0F8 0x0000a0f8
+
+#define REG_A6XX_SP_PRIMITIVE_CNTL 0x0000a802
+#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f
+#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0
+static inline uint32_t A6XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c
+
+#define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d
+
+#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
+
+#define REG_A6XX_SP_VS_CONFIG 0x0000a823
+#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
+
+#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_HS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_HS_UNKNOWN_A831 0x0000a831
+
+#define REG_A6XX_SP_HS_OBJ_START_LO 0x0000a834
+
+#define REG_A6XX_SP_HS_OBJ_START_HI 0x0000a835
+
+#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
+
+#define REG_A6XX_SP_HS_CONFIG 0x0000a83b
+#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
+
+#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_DS_OBJ_START_LO 0x0000a85c
+
+#define REG_A6XX_SP_DS_OBJ_START_HI 0x0000a85d
+
+#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
+
+#define REG_A6XX_SP_DS_CONFIG 0x0000a863
+#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
+
+#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_GS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_GS_UNKNOWN_A871 0x0000a871
+
+#define REG_A6XX_SP_GS_OBJ_START_LO 0x0000a88d
+
+#define REG_A6XX_SP_GS_OBJ_START_HI 0x0000a88e
+
+#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
+
+#define REG_A6XX_SP_GS_CONFIG 0x0000a894
+#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
+
+#define REG_A6XX_SP_VS_TEX_SAMP_LO 0x0000a8a0
+
+#define REG_A6XX_SP_VS_TEX_SAMP_HI 0x0000a8a1
+
+#define REG_A6XX_SP_HS_TEX_SAMP_LO 0x0000a8a2
+
+#define REG_A6XX_SP_HS_TEX_SAMP_HI 0x0000a8a3
+
+#define REG_A6XX_SP_DS_TEX_SAMP_LO 0x0000a8a4
+
+#define REG_A6XX_SP_DS_TEX_SAMP_HI 0x0000a8a5
+
+#define REG_A6XX_SP_GS_TEX_SAMP_LO 0x0000a8a6
+
+#define REG_A6XX_SP_GS_TEX_SAMP_HI 0x0000a8a7
+
+#define REG_A6XX_SP_VS_TEX_CONST_LO 0x0000a8a8
+
+#define REG_A6XX_SP_VS_TEX_CONST_HI 0x0000a8a9
+
+#define REG_A6XX_SP_HS_TEX_CONST_LO 0x0000a8aa
+
+#define REG_A6XX_SP_HS_TEX_CONST_HI 0x0000a8ab
+
+#define REG_A6XX_SP_DS_TEX_CONST_LO 0x0000a8ac
+
+#define REG_A6XX_SP_DS_TEX_CONST_HI 0x0000a8ad
+
+#define REG_A6XX_SP_GS_TEX_CONST_LO 0x0000a8ae
+
+#define REG_A6XX_SP_GS_TEX_CONST_HI 0x0000a8af
+
+#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983
+
+#define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984
+
+#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
+#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
+#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+
+#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_SP_FS_RENDER_COMPONENTS 0x0000a98b
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK;
+}
+
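+/*
+ * Each render target gets one 4-bit field in the register above. A sketch,
+ * assuming the nibble is a per-component (x/y/z/w) write-enable mask and
+ * using illustrative values:
+ *
+ *	comps = A6XX_SP_FS_RENDER_COMPONENTS_RT0(0xf) |
+ *		A6XX_SP_FS_RENDER_COMPONENTS_RT1(0x3);
+ */
+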
+#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A6XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
+
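+/*
+ * Per-MRT registers are exposed as accessor functions that take the element
+ * index, so REG_A6XX_SP_FS_MRT_REG(2) resolves to 0x0000a998.
+ */
+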
+#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
+
+#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
+
+#define REG_A6XX_SP_FS_TEX_SAMP_LO 0x0000a9e0
+
+#define REG_A6XX_SP_FS_TEX_SAMP_HI 0x0000a9e1
+
+#define REG_A6XX_SP_CS_TEX_SAMP_LO 0x0000a9e2
+
+#define REG_A6XX_SP_CS_TEX_SAMP_HI 0x0000a9e3
+
+#define REG_A6XX_SP_FS_TEX_CONST_LO 0x0000a9e4
+
+#define REG_A6XX_SP_FS_TEX_CONST_HI 0x0000a9e5
+
+#define REG_A6XX_SP_CS_TEX_CONST_LO 0x0000a9e6
+
+#define REG_A6XX_SP_CS_TEX_CONST_HI 0x0000a9e7
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_CS_OBJ_START_LO 0x0000a9b4
+
+#define REG_A6XX_SP_CS_OBJ_START_HI 0x0000a9b5
+
+#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
+
+#define REG_A6XX_SP_UNKNOWN_AB00 0x0000ab00
+
+#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
+#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+
+#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+
+#define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04
+
+#define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f
+
+#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+
+#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000b302
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000b303
+
+#define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304
+
+#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+
+#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
+
+#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
+
+#define REG_A6XX_SP_UNKNOWN_B600 0x0000b600
+
+#define REG_A6XX_SP_UNKNOWN_B605 0x0000b605
+
+#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
+}
+
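+/*
+ * The CONSTLEN helpers above pre-divide their argument by four (val >> 2),
+ * so callers pass the raw constant length and the register stores it in
+ * units of four.
+ */
+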
+#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
+
+#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984
+#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_1 0x0000b991
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_2 0x0000b992
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_3 0x0000b993
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_4 0x0000b994
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_5 0x0000b995
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_6 0x0000b996
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL_0 0x0000b997
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00
+#define A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK0__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000
+#define A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK1__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b
+
+#define REG_A6XX_HLSQ_UPDATE_CNTL 0x0000bb08
+
+#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_UNKNOWN_BB11 0x0000bb11
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE04 0x0000be04
+
+#define REG_A6XX_TEX_SAMP_0 0x00000000
+#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A6XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MAG(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MAG__SHIFT) & A6XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A6XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A6XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MIN(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MIN__SHIFT) & A6XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A6XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_S(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_S__SHIFT) & A6XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A6XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_T(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_T__SHIFT) & A6XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A6XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_R(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_R__SHIFT) & A6XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A6XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A6XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A6XX_TEX_SAMP_0_ANISO(enum a6xx_tex_aniso val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_ANISO__SHIFT) & A6XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A6XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A6XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_1 0x00000001
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A6XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A6XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A6XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A6XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A6XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A6XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A6XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_2 0x00000002
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xfffffff0
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 4
+static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
+{
+ return ((val) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_3 0x00000003
+
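+/*
+ * A sampler descriptor occupies four dwords (TEX_SAMP_0..3). A minimal
+ * sketch of packing the first two dwords; the filter/wrap enumerants
+ * (A6XX_TEX_LINEAR, A6XX_TEX_REPEAT) are assumed from the a6xx_tex_filter
+ * and a6xx_tex_clamp enums defined earlier in this header, and the LOD
+ * values are illustrative:
+ *
+ *	samp[0] = A6XX_TEX_SAMP_0_XY_MAG(A6XX_TEX_LINEAR) |
+ *		A6XX_TEX_SAMP_0_XY_MIN(A6XX_TEX_LINEAR) |
+ *		A6XX_TEX_SAMP_0_WRAP_S(A6XX_TEX_REPEAT) |
+ *		A6XX_TEX_SAMP_0_WRAP_T(A6XX_TEX_REPEAT);
+ *	samp[1] = A6XX_TEX_SAMP_1_MAX_LOD(15.0f) |
+ *		A6XX_TEX_SAMP_1_MIN_LOD(0.0f);
+ */
+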
+#define REG_A6XX_TEX_CONST_0 0x00000000
+#define A6XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A6XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_0_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_TEX_CONST_0_TILE_MODE__SHIFT) & A6XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A6XX_TEX_CONST_0_SRGB 0x00000004
+#define A6XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A6XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_X(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_X__SHIFT) & A6XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A6XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Y(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A6XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Z(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A6XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_W(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_W__SHIFT) & A6XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A6XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A6XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A6XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
+{
+ return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK;
+}
+#define A6XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A6XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A6XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWAP__SHIFT) & A6XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_1 0x00000001
+#define A6XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A6XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_WIDTH__SHIFT) & A6XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A6XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A6XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_HEIGHT__SHIFT) & A6XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_2 0x00000002
+#define A6XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A6XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_2_FETCHSIZE(enum a6xx_tex_fetchsize val)
+{
+ return ((val) << A6XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A6XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A6XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A6XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
+{
+ return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_3 0x00000003
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A6XX_TEX_CONST_4 0x00000004
+#define A6XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK;
+}
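+/*
+ * BASE_LO holds bits [31:5] of the texture base address: the (val >> 5)
+ * in the helper against a shift of 5 drops the low five bits, so the base
+ * address must be 32-byte aligned.
+ */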
+
+#define REG_A6XX_TEX_CONST_5 0x00000005
+#define A6XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_BASE_HI__SHIFT) & A6XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A6XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A6XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_DEPTH__SHIFT) & A6XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_6 0x00000006
+
+#define REG_A6XX_TEX_CONST_7 0x00000007
+#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_8 0x00000008
+#define A6XX_TEX_CONST_8_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_8_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_8_BASE_HI__SHIFT) & A6XX_TEX_CONST_8_BASE_HI__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_9 0x00000009
+
+#define REG_A6XX_TEX_CONST_10 0x0000000a
+
+#define REG_A6XX_TEX_CONST_11 0x0000000b
+
+#define REG_A6XX_TEX_CONST_12 0x0000000c
+
+#define REG_A6XX_TEX_CONST_13 0x0000000d
+
+#define REG_A6XX_TEX_CONST_14 0x0000000e
+
+#define REG_A6XX_TEX_CONST_15 0x0000000f
+
+
+#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
new file mode 100644
index 000000000000..bbb8126ec5c5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -0,0 +1,1207 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+static irqreturn_t a6xx_gmu_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
+ dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
+
+ /* Temporary until we can recover safely */
+ BUG();
+ }
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
+ dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+ dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t a6xx_hfi_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
+ tasklet_schedule(&gmu->hfi_tasklet);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
+ dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
+
+ /* Temporary until we can recover safely */
+ BUG();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Check to see if the GX rail is still powered */
+static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+}
+
+static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+{
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
+ ((3 & 0xf) << 28) | index);
+
+ /*
+ * Send an invalid index as a vote for the bus bandwidth and let the
+ * firmware decide on the right vote
+ */
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
+
+ /* Set and clear the OOB for DCVS to trigger the GMU */
+ a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
+
+ return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+}
+
+static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int local = gmu->idle_level;
+
+ /* SPTP and IFPC both report as IFPC */
+ if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
+ local = GMU_IDLE_STATE_IFPC;
+
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val == local) {
+ if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
+ !a6xx_gmu_gx_is_on(gmu))
+ return true;
+ }
+
+ return false;
+}
+
+/* Wait for the GMU to get to its most idle state */
+int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ return spin_until(a6xx_gmu_check_idle_level(gmu));
+}
+
+static int a6xx_gmu_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
+ val == 0xbabeface, 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+
+ return ret;
+}
+
+static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
+ A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
+ val & 1, 100, 10000);
+ if (ret)
+ dev_err(gmu->dev, "Unable to start the HFI queues\n");
+
+ return ret;
+}
+
+/* Trigger an OOB (out of band) request to the GMU */
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ int ret;
+ u32 val;
+ int request, ack;
+ const char *name;
+
+ switch (state) {
+ case GMU_OOB_GPU_SET:
+ request = GMU_OOB_GPU_SET_REQUEST;
+ ack = GMU_OOB_GPU_SET_ACK;
+ name = "GPU_SET";
+ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ request = GMU_OOB_BOOT_SLUMBER_REQUEST;
+ ack = GMU_OOB_BOOT_SLUMBER_ACK;
+ name = "BOOT_SLUMBER";
+ break;
+ case GMU_OOB_DCVS_SET:
+ request = GMU_OOB_DCVS_REQUEST;
+ ack = GMU_OOB_DCVS_ACK;
+ name = "GPU_DCVS";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Trigger the requested OOB operation */
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+
+ /* Wait for the acknowledge interrupt */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & (1 << ack), 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev,
+ "Timeout waiting for GMU OOB set %s: 0x%x\n",
+ name,
+ gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
+
+ /* Clear the acknowledge interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+
+ return ret;
+}
+
+/* Clear a pending OOB state in the GMU */
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ switch (state) {
+ case GMU_OOB_GPU_SET:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_GPU_SET_CLEAR);
+ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
+ break;
+ case GMU_OOB_DCVS_SET:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_DCVS_CLEAR);
+ break;
+ }
+}
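+
+/*
+ * A typical OOB transaction pairs the two functions above: raise the
+ * request, wait for the acknowledge interrupt, then clear it, as
+ * a6xx_gmu_set_freq() does for DCVS:
+ *
+ *	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
+ *	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
+ */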
+
+/* Enable CPU control of SPTP power collapse */
+static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x38) == 0x28, 1, 100);
+
+ if (ret) {
+ dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+ }
+
+ return ret;
+}
+
+/* Disable CPU control of SPTP power collapse */
+static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ /* Make sure retention is on */
+ gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x04), 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+}
+
+/* Let the GMU know we are starting a boot sequence */
+static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
+{
+ u32 vote;
+
+ /* Let the GMU know we are getting ready for boot */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
+
+ /* Choose the "default" power level as the highest available */
+ vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
+ gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
+
+ /* Let the GMU know the boot sequence has started */
+ return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+}
+
+/* Let the GMU know that we are about to go into slumber */
+static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /* Disable the power counter so the GMU isn't busy */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
+ /* Disable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
+ a6xx_sptprac_disable(gmu);
+
+ /* Tell the GMU to get ready to slumber */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
+
+ ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ if (!ret) {
+ /* Check to see if the GMU really did slumber */
+ if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
+ != 0x0f) {
+ dev_err(gmu->dev, "The GMU did not go into slumber\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ /* Put fence into allow mode */
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ return ret;
+}
+
+static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
+ /* Wait for the register to finish posting */
+ wmb();
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
+ val & (1 << 1), 100, 10000);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+ return ret;
+ }
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+ !val, 100, 10000);
+
+ if (!ret) {
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ /* Re-enable the power counter */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+ return 0;
+ }
+
+ dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
+}
+
+static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+ val, val & (1 << 16), 100, 10000);
+ if (ret)
+ dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+}
+
+static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
+{
+ /* Disable SDE clock gating */
+ gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+
+ /* Setup RSC PDC handshake for sleep and wakeup */
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+
+ /* Load RSC sequencer uCode for sleep and wakeup */
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+
+ /* Load PDC sequencer uCode for power up and power down sequence */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
+
+ /* Set TCS commands used by PDC sequence for low power modes */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
+
+ /* Setup GPU PDC */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
+
+ /* ensure no writes happen before the uCode is fully written */
+ wmb();
+}
+
+/*
+ * The lowest 16 bits of this value are the number of XO clock cycles for main
+ * hysteresis, which is set at 0x1680 cycles (300 us). The upper 16 bits are
+ * for the shorter hysteresis that happens after main - this is 0xa cycles
+ * (0.5 us)
+ */
+
+#define GMU_PWR_COL_HYST 0x000a1680
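+/* 0x1680 = 5760 cycles and 0xa = 10 cycles: 300 us and ~0.5 us at 19.2 MHz */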
+
+/* Set up the idle state for the GMU */
+static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
+{
+ /* Disable GMU WB/RB buffer */
+ gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
+
+ switch (gmu->idle_level) {
+ case GMU_IDLE_STATE_IFPC:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
+ /* Fall through */
+ case GMU_IDLE_STATE_SPTP:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
+ }
+
+ /* Enable RPMh GPU client */
+ gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
+ A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
+}
+
+static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
+{
+ static bool rpmh_init;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ int i, ret;
+ u32 chipid;
+ u32 *image;
+
+ if (state == GMU_WARM_BOOT) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ } else {
+ if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
+ "GMU firmware is not loaded\n"))
+ return -ENOENT;
+
+ /* Sanity check the size of the firmware that was loaded */
+ if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
+ dev_err(gmu->dev,
+ "GMU firmware is bigger than the available region\n");
+ return -EINVAL;
+ }
+
+ /* Turn on register retention */
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+
+ /* We only need to load the RPMh microcode once */
+ if (!rpmh_init) {
+ a6xx_gmu_rpmh_init(gmu);
+ rpmh_init = true;
+ } else if (state != GMU_RESET) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ }
+
+ image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
+
+ for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
+ gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
+ image[i]);
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
+
+ /* Write the iova of the HFI table */
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
+
+ gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
+ (1 << 31) | (0xa << 18) | (0xa0));
+
+ chipid = adreno_gpu->rev.core << 24;
+ chipid |= adreno_gpu->rev.major << 16;
+ chipid |= adreno_gpu->rev.minor << 12;
+ chipid |= adreno_gpu->rev.patchid << 8;
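+ /* e.g. an A630 (core 6, major 3, minor 0, patch 0) packs to 0x06030000 */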
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+
+ /* Set up the lowest idle level on the GMU */
+ a6xx_gmu_power_config(gmu);
+
+ ret = a6xx_gmu_start(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_gmu_gfx_rail_on(gmu);
+ if (ret)
+ return ret;
+
+ /* Enable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
+ ret = a6xx_sptprac_enable(gmu);
+ if (ret)
+ return ret;
+ }
+
+ ret = a6xx_gmu_hfi_start(gmu);
+ if (ret)
+ return ret;
+
+ /* FIXME: Do we need this wmb() here? */
+ wmb();
+
+ return 0;
+}
+
+#define A6XX_HFI_IRQ_MASK \
+ (A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
+ A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
+
+#define A6XX_GMU_IRQ_MASK \
+ (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+
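+/*
+ * a6xx_gmu_irq_enable() writes the complement of these masks to the
+ * hardware, so a 0 bit in a mask register unmasks the corresponding
+ * interrupt.
+ */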
+static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
+{
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
+ ~A6XX_GMU_IRQ_MASK);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
+ ~A6XX_HFI_IRQ_MASK);
+
+ enable_irq(gmu->gmu_irq);
+ enable_irq(gmu->hfi_irq);
+}
+
+static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
+{
+ disable_irq(gmu->gmu_irq);
+ disable_irq(gmu->hfi_irq);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
+}
+
+int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int ret;
+ u32 val;
+
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
+
+ /* Make sure there are no outstanding RPMh votes */
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+ (val & 1), 100, 1000);
+
+ /* Force off the GX GDSC */
+ regulator_force_disable(gmu->gx);
+
+ /* Disable the resources */
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+ pm_runtime_put_sync(gmu->dev);
+
+ /* Re-enable the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret)
+ goto out;
+
+ a6xx_gmu_irq_enable(gmu);
+
+ ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
+ if (!ret)
+ ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
+
+ /* Set the GPU back to the highest power frequency */
+ a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+out:
+ if (ret)
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int status, ret;
+
+ if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
+ return 0;
+
+ /* Turn on the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret)
+ goto out;
+
+ a6xx_gmu_irq_enable(gmu);
+
+ /* Check to see if we are doing a cold or warm boot */
+ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+
+ ret = a6xx_gmu_fw_start(gmu, status);
+ if (ret)
+ goto out;
+
+ ret = a6xx_hfi_start(gmu, status);
+
+ /* Set the GPU to the highest power frequency */
+ a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+out:
+ /* Make sure to turn off the boot OOB request on error */
+ if (ret)
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
+{
+ u32 reg;
+
+ if (!gmu->mmio)
+ return true;
+
+ reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
+
+ if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
+ return false;
+
+ return true;
+}
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 val;
+
+ /*
+ * The GMU may still be in slumber if the GPU never started, so check
+ * and skip putting it back into slumber if so
+ */
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val != 0xf) {
+ int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
+
+ /* Temporary until we can recover safely */
+ BUG_ON(ret);
+
+ /* tell the GMU we want to slumber */
+ a6xx_gmu_notify_slumber(gmu);
+
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
+
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ dev_err(gmu->dev,
+ "Unable to slumber GMU: status = 0%x/0%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
+ }
+
+ /* Turn off HFI */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts and mask the hardware */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Tell RPMh to power off the GPU */
+ a6xx_rpmh_stop(gmu);
+
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+
+ pm_runtime_put_sync(gmu->dev);
+
+ return 0;
+}
+
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
+{
+ int count, i;
+ u64 iova;
+
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ count = bo->size >> PAGE_SHIFT;
+ iova = bo->iova;
+
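+	/* Unmap each page from the GMU IOMMU domain and free it */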
+ for (i = 0; i < count; i++, iova += PAGE_SIZE) {
+ iommu_unmap(gmu->domain, iova, PAGE_SIZE);
+ __free_pages(bo->pages[i], 0);
+ }
+
+ kfree(bo->pages);
+ kfree(bo);
+}
+
+static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
+ size_t size)
+{
+ struct a6xx_gmu_bo *bo;
+ int ret, count, i;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ bo->size = PAGE_ALIGN(size);
+
+ count = bo->size >> PAGE_SHIFT;
+
+ bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
+ if (!bo->pages) {
+ kfree(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < count; i++) {
+ bo->pages[i] = alloc_page(GFP_KERNEL);
+ if (!bo->pages[i])
+ goto err;
+ }
+
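+	/* IOVA space is handed out linearly from the uncached base address */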
+ bo->iova = gmu->uncached_iova_base;
+
+ for (i = 0; i < count; i++) {
+ ret = iommu_map(gmu->domain,
+ bo->iova + (PAGE_SIZE * i),
+ page_to_phys(bo->pages[i]), PAGE_SIZE,
+ IOMMU_READ | IOMMU_WRITE);
+
+ if (ret) {
+ dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+
+ for (i = i - 1 ; i >= 0; i--)
+ iommu_unmap(gmu->domain,
+ bo->iova + (PAGE_SIZE * i),
+ PAGE_SIZE);
+
+ goto err;
+ }
+ }
+
+ bo->virt = vmap(bo->pages, count, VM_IOREMAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!bo->virt)
+ goto err;
+
+ /* Align future IOVA addresses on 1MB boundaries */
+ gmu->uncached_iova_base += ALIGN(size, SZ_1M);
+
+ return bo;
+
+err:
+ for (i = 0; i < count; i++) {
+ if (bo->pages[i])
+ __free_pages(bo->pages[i], 0);
+ }
+
+ kfree(bo->pages);
+ kfree(bo);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /*
+ * The GMU address space is hardcoded to treat the range
+ * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
+ * between the GMU and the CPU will live in this space
+ */
+ gmu->uncached_iova_base = 0x60000000;
+
+ gmu->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!gmu->domain)
+ return -ENODEV;
+
+ ret = iommu_attach_device(gmu->domain, gmu->dev);
+
+ if (ret) {
+ iommu_domain_free(gmu->domain);
+ gmu->domain = NULL;
+ }
+
+ return ret;
+}
+
+/* Get the list of RPMh voltage levels from cmd-db */
+static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
+{
+ u32 len = cmd_db_read_aux_data_len(id);
+
+ if (!len)
+ return 0;
+
+ if (WARN_ON(len > size))
+ return -EINVAL;
+
+ cmd_db_read_aux_data(id, vals, len);
+
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ return len >> 1;
+}
+
+/* Return the 'arc-level' for the given frequency */
+static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
+{
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ u32 val = 0;
+
+ if (!freq)
+ return 0;
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ if (IS_ERR(opp))
+ return 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+
+ if (np) {
+ of_property_read_u32(np, "qcom,level", &val);
+ of_node_put(np);
+ }
+
+ dev_pm_opp_put(opp);
+
+ return val;
+}
+
+static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
+ unsigned long *freqs, int freqs_count,
+ u16 *pri, int pri_count,
+ u16 *sec, int sec_count)
+{
+ int i, j;
+
+ /* Construct a vote for each frequency */
+ for (i = 0; i < freqs_count; i++) {
+ u8 pindex = 0, sindex = 0;
+ u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+
+ /* Get the primary index that matches the arc level */
+ for (j = 0; j < pri_count; j++) {
+ if (pri[j] >= level) {
+ pindex = j;
+ break;
+ }
+ }
+
+ if (j == pri_count) {
+ dev_err(dev,
+ "Level %u not found in in the RPMh list\n",
+ level);
+ dev_err(dev, "Available levels:\n");
+ for (j = 0; j < pri_count; j++)
+ dev_err(dev, " %u\n", pri[j]);
+
+ return -EINVAL;
+ }
+
+ /*
+		 * Look for a level in the secondary list that matches. If
+ * nothing fits, use the maximum non zero vote
+ */
+
+ for (j = 0; j < sec_count; j++) {
+ if (sec[j] >= level) {
+ sindex = j;
+ break;
+ } else if (sec[j]) {
+ sindex = j;
+ }
+ }
+
+		/*
+		 * Construct the vote - the lower byte is the primary index, the
+		 * next byte is the secondary index and the upper 16 bits hold
+		 * the primary arc level itself
+		 */
+ votes[i] = ((pri[pindex] & 0xffff) << 16) |
+ (sindex << 8) | pindex;
+ }
+
+ return 0;
+}
+
+/*
+ * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
+ * to construct the list of votes on the CPU and send it over. Query the RPMh
+ * voltage levels and build the votes
+ */
+
+static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ u16 gx[16], cx[16], mx[16];
+ u32 gxcount, cxcount, mxcount;
+ int ret;
+
+ /* Get the list of available voltage levels for each component */
+ gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
+ cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
+ mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));
+
+ /* Build the GX votes */
+ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
+ gmu->gpu_freqs, gmu->nr_gpu_freqs,
+ gx, gxcount, mx, mxcount);
+
+ /* Build the CX votes */
+ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
+ gmu->gmu_freqs, gmu->nr_gmu_freqs,
+ cx, cxcount, mx, mxcount);
+
+ return ret;
+}
+
+static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
+ u32 size)
+{
+ int count = dev_pm_opp_get_opp_count(dev);
+ struct dev_pm_opp *opp;
+ int i, index = 0;
+ unsigned long freq = 1;
+
+ /*
+ * The OPP table doesn't contain the "off" frequency level so we need to
+ * add 1 to the table size to account for it
+ */
+
+ if (WARN(count + 1 > size,
+ "The GMU frequency table is being truncated\n"))
+ count = size - 1;
+
+ /* Set the "off" frequency */
+ freqs[index++] = 0;
+
+ for (i = 0; i < count; i++) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
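+		/* Bump the frequency so the next ceil lookup returns a new OPP */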
+ freqs[index++] = freq++;
+ }
+
+ return index;
+}
+
+static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ int ret = 0;
+
+ /*
+ * The GMU handles its own frequency switching so build a list of
+ * available frequencies to send during initialization
+ */
+ ret = dev_pm_opp_of_add_table(gmu->dev);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ return ret;
+ }
+
+ gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
+ gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
+
+ /*
+ * The GMU also handles GPU frequency switching so build a list
+ * from the GPU OPP table
+ */
+ gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
+ gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
+
+ /* Build the list of RPMh votes that we'll send to the GMU */
+ return a6xx_gmu_rpmh_votes_init(gmu);
+}
+
+static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
+{
+ int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);
+
+ if (ret < 1)
+ return ret;
+
+ gmu->nr_clocks = ret;
+
+ gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "gmu");
+
+ return 0;
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name)
+{
+ void __iomem *ret;
+ struct resource *res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!ret) {
+ dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ret;
+}
+
+static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
+{
+ int irq, ret;
+
+ irq = platform_get_irq_byname(pdev, name);
+
+ ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
+ name, gmu);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+ return ret;
+ }
+
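+	/* Leave the interrupt disabled until the GMU is actually started */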
+ disable_irq(irq);
+
+ return irq;
+}
+
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return;
+
+ pm_runtime_disable(gmu->dev);
+ a6xx_gmu_stop(a6xx_gpu);
+
+ a6xx_gmu_irq_disable(gmu);
+ a6xx_gmu_memory_free(gmu, gmu->hfi);
+
+ iommu_detach_device(gmu->domain, gmu->dev);
+
+ iommu_domain_free(gmu->domain);
+}
+
+int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = of_find_device_by_node(node);
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ gmu->dev = &pdev->dev;
+
+ of_dma_configure(gmu->dev, node, false);
+
+	/* For now, don't do anything fancy until we get our feet under us */
+ gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+
+ pm_runtime_enable(gmu->dev);
+ gmu->gx = devm_regulator_get(gmu->dev, "vdd");
+
+ /* Get the list of clocks */
+ ret = a6xx_gmu_clocks_probe(gmu);
+ if (ret)
+ return ret;
+
+ /* Set up the IOMMU context bank */
+ ret = a6xx_gmu_memory_probe(gmu);
+ if (ret)
+ return ret;
+
+	/* Allocate memory for the HFI queues */
+ gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ if (IS_ERR(gmu->hfi))
+ goto err;
+
+ /* Allocate memory for the GMU debug region */
+ gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ if (IS_ERR(gmu->debug))
+ goto err;
+
+ /* Map the GMU registers */
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+
+ /* Map the GPU power domain controller registers */
+ gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+
+ if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
+ goto err;
+
+ /* Get the HFI and GMU interrupts */
+ gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
+ gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
+
+ if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
+ goto err;
+
+ /* Set up a tasklet to handle GMU HFI responses */
+ tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
+
+ /* Get the power levels for the GMU and GPU */
+ a6xx_gmu_pwrlevels_probe(gmu);
+
+ /* Set up the HFI queues */
+ a6xx_hfi_init(gmu);
+
+ return 0;
+err:
+ a6xx_gmu_memory_free(gmu, gmu->hfi);
+
+ if (gmu->domain) {
+ iommu_detach_device(gmu->domain, gmu->dev);
+
+ iommu_domain_free(gmu->domain);
+ }
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
new file mode 100644
index 000000000000..d9a386c18799
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_GMU_H_
+#define _A6XX_GMU_H_
+
+#include <linux/interrupt.h>
+#include "msm_drv.h"
+#include "a6xx_hfi.h"
+
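+/*
+ * A buffer shared between the CPU and the GMU, mapped into the GMU IOMMU
+ * domain
+ */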
+struct a6xx_gmu_bo {
+ void *virt;
+ size_t size;
+ u64 iova;
+ struct page **pages;
+};
+
+/*
+ * These define the different GMU wake up options - how both the CPU and the
+ * GMU bring up the hardware
+ */
+
+/* The GMU has already been booted and the retention registers are active */
+#define GMU_WARM_BOOT 0
+
+/* The GMU is coming up for the first time or back from a power collapse */
+#define GMU_COLD_BOOT 1
+
+/* The GMU is being soft reset after a fault */
+#define GMU_RESET 2
+
+/*
+ * These define the level of control that the GMU has - the higher the number
+ * the more things that the GMU hardware controls on its own.
+ */
+
+/* The GMU does not do any idle state management */
+#define GMU_IDLE_STATE_ACTIVE 0
+
+/* The GMU manages SPTP power collapse */
+#define GMU_IDLE_STATE_SPTP 2
+
+/* The GMU does automatic IFPC (intra-frame power collapse) */
+#define GMU_IDLE_STATE_IFPC 3
+
+struct a6xx_gmu {
+ struct device *dev;
+
+	void __iomem *mmio;
+	void __iomem *pdc_mmio;
+
+ int hfi_irq;
+ int gmu_irq;
+
+ struct regulator *gx;
+
+ struct iommu_domain *domain;
+ u64 uncached_iova_base;
+
+ int idle_level;
+
+ struct a6xx_gmu_bo *hfi;
+ struct a6xx_gmu_bo *debug;
+
+ int nr_clocks;
+ struct clk_bulk_data *clocks;
+ struct clk *core_clk;
+
+ int nr_gpu_freqs;
+ unsigned long gpu_freqs[16];
+ u32 gx_arc_votes[16];
+
+ int nr_gmu_freqs;
+ unsigned long gmu_freqs[4];
+ u32 cx_arc_votes[4];
+
+ struct a6xx_hfi_queue queues[2];
+
+ struct tasklet_struct hfi_tasklet;
+};
+
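+/*
+ * The GMU register offsets used here are dword-based, so shift left by two
+ * to get the byte offset for the MMIO accessors (the same applies to the
+ * gmu_poll_timeout() macro below)
+ */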
+static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
+{
+ return msm_readl(gmu->mmio + (offset << 2));
+}
+
+static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+	msm_writel(value, gmu->mmio + (offset << 2));
+}
+
+static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+	msm_writel(value, gmu->pdc_mmio + (offset << 2));
+}
+
+static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
+{
+ u32 val = gmu_read(gmu, reg);
+
+ val &= ~mask;
+
+ gmu_write(gmu, reg, val | or);
+}
+
+#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
+
+/*
+ * These are the available OOB (out of band requests) to the GMU where "out of
+ * band" means that the CPU talks to the GMU directly and not through HFI.
+ * Normally this works by writing an ITCM/DTCM register and then triggering an
+ * interrupt (the "request" bit) and waiting for an acknowledgment (the "ack"
+ * bit). The state is cleared by writing the "clear" bit to the GMU interrupt.
+ *
+ * These are used to force the GMU/GPU to stay on during a critical sequence or
+ * for hardware workarounds.
+ */
+
+enum a6xx_gmu_oob_state {
+ GMU_OOB_BOOT_SLUMBER = 0,
+ GMU_OOB_GPU_SET,
+ GMU_OOB_DCVS_SET,
+};
+
+/*
+ * These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob and a6xx_gmu_clear_oob
+ */
+
+/*
+ * Let the GMU know that a boot or slumber operation has started. The value in
+ * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+ * doing
+ */
+#define GMU_OOB_BOOT_SLUMBER_REQUEST 22
+#define GMU_OOB_BOOT_SLUMBER_ACK 30
+#define GMU_OOB_BOOT_SLUMBER_CLEAR 30
+
+/*
+ * Set a new power level for the GPU when the CPU is doing frequency scaling
+ */
+#define GMU_OOB_DCVS_REQUEST 23
+#define GMU_OOB_DCVS_ACK 31
+#define GMU_OOB_DCVS_CLEAR 31
+
+/*
+ * Let the GMU know to not turn off any GPU registers while the CPU is in a
+ * critical section
+ */
+#define GMU_OOB_GPU_SET_REQUEST 16
+#define GMU_OOB_GPU_SET_ACK 24
+#define GMU_OOB_GPU_SET_CLEAR 24
+
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu);
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
+void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+
+void a6xx_hfi_task(unsigned long data);
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
new file mode 100644
index 000000000000..ef68098d2adc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -0,0 +1,382 @@
+#ifndef A6XX_GMU_XML
+#define A6XX_GMU_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB 0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK 0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK 0x40000000
+#define A6XX_GMU_OOB_DCVS_SET_MASK 0x00800000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK 0x80000000
+#define A6XX_GMU_OOB_GPU_SET_MASK 0x00040000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK 0x04000000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK 0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK 0x02000000
+#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
+#define A6XX_HFI_IRQ_DSGQ_MASK 0x00000002
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK 0x00000004
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK 0x00800000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
+static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT) & A6XX_HFI_IRQ_GMU_ERR_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_OOB_MASK__MASK 0xff000000
+#define A6XX_HFI_IRQ_OOB_MASK__SHIFT 24
+static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK;
+}
+#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001
+#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080
+
+#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081
+
+#define REG_A6XX_GMU_CM3_ITCM_START 0x00000c00
+
+#define REG_A6XX_GMU_CM3_DTCM_START 0x00001c00
+
+#define REG_A6XX_GMU_NMI_CONTROL_STATUS 0x000023f0
+
+#define REG_A6XX_GMU_BOOT_SLUMBER_OPTION 0x000023f8
+
+#define REG_A6XX_GMU_GX_VOTE_IDX 0x000023f9
+
+#define REG_A6XX_GMU_MX_VOTE_IDX 0x000023fa
+
+#define REG_A6XX_GMU_DCVS_ACK_OPTION 0x000023fc
+
+#define REG_A6XX_GMU_DCVS_PERF_SETTING 0x000023fd
+
+#define REG_A6XX_GMU_DCVS_BW_SETTING 0x000023fe
+
+#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
+
+#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
+
+#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
+
+#define REG_A6XX_GMU_CM3_BOOT_CONFIG 0x00005001
+
+#define REG_A6XX_GMU_CM3_FW_BUSY 0x0000501a
+
+#define REG_A6XX_GMU_CM3_FW_INIT_RESULT 0x0000501c
+
+#define REG_A6XX_GMU_CM3_CFG 0x0000502d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x00005040
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x00005041
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x00005042
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x00005044
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x00005045
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x00005046
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x00005047
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x00005048
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x00005049
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x0000504a
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x0000504b
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x0000504c
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x0000504d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x0000504e
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x0000504f
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x000050c0
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE 0x00000001
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE 0x00000002
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE 0x00000004
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK 0x00003c00
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT 10
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK;
+}
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK 0xffffc000
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT 14
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK;
+}
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x000050c1
+
+#define REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x000050c2
+
+#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000004
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000008
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF 0x00000080
+
+#define REG_A6XX_GMU_GPU_NAP_CTRL 0x000050e4
+#define A6XX_GMU_GPU_NAP_CTRL_HW_NAP_ENABLE 0x00000001
+#define A6XX_GMU_GPU_NAP_CTRL_SID__MASK 0x000001f0
+#define A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT 4
+static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT) & A6XX_GMU_GPU_NAP_CTRL_SID__MASK;
+}
+
+#define REG_A6XX_GMU_RPMH_CTRL 0x000050e8
+#define A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE 0x00000001
+#define A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE 0x00000010
+#define A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE 0x00000100
+#define A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE 0x00000200
+#define A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE 0x00000400
+#define A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE 0x00000800
+#define A6XX_GMU_RPMH_CTRL_DDR_MIN_VOTE_ENABLE 0x00001000
+#define A6XX_GMU_RPMH_CTRL_MX_MIN_VOTE_ENABLE 0x00002000
+#define A6XX_GMU_RPMH_CTRL_CX_MIN_VOTE_ENABLE 0x00004000
+#define A6XX_GMU_RPMH_CTRL_GFX_MIN_VOTE_ENABLE 0x00008000
+
+#define REG_A6XX_GMU_RPMH_HYST_CTRL 0x000050e9
+
+#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
+
+#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x00005158
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_L 0x00005088
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_H 0x00005089
+
+#define REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE 0x000050c3
+
+#define REG_A6XX_GMU_HFI_CTRL_STATUS 0x00005180
+
+#define REG_A6XX_GMU_HFI_VERSION_INFO 0x00005181
+
+#define REG_A6XX_GMU_HFI_SFR_ADDR 0x00005182
+
+#define REG_A6XX_GMU_HFI_MMAP_ADDR 0x00005183
+
+#define REG_A6XX_GMU_HFI_QTBL_INFO 0x00005184
+
+#define REG_A6XX_GMU_HFI_QTBL_ADDR 0x00005185
+
+#define REG_A6XX_GMU_HFI_CTRL_INIT 0x00005186
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_SET 0x00005190
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_CLR 0x00005191
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_INFO 0x00005192
+#define A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ 0x00000001
+#define A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT 0x00800000
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_MASK 0x00005193
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_SET 0x00005194
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_CLR 0x00005195
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_RAW_INFO 0x00005196
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_0 0x00005197
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_1 0x00005198
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_2 0x00005199
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_3 0x0000519a
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_0 0x0000519b
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_1 0x0000519c
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_2 0x0000519d
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e
+
+#define REG_A6XX_GMU_GENERAL_1 0x000051c6
+
+#define REG_A6XX_GMU_GENERAL_7 0x000051cc
+
+#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
+
+#define REG_A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x00008578
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x00008558
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_0 0x00008580
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_2 0x00027ada
+
+#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x00008957
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000881d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000881f
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x00008821
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
+
+#define REG_A6XX_GPU_CS_AMP_PERIOD_CTRL 0x0000896d
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x0000514d
+
+#define REG_A6XX_GMU_AO_INTERRUPT_EN 0x00009303
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x00009304
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x00009305
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE 0x00000001
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_RSCC_COMP 0x00000002
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_VDROOP 0x00000004
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR 0x00000008
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_DBD_WAKEUP 0x00000010
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR 0x00000020
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x00009306
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x00009309
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x0000930a
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x0000930b
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x0000930c
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB 0x00800000
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x0000930d
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x0000930e
+
+#define REG_A6XX_GMU_AO_AHB_FENCE_CTRL 0x00009310
+
+#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313
+
+#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315
+
+#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
+
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00008c04
+
+#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
+
+#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_0 0x00009311
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_1 0x00009312
+
+#define REG_A6XX_GPU_CC_GX_GDSCR 0x00009c03
+
+#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00008c08
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00008c09
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x00008c0a
+
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x00008c0b
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x00008c0d
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x00008c0e
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00008c82
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00008c83
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00008c89
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x00008c8c
+
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00008d00
+
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00008d01
+
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00008d80
+
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00008f46
+
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000090ae
+
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00009216
+
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000937e
+
+
+#endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
new file mode 100644
index 000000000000..c629f742a1d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Check that the GMU is idle */
+ if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ return false;
+
+	/* Check that the CX master is idle */
+ if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
+ ~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+ return false;
+
+ return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a6xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+}
+
+static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ /* Invalidate CCU depth and color */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
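+			/* fall through */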
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ break;
+ }
+ }
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+	 * timestamp is written to memory and then trigger the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ a6xx_flush(gpu, ring);
+}
+
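+/*
+ * Static hardware clock gating configuration - register / value pairs that
+ * a6xx_set_hwcg() writes when enabling clock gating
+ */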
+static const struct {
+ u32 offset;
+ u32 value;
+} a6xx_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+};
+
+static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int i;
+ u32 val;
+
+ val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
+
+ /* Don't re-program the registers if they are already correct */
+ if ((!state && !val) || (state && (val == 0x8aa8aa02)))
+ return;
+
+ /* Disable SP clock before programming HWCG registers */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
+ gpu_write(gpu, a6xx_hwcg[i].offset,
+ state ? a6xx_hwcg[i].value : 0);
+
+ /* Enable SP clock */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
+
+ gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
+}
+
+static int a6xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002f);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+
+ /* Pad rest of the cmds with 0's */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ a6xx_flush(gpu, ring);
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a6xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (!a6xx_gpu->sqe_bo) {
+ a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
+
+ if (IS_ERR(a6xx_gpu->sqe_bo)) {
+ int ret = PTR_ERR(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Could not allocate SQE ucode: %d\n", ret);
+
+ return ret;
+ }
+ }
+
+ gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
+ REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);
+
+ return 0;
+}
+
+#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+ A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+ A6XX_RBBM_INT_0_MASK_CP_RB | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ /* Make sure the GMU keeps the GPU on while we set it up */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+ /*
+	 * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* enable hardware clockgating */
+ a6xx_set_hwcg(gpu, true);
+
+ /* VBIF start */
+ gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
+
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+
+ gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+
+ /* Setting the mem pool size */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
+
+ /* Setting the primFifo thresholds default values */
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
+
+ /* Set the AHB default slave response to "ERROR" */
+ gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+
+ /* FIXME: not sure if this should live here or in a6xx_gmu.c */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
+ 0xff000000);
+ gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
+ 0xff, 0x20);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
+ 0x01);
+
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0x1fffff);
+
+ gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
+
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
+ A6XX_PROTECT_RDONLY(0x600, 0x51));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
+ A6XX_PROTECT_RDONLY(0xfc00, 0x3));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
+ A6XX_PROTECT_RDONLY(0x0, 0x4f9));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
+ A6XX_PROTECT_RDONLY(0x501, 0xa));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
+ A6XX_PROTECT_RDONLY(0x511, 0x44));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
+ A6XX_PROTECT_RW(0xbe20, 0x11f3));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
+ A6XX_PROTECT_RDONLY(0x8d0, 0x23));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(25),
+ A6XX_PROTECT_RDONLY(0x980, 0x4));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(26), A6XX_PROTECT_RW(0xa630, 0x0));
+
+ /* Enable interrupts */
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ goto out;
+
+ ret = a6xx_ucode_init(gpu);
+ if (ret)
+ goto out;
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+
+	/* Enable the SQE to start the CP engine */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
+
+ ret = a6xx_cp_init(gpu);
+ if (ret)
+ goto out;
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+out:
+ /*
+ * Tell the GMU that we are done touching the GPU and it can start power
+ * management
+ */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ /* Take the GMU out of its special boot mode */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+static void a6xx_dump(struct msm_gpu *gpu)
+{
+ dev_info(&gpu->pdev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+#define VBIF_RESET_ACK_TIMEOUT 100
+#define VBIF_RESET_ACK_MASK 0x00f0
+
+static void a6xx_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++)
+ dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+
+ if (hang_debug)
+ a6xx_dump(gpu);
+
+ /*
+ * Turn off keep alive that might have been enabled by the hang
+ * interrupt
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->pm_resume(gpu);
+
+ msm_gpu_hw_init(gpu);
+}
+
+static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
+{
+ struct msm_gpu *gpu = arg;
+
+ pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+ iova, flags,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
+
+ return -EFAULT;
+}
+
+static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
+
+ if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
+ val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A6XX_CP_INT_CP_UCODE_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP ucode error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
+
+ if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 20) ? "READ" : "WRITE",
+ (val & 0x3ffff), val);
+ }
+
+ if (status & A6XX_CP_INT_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
+
+ if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
+
+}
+
+static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ /*
+ * Force the GPU to stay on until after we finish
+ * collecting information
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->seqno : 0,
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+ gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
+ a6xx_fault_detect_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a6xx_cp_hw_err_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_A6XX_CP_RB_RPTR_ADDR_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A6XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
+};
+
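+/*
+ * Inclusive (start, end) pairs of register ranges to dump for debugging,
+ * terminated by ~0
+ */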
+static const u32 a6xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
+ 0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
+ 0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
+ 0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
+ 0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
+ 0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
+ 0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
+ 0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
+ 0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
+ 0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
+ 0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
+ 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
+ 0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
+ 0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
+ 0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
+ 0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
+ 0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
+ 0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
+ 0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
+ 0xa610, 0xa617, 0xa630, 0xa630,
+ ~0
+};
+
+static int a6xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ ret = a6xx_gmu_resume(a6xx_gpu);
+
+ gpu->needs_hw_init = true;
+
+ return ret;
+}
+
+static int a6xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /*
+ * Make sure the GMU is idle before continuing (because some transitions
+	 * may use VBIF)
+ */
+ a6xx_gmu_wait_for_idle(a6xx_gpu);
+
+ /* Clear the VBIF pipe before shutting down */
+ /* FIXME: This accesses the GPU - do we need to make sure it is on? */
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return a6xx_gmu_stop(a6xx_gpu);
+}
+
+static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Force the GPU power on so we can read this register */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ adreno_show(gpu, state, p);
+}
+#endif
+
+static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ return a6xx_gpu->cur_ring;
+}
+
+static void a6xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (a6xx_gpu->sqe_bo) {
+ if (a6xx_gpu->sqe_iova)
+ msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a6xx_gpu->sqe_bo);
+ }
+
+ a6xx_gmu_remove(a6xx_gpu);
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a6xx_gpu);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a6xx_hw_init,
+ .pm_suspend = a6xx_pm_suspend,
+ .pm_resume = a6xx_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .flush = a6xx_flush,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = a6xx_show,
+#endif
+ },
+ .get_timestamp = a6xx_get_timestamp,
+};
+
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct device_node *node;
+ struct a6xx_gpu *a6xx_gpu;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
+ if (!a6xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a6xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ adreno_gpu->registers = a6xx_registers;
+ adreno_gpu->reg_offsets = a6xx_register_offsets;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /* Check if there is a GMU phandle and set it up */
+ node = of_parse_phandle(pdev->dev.of_node, "gmu", 0);
+
+ /* FIXME: How do we gracefully handle this? */
+ BUG_ON(!node);
+
+ ret = a6xx_gmu_probe(a6xx_gpu, node);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (gpu->aspace)
+ msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
+ a6xx_fault_handler);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
new file mode 100644
index 000000000000..dd69e5b0e692
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef __A6XX_GPU_H__
+#define __A6XX_GPU_H__
+
+
+#include "adreno_gpu.h"
+#include "a6xx.xml.h"
+
+#include "a6xx_gmu.h"
+
+extern bool hang_debug;
+
+struct a6xx_gpu {
+ struct adreno_gpu base;
+
+ struct drm_gem_object *sqe_bo;
+ uint64_t sqe_iova;
+
+ struct msm_ringbuffer *cur_ring;
+
+ struct a6xx_gmu gmu;
+};
+
+#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ */
+#define A6XX_PROTECT_RW(_reg, _len) \
+ ((1 << 31) | \
+ (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define A6XX_PROTECT_RDONLY(_reg, _len) \
+ ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+
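+/*
+ * Worked example (illustrative, not part of the original patch):
+ * A6XX_PROTECT_RW(0x600, 0x51) expands to
+ * (1 << 31) | (0x51 << 18) | 0x600 = 0x81440600, i.e. bit 31 blocks
+ * reads as well as writes, bits 18 and up carry the length and
+ * bits 0-17 carry the starting register offset.
+ */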
+
+int a6xx_gmu_resume(struct a6xx_gpu *gpu);
+int a6xx_gmu_stop(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
+
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+
+int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
+#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
new file mode 100644
index 000000000000..f19ef4cb6ea4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/list.h>
+
+#include "a6xx_gmu.h"
+#include "a6xx_gmu.xml.h"
+
+#define HFI_MSG_ID(val) [val] = #val
+
+static const char * const a6xx_hfi_msg_id[] = {
+ HFI_MSG_ID(HFI_H2F_MSG_INIT),
+ HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
+ HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_TEST),
+};
+
+static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
+ u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, hdr, index = header->read_index;
+
+ if (header->read_index == header->write_index) {
+ header->rx_request = 1;
+ return 0;
+ }
+
+ hdr = queue->data[index];
+
+ /*
+ * If we are to assume that the GMU firmware is in fact a rational actor
+ * and is programmed to not send us a larger response than we expect
+ * then we can also assume that if the header size is unexpectedly large
+ * that it is due to memory corruption and/or hardware failure. In this
+ * case the only reasonable course of action is to BUG() to help harden
+ * the failure.
+ */
+
+ BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);
+
+ for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
+ data[i] = queue->data[index];
+ index = (index + 1) % header->size;
+ }
+
+ header->read_index = index;
+ return HFI_HEADER_SIZE(hdr);
+}
+
+static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, space, index = header->write_index;
+
+ spin_lock(&queue->lock);
+
+ space = CIRC_SPACE(header->write_index, header->read_index,
+ header->size);
+ if (space < dwords) {
+ header->dropped++;
+ spin_unlock(&queue->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < dwords; i++) {
+ queue->data[index] = data[i];
+ index = (index + 1) % header->size;
+ }
+
+ header->write_index = index;
+ spin_unlock(&queue->lock);
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
+ return 0;
+}
+
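+/*
+ * Worked example (illustrative): with the 0x400 dword queues set up by
+ * a6xx_hfi_queue_init(), write_index = 0x3f0 and read_index = 0x010 give
+ * CIRC_SPACE(0x3f0, 0x010, 0x400) = (0x010 - 0x3f1) & 0x3ff = 0x1f free
+ * dwords, so a 0x20 dword message would be dropped with -ENOSPC.
+ */
+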
+struct a6xx_hfi_response {
+ u32 id;
+ u32 seqnum;
+ struct list_head node;
+ struct completion complete;
+
+ u32 error;
+ u32 payload[16];
+};
+
+/*
+ * Incoming HFI ack messages can come in out of order so we need to store all
+ * the pending messages on a list until they are handled.
+ */
+static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(hfi_ack_lock);
+static LIST_HEAD(hfi_ack_list);
+
+static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_response *msg)
+{
+ struct a6xx_hfi_response *resp;
+ u32 id, seqnum;
+
+ /* msg->ret_header contains the header of the message being acked */
+ id = HFI_HEADER_ID(msg->ret_header);
+ seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
+
+ spin_lock(&hfi_ack_lock);
+ list_for_each_entry(resp, &hfi_ack_list, node) {
+ if (resp->id == id && resp->seqnum == seqnum) {
+ resp->error = msg->error;
+ memcpy(resp->payload, msg->payload,
+ sizeof(resp->payload));
+
+ complete(&resp->complete);
+ spin_unlock(&hfi_ack_lock);
+ return;
+ }
+ }
+ spin_unlock(&hfi_ack_lock);
+
+ dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
+}
+
+static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_response *msg)
+{
+ struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
+
+ dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
+}
+
+void a6xx_hfi_task(unsigned long data)
+{
+ struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ struct a6xx_hfi_msg_response resp;
+
+ for (;;) {
+ u32 id;
+ int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ sizeof(resp) >> 2);
+
+ /* Returns the number of dwords copied or negative on error */
+ if (ret <= 0) {
+ if (ret < 0)
+ dev_err(gmu->dev,
+ "Unable to read the HFI message queue\n");
+ break;
+ }
+
+ id = HFI_HEADER_ID(resp.header);
+
+ if (id == HFI_F2H_MSG_ACK)
+ a6xx_hfi_handle_ack(gmu, &resp);
+ else if (id == HFI_F2H_MSG_ERROR)
+ a6xx_hfi_handle_error(gmu, &resp);
+ }
+}
+
+static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
+ void *data, u32 size, u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
+ struct a6xx_hfi_response resp = { 0 };
+ int ret, dwords = size >> 2;
+ u32 seqnum;
+
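+ /* The sequence number occupies a 12 bit field in the message header */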
+ seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
+
+ /* First dword of the message is the message header - fill it in */
+ *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
+ (dwords << 8) | id;
+
+ init_completion(&resp.complete);
+ resp.id = id;
+ resp.seqnum = seqnum;
+
+ spin_lock_bh(&hfi_ack_lock);
+ list_add_tail(&resp.node, &hfi_ack_list);
+ spin_unlock_bh(&hfi_ack_lock);
+
+ ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to send message %s id %d\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ goto out;
+ }
+
+ /* Wait up to 5 seconds for the response */
+ ret = wait_for_completion_timeout(&resp.complete,
+ msecs_to_jiffies(5000));
+ if (!ret) {
+ dev_err(gmu->dev,
+ "Message %s id %d timed out waiting for response\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ ret = -ETIMEDOUT;
+ } else
+ ret = 0;
+
+out:
+ spin_lock_bh(&hfi_ack_lock);
+ list_del(&resp.node);
+ spin_unlock_bh(&hfi_ack_lock);
+
+ if (ret)
+ return ret;
+
+ if (resp.error) {
+ dev_err(gmu->dev, "Message %s id %d returned error %d\n",
+ a6xx_hfi_msg_id[id], seqnum, resp.error);
+ return -EINVAL;
+ }
+
+ if (payload && payload_size) {
+ int copy = min_t(u32, payload_size, sizeof(resp.payload));
+
+ memcpy(payload, resp.payload, copy);
+ }
+
+ return 0;
+}
+
+static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
+{
+ struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
+
+ msg.dbg_buffer_addr = (u32) gmu->debug->iova;
+ msg.dbg_buffer_size = (u32) gmu->debug->size;
+ msg.boot_state = boot_state;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
+{
+ struct a6xx_hfi_msg_fw_version msg = { 0 };
+
+ /* Currently supporting version 1.1 */
+ msg.supported_version = (1 << 28) | (1 << 16);
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
+ version, sizeof(*version));
+}
+
+static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_bw_table msg = { 0 };
+
+ /*
+ * The sdm845 GMU doesn't do bus frequency scaling on its own but it
+ * does need at least one entry in the list because it might be accessed
+ * when the GMU is shutting down. Send a single "off" entry.
+ */
+
+ msg.bw_level_num = 1;
+
+ msg.ddr_cmds_num = 3;
+ msg.ddr_wait_bitmask = 0x07;
+
+ msg.ddr_cmds_addrs[0] = 0x50000;
+ msg.ddr_cmds_addrs[1] = 0x5005c;
+ msg.ddr_cmds_addrs[2] = 0x5000c;
+
+ msg.ddr_cmds_data[0][0] = 0x40000000;
+ msg.ddr_cmds_data[0][1] = 0x40000000;
+ msg.ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+ * These are the CX (CNOC) votes. They are used, but the values for the
+ * sdm845 GMU are known and fixed so we can hard code them.
+ */
+
+ msg.cnoc_cmds_num = 3;
+ msg.cnoc_wait_bitmask = 0x05;
+
+ msg.cnoc_cmds_addrs[0] = 0x50034;
+ msg.cnoc_cmds_addrs[1] = 0x5007c;
+ msg.cnoc_cmds_addrs[2] = 0x5004c;
+
+ msg.cnoc_cmds_data[0][0] = 0x40000000;
+ msg.cnoc_cmds_data[0][1] = 0x00000000;
+ msg.cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg.cnoc_cmds_data[1][0] = 0x60000001;
+ msg.cnoc_cmds_data[1][1] = 0x20000001;
+ msg.cnoc_cmds_data[1][2] = 0x60000001;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_test msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_get_fw_version(gmu, NULL);
+ if (ret)
+ return ret;
+
+ /*
+ * We have to exchange version numbers per the sequence, but at this
+ * point the kernel driver doesn't need to know the exact version of
+ * the GMU firmware
+ */
+
+ ret = a6xx_hfi_send_perf_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ /*
+ * Let the GMU know that there won't be any more HFI messages until next
+ * boot
+ */
+ a6xx_hfi_send_test(gmu);
+
+ return 0;
+}
+
+void a6xx_hfi_stop(struct a6xx_gmu *gmu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+
+ if (!queue->header)
+ continue;
+
+ if (queue->header->read_index != queue->header->write_index)
+ dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+
+ queue->header->read_index = 0;
+ queue->header->write_index = 0;
+ }
+}
+
+static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
+ struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
+ u32 id)
+{
+ spin_lock_init(&queue->lock);
+ queue->header = header;
+ queue->data = virt;
+ atomic_set(&queue->seqnum, 0);
+
+ /* Set up the shared memory header */
+ header->iova = iova;
+ header->type = 10 << 8 | id;
+ header->status = 1;
+ header->size = SZ_4K >> 2;
+ header->msg_size = 0;
+ header->dropped = 0;
+ header->rx_watermark = 1;
+ header->tx_watermark = 1;
+ header->rx_request = 1;
+ header->tx_request = 0;
+ header->read_index = 0;
+ header->write_index = 0;
+}
+
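+/*
+ * Resulting layout of the HFI buffer (derived from the offsets below, for
+ * illustration): page 0 holds the queue table header and the per-queue
+ * headers, page 1 holds the command queue data and page 2 holds the
+ * response queue data.
+ */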
+void a6xx_hfi_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gmu_bo *hfi = gmu->hfi;
+ struct a6xx_hfi_queue_table_header *table = hfi->virt;
+ struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
+ u64 offset;
+ int table_size;
+
+ /*
+ * The table size is the size of the table header plus all of the queue
+ * headers
+ */
+ table_size = sizeof(*table);
+ table_size += (ARRAY_SIZE(gmu->queues) *
+ sizeof(struct a6xx_hfi_queue_header));
+
+ table->version = 0;
+ table->size = table_size;
+ /* First queue header is located immediately after the table header */
+ table->qhdr0_offset = sizeof(*table) >> 2;
+ table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
+ table->num_queues = ARRAY_SIZE(gmu->queues);
+ table->active_queues = ARRAY_SIZE(gmu->queues);
+
+ /* Command queue */
+ offset = SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
+ hfi->iova + offset, 0);
+
+ /* GMU response queue */
+ offset += SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
+ hfi->iova + offset, 4);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
new file mode 100644
index 000000000000..60d1319fa44f
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_HFI_H_
+#define _A6XX_HFI_H_
+
+struct a6xx_hfi_queue_table_header {
+ u32 version;
+ u32 size; /* Size of the queue table in dwords */
+ u32 qhdr0_offset; /* Offset of the first queue header */
+ u32 qhdr_size; /* Size of the queue headers */
+ u32 num_queues; /* Number of total queues */
+ u32 active_queues; /* Number of active queues */
+};
+
+struct a6xx_hfi_queue_header {
+ u32 status;
+ u32 iova;
+ u32 type;
+ u32 size;
+ u32 msg_size;
+ u32 dropped;
+ u32 rx_watermark;
+ u32 tx_watermark;
+ u32 rx_request;
+ u32 tx_request;
+ u32 read_index;
+ u32 write_index;
+};
+
+struct a6xx_hfi_queue {
+ struct a6xx_hfi_queue_header *header;
+ spinlock_t lock;
+ u32 *data;
+ atomic_t seqnum;
+};
+
+/* This is the outgoing queue to the GMU */
+#define HFI_COMMAND_QUEUE 0
+
+/* This is the incoming response queue from the GMU */
+#define HFI_RESPONSE_QUEUE 1
+
+#define HFI_HEADER_ID(msg) ((msg) & 0xff)
+#define HFI_HEADER_SIZE(msg) (((msg) >> 8) & 0xff)
+#define HFI_HEADER_SEQNUM(msg) (((msg) >> 20) & 0xfff)
+
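+/*
+ * Worked example (illustrative): a6xx_hfi_send_msg() builds headers as
+ * (seqnum << 20) | (HFI_MSG_CMD << 16) | (size << 8) | id, so a header
+ * of 0x00100304 decodes to seqnum 1, size 3 dwords and id 4
+ * (HFI_H2F_MSG_PERF_TABLE).
+ */
+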
+/* FIXME: Do we need this or can we use ARRAY_SIZE? */
+#define HFI_RESPONSE_PAYLOAD_SIZE 16
+
+/* HFI message types */
+
+#define HFI_MSG_CMD 0
+#define HFI_MSG_ACK 2
+
+#define HFI_F2H_MSG_ACK 126
+
+struct a6xx_hfi_msg_response {
+ u32 header;
+ u32 ret_header;
+ u32 error;
+ u32 payload[HFI_RESPONSE_PAYLOAD_SIZE];
+};
+
+#define HFI_F2H_MSG_ERROR 100
+
+struct a6xx_hfi_msg_error {
+ u32 header;
+ u32 code;
+ u32 payload[2];
+};
+
+#define HFI_H2F_MSG_INIT 0
+
+struct a6xx_hfi_msg_gmu_init_cmd {
+ u32 header;
+ u32 seg_id;
+ u32 dbg_buffer_addr;
+ u32 dbg_buffer_size;
+ u32 boot_state;
+};
+
+#define HFI_H2F_MSG_FW_VERSION 1
+
+struct a6xx_hfi_msg_fw_version {
+ u32 header;
+ u32 supported_version;
+};
+
+#define HFI_H2F_MSG_PERF_TABLE 4
+
+struct perf_level {
+ u32 vote;
+ u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
+#define HFI_H2F_MSG_BW_TABLE 3
+
+struct a6xx_hfi_msg_bw_table {
+ u32 header;
+ u32 bw_level_num;
+ u32 cnoc_cmds_num;
+ u32 ddr_cmds_num;
+ u32 cnoc_wait_bitmask;
+ u32 ddr_wait_bitmask;
+ u32 cnoc_cmds_addrs[6];
+ u32 cnoc_cmds_data[2][6];
+ u32 ddr_cmds_addrs[8];
+ u32 ddr_cmds_data[16][8];
+};
+
+#define HFI_H2F_MSG_TEST 5
+
+struct a6xx_hfi_msg_test {
+ u32 header;
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index b634cf71352b..5dace1350810 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -44,6 +46,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+enum chip {
+ A2XX = 0,
+ A3XX = 0,
+ A4XX = 0,
+ A5XX = 0,
+ A6XX = 0,
+};
+
enum adreno_pa_su_sc_draw {
PC_DRAW_POINTS = 0,
PC_DRAW_LINES = 1,
@@ -181,6 +191,12 @@ enum a3xx_rb_blend_opcode {
BLEND_MAX_DST_SRC = 4,
};
+enum a4xx_tess_spacing {
+ EQUAL_SPACING = 0,
+ ODD_SPACING = 2,
+ EVEN_SPACING = 3,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 62bdb7316da1..7d3e9a129ac7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -30,62 +30,97 @@ static const struct adreno_info gpulist[] = {
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
.revn = 305,
.name = "A305",
- .pm4fw = "a300_pm4.fw",
- .pfpfw = "a300_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
.gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 0, 6, 0),
.revn = 307, /* because a305c is revn==306 */
.name = "A306",
- .pm4fw = "a300_pm4.fw",
- .pfpfw = "a300_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
.gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
.revn = 320,
.name = "A320",
- .pm4fw = "a300_pm4.fw",
- .pfpfw = "a300_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
.gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 3, 0, ANY_ID),
.revn = 330,
.name = "A330",
- .pm4fw = "a330_pm4.fw",
- .pfpfw = "a330_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a330_pm4.fw",
+ [ADRENO_FW_PFP] = "a330_pfp.fw",
+ },
.gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
.revn = 420,
.name = "A420",
- .pm4fw = "a420_pm4.fw",
- .pfpfw = "a420_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 3, 0, ANY_ID),
.revn = 430,
.name = "A430",
- .pm4fw = "a420_pm4.fw",
- .pfpfw = "a420_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
.name = "A530",
- .pm4fw = "a530_pm4.fw",
- .pfpfw = "a530_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ [ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
+ },
.gmem = SZ_1M,
+ /*
+ * Increase inactive period to 250 to avoid bouncing
+ * the GDSC which appears to make it grumpy
+ */
+ .inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
- .gpmufw = "a530v3_gpmu.fw2",
.zapfw = "a530_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 3, 0, ANY_ID),
+ .revn = 630,
+ .name = "A630",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .init = a6xx_gpu_init,
},
};
@@ -102,6 +137,8 @@ MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
+MODULE_FIRMWARE("qcom/a630_sqe.fw");
+MODULE_FIRMWARE("qcom/a630_gmu.bin");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -130,6 +167,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct msm_gpu *gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
int ret;
if (pdev)
@@ -140,16 +178,43 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return NULL;
}
- pm_runtime_get_sync(&pdev->dev);
+ adreno_gpu = to_adreno_gpu(gpu);
+
+ /*
+ * The number one reason for HW init to fail is if the firmware isn't
+ * loaded yet. Try to load it first and don't bother continuing
+ * otherwise
+ */
+
+ ret = adreno_load_fw(adreno_gpu);
+ if (ret)
+ return NULL;
+
+ /* Make sure pm runtime is active and reset any previous errors */
+ pm_runtime_set_active(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ return NULL;
+ }
+
mutex_lock(&dev->struct_mutex);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&dev->struct_mutex);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
}
+#ifdef CONFIG_DEBUG_FS
+ if (gpu->funcs->debugfs_init) {
+ gpu->funcs->debugfs_init(gpu, dev->primary);
+ gpu->funcs->debugfs_init(gpu, dev->render);
+ }
+#endif
+
return gpu;
}
@@ -295,6 +360,7 @@ static int adreno_suspend(struct device *dev)
#endif
static const struct dev_pm_ops adreno_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index de63ff26a062..da1363a0c54d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,7 +17,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/ascii85.h>
+#include <linux/kernel.h>
#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -70,10 +73,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
struct drm_device *drm = adreno_gpu->base.dev;
const struct firmware *fw = NULL;
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
int ret;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname)
+ return ERR_PTR(-ENOMEM);
/*
* Try first to load from qcom/$fwfile using a direct load (to avoid
@@ -87,11 +92,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
@@ -106,11 +112,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
fwname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
@@ -126,41 +133,65 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
dev_err(drm->dev, "failed to load %s\n", fwname);
- return ERR_PTR(-ENOENT);
+ fw = ERR_PTR(-ENOENT);
+out:
+ kfree(newname);
+ return fw;
}
-static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
+int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
- const struct firmware *fw;
+ int i;
- if (adreno_gpu->pm4)
- return 0;
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
+ const struct firmware *fw;
+
+ if (!adreno_gpu->info->fw[i])
+ continue;
+
+ /* Skip if the firmware has already been loaded */
+ if (adreno_gpu->fw[i])
+ continue;
- fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pm4fw);
- if (IS_ERR(fw))
- return PTR_ERR(fw);
- adreno_gpu->pm4 = fw;
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
- fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pfpfw);
- if (IS_ERR(fw)) {
- release_firmware(adreno_gpu->pm4);
- adreno_gpu->pm4 = NULL;
- return PTR_ERR(fw);
+ adreno_gpu->fw[i] = fw;
}
- adreno_gpu->pfp = fw;
return 0;
}
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
+
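+ /*
+ * The first dword of the firmware image holds a version word rather
+ * than microcode (as the other Adreno ucode loaders treat it), which
+ * is why the copy below skips 4 bytes.
+ */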
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+
+ msm_gem_put_vaddr(bo);
+
+ return bo;
+}
+
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -293,26 +324,12 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_RING(ring, 0x00000000);
}
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
- /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
- OUT_PKT3(ring, CP_INTERRUPT, 1);
- OUT_RING(ring, 0x80000000);
-
- /* Workaround for missing irq issue on 8x16/a306. Unsure if the
- * root cause is a platform issue or some a306 quirk, but this
- * keeps things humming along:
- */
- if (adreno_is_a306(adreno_gpu)) {
- OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
- OUT_RING(ring, 0x00000000);
- OUT_PKT3(ring, CP_INTERRUPT, 1);
- OUT_RING(ring, 0x80000000);
- }
-
#if 0
if (adreno_is_a3xx(adreno_gpu)) {
/* Dummy set-constant to trigger context rollover */
@@ -362,40 +379,185 @@ bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return false;
}
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i, count = 0;
+
+ kref_init(&state->ref);
+
+ ktime_get_real_ts64(&state->time);
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int size = 0, j;
+
+ state->ring[i].fence = gpu->rb[i]->memptrs->fence;
+ state->ring[i].iova = gpu->rb[i]->iova;
+ state->ring[i].seqno = gpu->rb[i]->seqno;
+ state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
+ state->ring[i].wptr = get_wptr(gpu->rb[i]);
+
+ /* Copy at least 'wptr' dwords of the data */
+ size = state->ring[i].wptr;
+
+ /* After wptr find the last non zero dword to save space */
+ for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
+ if (gpu->rb[i]->start[j])
+ size = j + 1;
+
+ if (size) {
+ state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+ if (state->ring[i].data) {
+ memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data_size = size << 2;
+ }
+ }
+ }
+
+ /* Count the number of registers */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
+ count += adreno_gpu->registers[i + 1] -
+ adreno_gpu->registers[i] + 1;
+
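+ /* Each register is saved as an { offset, value } pair, hence count * 2 */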
+ state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
+ if (state->registers) {
+ int pos = 0;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 start = adreno_gpu->registers[i];
+ u32 end = adreno_gpu->registers[i + 1];
+ u32 addr;
+
+ for (addr = start; addr <= end; addr++) {
+ state->registers[pos++] = addr;
+ state->registers[pos++] = gpu_read(gpu, addr);
+ }
+ }
+
+ state->nr_registers = count;
+ }
+
+ return 0;
+}
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(state->ring); i++)
+ kfree(state->ring[i].data);
+
+ for (i = 0; state->bos && i < state->nr_bos; i++)
+ kvfree(state->bos[i].data);
+
+ kfree(state->bos);
+ kfree(state->comm);
+ kfree(state->cmd);
+ kfree(state->registers);
+}
+
+static void adreno_gpu_state_kref_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+
+ adreno_gpu_state_destroy(state);
+ kfree(state);
+}
+
+int adreno_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+
+static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+{
+ char out[ASCII85_BUFSZ];
+ long l, datalen, i;
+
+ if (!ptr || !len)
+ return;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will any data
+ * completely fill the entire allocated size of the buffer
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++) {
+ if (ptr[i])
+ datalen = (i + 1) << 2;
+ }
+
+ /* Skip printing the object if it is empty */
+ if (datalen == 0)
+ return;
+
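+ /* One ascii85 group (at most five characters) is emitted per 32-bit word */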
+ l = ascii85_encode_len(datalen);
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(ptr[i], out));
+
+ drm_puts(p, "\n");
+}
+
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
- seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- for (i = 0; i < gpu->nr_rings; i++) {
- struct msm_ringbuffer *ring = gpu->rb[i];
+ drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
- seq_printf(m, "rb %d: fence: %d/%d\n", i,
- ring->memptrs->fence, ring->seqno);
+ drm_puts(p, "ringbuffer:\n");
- seq_printf(m, " rptr: %d\n",
- get_rptr(adreno_gpu, ring));
- seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ for (i = 0; i < gpu->nr_rings; i++) {
+ drm_printf(p, " - id: %d\n", i);
+ drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
+ drm_printf(p, " last-fence: %d\n", state->ring[i].seqno);
+ drm_printf(p, " retired-fence: %d\n", state->ring[i].fence);
+ drm_printf(p, " rptr: %d\n", state->ring[i].rptr);
+ drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
+ drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
+
+ adreno_show_object(p, state->ring[i].data,
+ state->ring[i].data_size);
}
- /* dump these out in a form that can be parsed by demsm: */
- seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
- for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
- uint32_t start = adreno_gpu->registers[i];
- uint32_t end = adreno_gpu->registers[i+1];
- uint32_t addr;
+ if (state->bos) {
+ drm_puts(p, "bos:\n");
- for (addr = start; addr <= end; addr++) {
- uint32_t val = gpu_read(gpu, addr);
- seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
+ for (i = 0; i < state->nr_bos; i++) {
+ drm_printf(p, " - iova: 0x%016llx\n",
+ state->bos[i].iova);
+ drm_printf(p, " size: %zd\n", state->bos[i].size);
+
+ adreno_show_object(p, state->bos[i].data,
+ state->bos[i].size);
}
}
+
+ drm_puts(p, "registers:\n");
+
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
}
#endif
@@ -559,7 +721,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_get_pwrlevels(&pdev->dev, gpu);
- pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -569,8 +732,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- release_firmware(adreno_gpu->pm4);
- release_firmware(adreno_gpu->pfp);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
+ release_firmware(adreno_gpu->fw[i]);
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 8d3d0a924908..de6e6ee42fba 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -48,6 +48,15 @@ enum adreno_regs {
REG_ADRENO_REGISTER_MAX,
};
+enum {
+ ADRENO_FW_PM4 = 0,
+ ADRENO_FW_SQE = 0, /* a6xx */
+ ADRENO_FW_PFP = 1,
+ ADRENO_FW_GMU = 1, /* a6xx */
+ ADRENO_FW_GPMU = 2,
+ ADRENO_FW_MAX,
+};
+
enum adreno_quirks {
ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
@@ -72,12 +81,12 @@ struct adreno_info {
struct adreno_rev rev;
uint32_t revn;
const char *name;
- const char *pm4fw, *pfpfw;
- const char *gpmufw;
+ const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
enum adreno_quirks quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
+ u32 inactive_period;
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
@@ -115,7 +124,7 @@ struct adreno_gpu {
} fwloc;
/* firmware: */
- const struct firmware *pm4, *pfp;
+ const struct firmware *fw[ADRENO_FW_MAX];
/*
* Register offsets are different between some GPUs.
@@ -200,14 +209,17 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
@@ -218,7 +230,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+int adreno_load_fw(struct adreno_gpu *adreno_gpu);
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state);
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
+int adreno_gpu_state_put(struct msm_gpu_state *state);
/* ringbuffer helpers (the parts that are adreno specific) */
@@ -320,6 +337,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
enum adreno_regs lo, enum adreno_regs hi, u64 data)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index fb605a3534cf..03a91e10b310 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -71,7 +73,8 @@ enum vgt_event_type {
FLUSH_SO_1 = 18,
FLUSH_SO_2 = 19,
FLUSH_SO_3 = 20,
- UNK_19 = 25,
+ PC_CCU_INVALIDATE_DEPTH = 24,
+ PC_CCU_INVALIDATE_COLOR = 25,
UNK_1C = 28,
UNK_1D = 29,
BLIT = 30,
@@ -199,9 +202,12 @@ enum adreno_pm4_type3_packets {
CP_WAIT_MEM_WRITES = 18,
CP_COND_REG_EXEC = 71,
CP_MEM_TO_REG = 66,
+ CP_EXEC_CS_INDIRECT = 65,
CP_EXEC_CS = 51,
CP_PERFCOUNTER_ACTION = 80,
CP_SMMU_TABLE_UPDATE = 83,
+ CP_SET_MARKER = 101,
+ CP_SET_PSEUDO_REG = 86,
CP_CONTEXT_REG_BUNCH = 92,
CP_YIELD_ENABLE = 28,
CP_SKIP_IB2_ENABLE_GLOBAL = 29,
@@ -215,7 +221,10 @@ enum adreno_pm4_type3_packets {
CP_COMPUTE_CHECKPOINT = 110,
CP_MEM_TO_MEM = 115,
CP_BLIT = 44,
- CP_UNK_39 = 57,
+ CP_REG_TEST = 57,
+ CP_SET_MODE = 99,
+ CP_LOAD_STATE6_GEOM = 50,
+ CP_LOAD_STATE6_FRAG = 52,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -224,6 +233,11 @@ enum adreno_pm4_type3_packets {
IN_INCR_UPDT_STATE = 85,
IN_INCR_UPDT_CONST = 86,
IN_INCR_UPDT_INSTR = 87,
+ PKT4 = 4,
+ CP_UNK_A6XX_14 = 20,
+ CP_UNK_A6XX_36 = 54,
+ CP_UNK_A6XX_55 = 85,
+ UNK_A6XX_6D = 109,
};
enum adreno_state_block {
@@ -278,6 +292,33 @@ enum a4xx_state_src {
SS4_INDIRECT = 2,
};
+enum a6xx_state_block {
+ SB6_VS_TEX = 0,
+ SB6_HS_TEX = 1,
+ SB6_DS_TEX = 2,
+ SB6_GS_TEX = 3,
+ SB6_FS_TEX = 4,
+ SB6_CS_TEX = 5,
+ SB6_VS_SHADER = 8,
+ SB6_HS_SHADER = 9,
+ SB6_DS_SHADER = 10,
+ SB6_GS_SHADER = 11,
+ SB6_FS_SHADER = 12,
+ SB6_CS_SHADER = 13,
+ SB6_SSBO = 14,
+ SB6_CS_SSBO = 15,
+};
+
+enum a6xx_state_type {
+ ST6_SHADER = 0,
+ ST6_CONSTANTS = 1,
+};
+
+enum a6xx_state_src {
+ SS6_DIRECT = 0,
+ SS6_INDIRECT = 2,
+};
+
enum a4xx_index_size {
INDEX4_SIZE_8_BIT = 0,
INDEX4_SIZE_16_BIT = 1,
@@ -300,6 +341,7 @@ enum render_mode_cmd {
GMEM = 3,
BLIT2D = 5,
BLIT2DSCALE = 7,
+ END2D = 8,
};
enum cp_blit_cmd {
@@ -308,6 +350,22 @@ enum cp_blit_cmd {
BLIT_OP_SCALE = 3,
};
+enum a6xx_render_mode {
+ RM6_BYPASS = 1,
+ RM6_BINNING = 2,
+ RM6_GMEM = 4,
+ RM6_BLIT2D = 5,
+ RM6_RESOLVE = 6,
+};
+
+enum pseudo_reg {
+ SMMU_INFO = 0,
+ NON_SECURE_SAVE_ADDR = 1,
+ SECURE_SAVE_ADDR = 2,
+ NON_PRIV_SAVE_ADDR = 3,
+ COUNTER = 4,
+};
+
#define REG_CP_LOAD_STATE_0 0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
@@ -349,7 +407,7 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
}
#define REG_CP_LOAD_STATE4_0 0x00000000
-#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x0000ffff
+#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x00003fff
#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0
static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val)
{
@@ -396,6 +454,54 @@ static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val)
return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK;
}
+#define REG_CP_LOAD_STATE6_0 0x00000000
+#define CP_LOAD_STATE6_0_DST_OFF__MASK 0x00003fff
+#define CP_LOAD_STATE6_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x00004000
+#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14
+static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_TYPE__SHIFT) & CP_LOAD_STATE6_0_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_SRC__MASK 0x00030000
+#define CP_LOAD_STATE6_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE6_0_STATE_SRC(enum a6xx_state_src val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_SRC__SHIFT) & CP_LOAD_STATE6_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_BLOCK__MASK 0x003c0000
+#define CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT 18
+static inline uint32_t CP_LOAD_STATE6_0_STATE_BLOCK(enum a6xx_state_block val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE6_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE6_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE6_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE6_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_1 0x00000001
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_2 0x00000002
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK;
+}
+
#define REG_CP_DRAW_INDX_0 0x00000000
#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
@@ -580,6 +686,153 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
+#define REG_A4XX_CP_DRAW_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK 0x01f00000
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT 20
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_TESS_MODE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK 0x01f00000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT 20
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK;
+}
+
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_4 0x00000004
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_5 0x00000005
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK;
+}
+
static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
@@ -593,6 +846,12 @@ static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK 0x00f00000
+#define CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT 20
+static inline uint32_t CP_SET_DRAW_STATE__0_ENABLE_MASK(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT) & CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK;
+}
#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
@@ -708,6 +967,22 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK;
}
+#define REG_CP_SET_BIN_DATA5_5 0x00000005
+#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_6 0x00000006
+#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK;
+}
+
#define REG_CP_REG_TO_MEM_0 0x00000000
#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
#define CP_REG_TO_MEM_0_REG__SHIFT 0
@@ -732,6 +1007,46 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
}
+#define REG_CP_REG_TO_MEM_2 0x00000002
+#define CP_REG_TO_MEM_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_0 0x00000000
+#define CP_MEM_TO_REG_0_REG__MASK 0x0000ffff
+#define CP_MEM_TO_REG_0_REG__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_REG__SHIFT) & CP_MEM_TO_REG_0_REG__MASK;
+}
+#define CP_MEM_TO_REG_0_CNT__MASK 0x3ff80000
+#define CP_MEM_TO_REG_0_CNT__SHIFT 19
+static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK;
+}
+#define CP_MEM_TO_REG_0_64B 0x40000000
+#define CP_MEM_TO_REG_0_ACCUMULATE 0x80000000
+
+#define REG_CP_MEM_TO_REG_1 0x00000001
+#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff
+#define CP_MEM_TO_REG_1_SRC__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_1_SRC(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_1_SRC__SHIFT) & CP_MEM_TO_REG_1_SRC__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_2 0x00000002
+#define CP_MEM_TO_REG_2_SRC_HI__MASK 0xffffffff
+#define CP_MEM_TO_REG_2_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_2_SRC_HI__SHIFT) & CP_MEM_TO_REG_2_SRC_HI__MASK;
+}
+
#define REG_CP_MEM_TO_MEM_0 0x00000000
#define CP_MEM_TO_MEM_0_NEG_A 0x00000001
#define CP_MEM_TO_MEM_0_NEG_B 0x00000002
@@ -953,15 +1268,15 @@ static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val)
#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002
#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003
-
-#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN(uint32_t val)
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN(uint32_t val)
{
- return ((val) << CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK;
+ return ((val) << CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK;
}
+#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
+
#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005
#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff
#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0
@@ -978,6 +1293,8 @@ static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val)
return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK;
}
+#define REG_CP_COMPUTE_CHECKPOINT_7 0x00000007
+
#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
@@ -1032,13 +1349,13 @@ static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
}
#define REG_CP_BLIT_1 0x00000001
-#define CP_BLIT_1_SRC_X1__MASK 0x0000ffff
+#define CP_BLIT_1_SRC_X1__MASK 0x00003fff
#define CP_BLIT_1_SRC_X1__SHIFT 0
static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
{
return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
}
-#define CP_BLIT_1_SRC_Y1__MASK 0xffff0000
+#define CP_BLIT_1_SRC_Y1__MASK 0x3fff0000
#define CP_BLIT_1_SRC_Y1__SHIFT 16
static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
{
@@ -1046,13 +1363,13 @@ static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
}
#define REG_CP_BLIT_2 0x00000002
-#define CP_BLIT_2_SRC_X2__MASK 0x0000ffff
+#define CP_BLIT_2_SRC_X2__MASK 0x00003fff
#define CP_BLIT_2_SRC_X2__SHIFT 0
static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
{
return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
}
-#define CP_BLIT_2_SRC_Y2__MASK 0xffff0000
+#define CP_BLIT_2_SRC_Y2__MASK 0x3fff0000
#define CP_BLIT_2_SRC_Y2__SHIFT 16
static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
{
@@ -1060,13 +1377,13 @@ static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
}
#define REG_CP_BLIT_3 0x00000003
-#define CP_BLIT_3_DST_X1__MASK 0x0000ffff
+#define CP_BLIT_3_DST_X1__MASK 0x00003fff
#define CP_BLIT_3_DST_X1__SHIFT 0
static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
{
return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
}
-#define CP_BLIT_3_DST_Y1__MASK 0xffff0000
+#define CP_BLIT_3_DST_Y1__MASK 0x3fff0000
#define CP_BLIT_3_DST_Y1__SHIFT 16
static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
{
@@ -1074,13 +1391,13 @@ static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
}
#define REG_CP_BLIT_4 0x00000004
-#define CP_BLIT_4_DST_X2__MASK 0x0000ffff
+#define CP_BLIT_4_DST_X2__MASK 0x00003fff
#define CP_BLIT_4_DST_X2__SHIFT 0
static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
{
return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
}
-#define CP_BLIT_4_DST_Y2__MASK 0xffff0000
+#define CP_BLIT_4_DST_Y2__MASK 0x3fff0000
#define CP_BLIT_4_DST_Y2__SHIFT 16
static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
{
@@ -1113,5 +1430,129 @@ static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val)
return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK;
}
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000
+
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_1_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK;
+}
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK;
+}
+
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_3 0x00000003
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK 0x00000ffc
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT 2
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK 0x003ff000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT 12
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK 0xffc00000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK;
+}
+
+#define REG_A2XX_CP_SET_MARKER_0 0x00000000
+#define A2XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f
+#define A2XX_CP_SET_MARKER_0_MARKER__SHIFT 0
+static inline uint32_t A2XX_CP_SET_MARKER_0_MARKER(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_MARKER_0_MARKER__SHIFT) & A2XX_CP_SET_MARKER_0_MARKER__MASK;
+}
+#define A2XX_CP_SET_MARKER_0_MODE__MASK 0x0000000f
+#define A2XX_CP_SET_MARKER_0_MODE__SHIFT 0
+static inline uint32_t A2XX_CP_SET_MARKER_0_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A2XX_CP_SET_MARKER_0_MODE__SHIFT) & A2XX_CP_SET_MARKER_0_MODE__MASK;
+}
+#define A2XX_CP_SET_MARKER_0_IFPC 0x00000100
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007
+#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+}
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff
+#define A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A2XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+}
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff
+#define A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A2XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+}
+
+#define REG_A2XX_CP_REG_TEST_0 0x00000000
+#define A2XX_CP_REG_TEST_0_REG__MASK 0x00000fff
+#define A2XX_CP_REG_TEST_0_REG__SHIFT 0
+static inline uint32_t A2XX_CP_REG_TEST_0_REG(uint32_t val)
+{
+ return ((val) << A2XX_CP_REG_TEST_0_REG__SHIFT) & A2XX_CP_REG_TEST_0_REG__MASK;
+}
+#define A2XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
+#define A2XX_CP_REG_TEST_0_BIT__SHIFT 20
+static inline uint32_t A2XX_CP_REG_TEST_0_BIT(uint32_t val)
+{
+ return ((val) << A2XX_CP_REG_TEST_0_BIT__SHIFT) & A2XX_CP_REG_TEST_0_BIT__MASK;
+}
+#define A2XX_CP_REG_TEST_0_UNK25 0x02000000
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
new file mode 100644
index 000000000000..879c13fe74e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_trace.h"
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg: private data of callback handler
+ * @irq_idx: interrupt index
+ */
+static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
+{
+ struct dpu_kms *dpu_kms = arg;
+ struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+
+ pr_debug("irq_idx=%d\n", irq_idx);
+
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+ DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+ }
+
+ atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+ /*
+ * Perform registered function callback
+ */
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+ if (cb->func)
+ cb->func(cb->arg, irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ /*
+ * Clear pending interrupt status in HW.
+ * NOTE: dpu_core_irq_callback_handler is protected by top-level
+ * spinlock, so it is safe to clear any interrupt status here.
+ */
+ dpu_kms->hw_intr->ops.clear_intr_status_nolock(
+ dpu_kms->hw_intr,
+ irq_idx);
+}
+
+int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type, u32 instance_idx)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.irq_idx_lookup)
+ return -EINVAL;
+
+ return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+ instance_idx);
+}
+
+/**
+ * _dpu_core_irq_enable - enable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ unsigned long irq_flags;
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->irq_obj.enable_counts ||
+ !dpu_kms->irq_obj.irq_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
+
+ if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+ ret = dpu_kms->hw_intr->ops.enable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ /* empty callback list but interrupt is enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n",
+ irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ }
+
+ return ret;
+}
+
+int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+/**
+ * _dpu_core_irq_disable - disable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
+
+ if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+ ret = dpu_kms->hw_intr->ops.disable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idx);
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ }
+
+ return ret;
+}
+
+int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts == 2)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.get_interrupt_status)
+ return 0;
+
+ if (irq_idx < 0) {
+ DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+ __builtin_return_address(0), irq_idx);
+ return 0;
+ }
+
+ return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
+ irq_idx, clear);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ list_add_tail(&register_irq_cb->list,
+ &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ /* empty callback list but interrupt is still enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.clear_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.disable_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct dpu_irq *irq_obj = s->private;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+ int i, irq_count, enable_count, cb_count;
+
+ if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ for (i = 0; i < irq_obj->total_irqs; i++) {
+ spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+ cb_count = 0;
+ irq_count = atomic_read(&irq_obj->irq_counts[i]);
+ enable_count = atomic_read(&irq_obj->enable_counts[i]);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+ cb_count++;
+ spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+ if (irq_count || enable_count || cb_count)
+ seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+ i, irq_count, enable_count, cb_count);
+ }
+
+ return 0;
+}
+
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
+ parent, &dpu_kms->irq_obj,
+ &dpu_debugfs_core_irq_fops);
+
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove(dpu_kms->irq_obj.debugfs_file);
+ dpu_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ spin_lock_init(&dpu_kms->irq_obj.cb_lock);
+
+ /* Create irq callbacks for all possible irq_idx */
+ dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+ dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(struct list_head), GFP_KERNEL);
+ dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
+ INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
+ atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
+ atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+ }
+}
+
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
+{
+ return 0;
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+ if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
+ !list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ kfree(dpu_kms->irq_obj.irq_cb_tbl);
+ kfree(dpu_kms->irq_obj.enable_counts);
+ kfree(dpu_kms->irq_obj.irq_counts);
+ dpu_kms->irq_obj.irq_cb_tbl = NULL;
+ dpu_kms->irq_obj.enable_counts = NULL;
+ dpu_kms->irq_obj.irq_counts = NULL;
+ dpu_kms->irq_obj.total_irqs = 0;
+}
+
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+{
+	/*
+	 * Read the interrupt status from all sources. Interrupt statuses
+	 * are stored within hw_intr and are cleared as part of the read.
+	 * An individual interrupt status bit is only stored if that
+	 * interrupt is enabled.
+	 */
+ dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
+
+	/*
+	 * Dispatch to the HW driver to look up the interrupt that fired.
+	 * When a matching interrupt is located, the HW driver calls
+	 * dpu_core_irq_callback_handler with the irq_idx from the lookup
+	 * table. dpu_core_irq_callback_handler runs the registered
+	 * callbacks and clears the interrupt status once the callbacks
+	 * have finished.
+	 */
+ dpu_kms->hw_intr->ops.dispatch_irqs(
+ dpu_kms->hw_intr,
+ dpu_core_irq_callback_handler,
+ dpu_kms);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 000000000000..5e98bba46af5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_idx_lookup - IRQ helper function for looking up irq_idx in the
+ *                      HW interrupt mapping table.
+ * @dpu_kms:		DPU handle
+ * @intr_type:		DPU HW interrupt type for lookup
+ * @instance_idx:	DPU HW block instance defined in dpu_hw_mdss.h
+ * @return:		irq_idx, or -EINVAL when the lookup fails
+ */
+int dpu_core_irq_idx_lookup(
+ struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type,
+ uint32_t instance_idx);
+
+/**
+ * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success enabling IRQ, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on
+ * each disable. The interrupt is enabled if its count is 0 before the
+ * increment.
+ */
+int dpu_core_irq_enable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
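+/*
+ * Illustrative usage (a sketch, not taken from driver code; assumes the
+ * intr_type/instance pair exists in the HW interrupt mapping table):
+ *
+ *	int irq_idx = dpu_core_irq_idx_lookup(dpu_kms,
+ *			DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0);
+ *	if (irq_idx >= 0)
+ *		dpu_core_irq_enable(dpu_kms, &irq_idx, 1);
+ */
+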
+/**
+ * dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success disabling IRQ, otherwise failure
+ *
+ * This function increments the count on each enable and decrements it on
+ * each disable. The interrupt is disabled if its count reaches 0 after the
+ * decrement.
+ */
+int dpu_core_irq_disable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @clear: True to clear the irq after read
+ * @return: non-zero if an interrupt was detected; zero otherwise
+ */
+u32 dpu_core_irq_read(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ bool clear);
+
+/**
+ * dpu_core_irq_register_callback - register a callback function for an IRQ
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing the callback function
+ * and argument. Must be non-NULL and must remain valid
+ * until unregistration.
+ * @return: 0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_core_irq_unregister_callback - unregister a callback function for an IRQ
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing the callback function
+ * and argument.
+ * This must match the structure passed at registration.
+ * @return: 0 for success unregistering the callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_unregister_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
+
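+/*
+ * Illustrative lifecycle (a sketch, not taken from driver code; "me" is a
+ * hypothetical driver object embedding a struct dpu_irq_callback "irq_cb"):
+ *
+ *	me->irq_cb.func = my_irq_handler;
+ *	me->irq_cb.arg = me;
+ *	INIT_LIST_HEAD(&me->irq_cb.list);
+ *	dpu_core_irq_register_callback(dpu_kms, irq_idx, &me->irq_cb);
+ *	...
+ *	dpu_core_irq_unregister_callback(dpu_kms, irq_idx, &me->irq_cb);
+ */
+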
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @return: 0 on success
+ */
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent);
+
+/**
+ * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @dpu_kms: pointer to kms
+ */
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
+
+#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 000000000000..41c5191f9056
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,637 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+#define DPU_PERF_MODE_STRING_SIZE 128
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum dpu_perf_mode {
+ DPU_PERF_MODE_NORMAL,
+ DPU_PERF_MODE_MINIMUM,
+ DPU_PERF_MODE_FIXED,
+ DPU_PERF_MODE_MAX
+};
+
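+/*
+ * Illustrative only: the mode is selected at runtime through the
+ * "perf_mode" debugfs file created by dpu_core_perf_debugfs_init()
+ * (the debugfs path varies with the DRM device), e.g.:
+ *
+ *	echo 2 > <debugfs>/core_perf/perf_mode	(selects DPU_PERF_MODE_FIXED)
+ */
+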
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return NULL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
+static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+ return dpu_crtc_is_enabled(crtc);
+}
+
+static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ bool intf_connected = false;
+
+ if (!crtc)
+ goto end;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+ _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+ DPU_DEBUG("video interface connected crtc:%d\n",
+ tmp_crtc->base.id);
+ intf_connected = true;
+ goto end;
+ }
+ }
+
+end:
+ return intf_connected;
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct dpu_core_perf_params *perf)
+{
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+
+ if (!kms || !kms->catalog || !crtc || !state || !perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_cstate = to_dpu_crtc_state(state);
+ memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+ if (!dpu_cstate->bw_control) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+ 1000ULL;
+ perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+ }
+ perf->core_clk_rate = kms->perf.max_core_clk_rate;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = 0;
+ perf->max_per_pipe_ib[i] = 0;
+ }
+ perf->core_clk_rate = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+ perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+ }
+ perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+ }
+
+ DPU_DEBUG(
+ "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 bw, threshold;
+ u64 bw_sum_of_intfs = 0;
+ enum dpu_crtc_client_type curr_client_type;
+ bool is_video_mode;
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+ return 0;
+
+ dpu_cstate = to_dpu_crtc_state(state);
+
+ /* obtain new values */
+ _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
+ curr_client_type = dpu_crtc_get_client_type(crtc);
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ (dpu_crtc_get_client_type(tmp_crtc) ==
+ curr_client_type) &&
+ (tmp_crtc != crtc)) {
+ struct dpu_crtc_state *tmp_cstate =
+ to_dpu_crtc_state(tmp_crtc->state);
+
+ DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id,
+ tmp_cstate->new_perf.bw_ctl[i],
+ tmp_cstate->bw_control);
+ /*
+ * For bw check only use the bw if the
+ * atomic property has been already set
+ */
+ if (tmp_cstate->bw_control)
+ bw_sum_of_intfs +=
+ tmp_cstate->new_perf.bw_ctl[i];
+ }
+ }
+
+		/* convert bandwidth from bytes to kilobytes */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+
+ is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+ threshold = (is_video_mode ||
+ _dpu_core_video_mode_intf_connected(crtc)) ?
+ kms->catalog->perf.max_bw_low :
+ kms->catalog->perf.max_bw_high;
+
+ DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+
+ if (!dpu_cstate->bw_control) {
+ DPU_DEBUG("bypass bandwidth check\n");
+ } else if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
+ }
+ }
+
+ return 0;
+}
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+ struct drm_crtc *crtc, u32 bus_id)
+{
+ struct dpu_core_perf_params perf = { { 0 } };
+ enum dpu_crtc_client_type curr_client_type
+ = dpu_crtc_get_client_type(crtc);
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int ret = 0;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ curr_client_type ==
+ dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf.max_per_pipe_ib[bus_id] =
+ max(perf.max_per_pipe_ib[bus_id],
+ dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+
+ DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+ tmp_crtc->base.id, bus_id,
+ dpu_cstate->new_perf.bw_ctl[bus_id]);
+ }
+ }
+ return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * This function checks a state variable for the crtc. If all pending
+ * commit requests are done, meaning no more bandwidth is needed, the
+ * bandwidth request is released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ /* only do this for command mode rt client */
+ if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+ return;
+
+	/*
+	 * If a video-mode interface is present, cmd panel bandwidth cannot
+	 * be released. The cmd-mode check above already guarantees this
+	 * crtc is a cmd client, so only scan the other crtcs.
+	 */
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+				dpu_crtc_get_intf_mode(tmp_crtc) ==
+						INTF_MODE_VIDEO)
+			return;
+	}
+
+ /* Release the bandwidth */
+ if (kms->perf.enable_bw_release) {
+ trace_dpu_cmd_release_bw(crtc->base.id);
+ DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ dpu_crtc->cur_perf.bw_ctl[i] = 0;
+ _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ }
+ }
+}
+
+static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
+{
+ struct dss_clk *core_clk = kms->perf.core_clk;
+
+ if (core_clk->max_rate && (rate > core_clk->max_rate))
+ rate = core_clk->max_rate;
+
+ core_clk->rate = rate;
+ return msm_dss_clk_set_rate(core_clk, 1);
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+ u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+ struct drm_crtc *crtc;
+ struct dpu_crtc_state *dpu_cstate;
+
+ drm_for_each_crtc(crtc, kms->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+ clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+ clk_rate);
+ clk_rate = clk_round_rate(kms->perf.core_clk->clk,
+ clk_rate);
+ }
+ }
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+ clk_rate = kms->perf.fix_core_clk_rate;
+
+ DPU_DEBUG("clk:%llu\n", clk_rate);
+
+ return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req)
+{
+ struct dpu_core_perf_params *new, *old;
+ int update_bus = 0, update_clk = 0;
+ u64 clk_rate = 0;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+ priv = kms->dev->dev_private;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+ crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+ old = &dpu_crtc->cur_perf;
+ new = &dpu_cstate->new_perf;
+
+ if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ /*
+ * cases for bus bandwidth update.
+ * 1. new bandwidth vote - "ab or ib vote" is higher
+ * than current vote for update request.
+ * 2. new bandwidth vote - "ab or ib vote" is lower
+ * than current vote at end of commit or stop.
+ */
+ if ((params_changed && ((new->bw_ctl[i] >
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] >
+ old->max_per_pipe_ib[i]))) ||
+ (!params_changed && ((new->bw_ctl[i] <
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] <
+ old->max_per_pipe_ib[i])))) {
+ DPU_DEBUG(
+ "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed,
+ new->bw_ctl[i], old->bw_ctl[i]);
+ old->bw_ctl[i] = new->bw_ctl[i];
+ old->max_per_pipe_ib[i] =
+ new->max_per_pipe_ib[i];
+ update_bus |= BIT(i);
+ }
+ }
+
+ if ((params_changed &&
+ (new->core_clk_rate > old->core_clk_rate)) ||
+ (!params_changed &&
+ (new->core_clk_rate < old->core_clk_rate))) {
+ old->core_clk_rate = new->core_clk_rate;
+ update_clk = 1;
+ }
+ } else {
+ DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+ memset(old, 0, sizeof(*old));
+ memset(new, 0, sizeof(*new));
+ update_bus = ~0;
+ update_clk = 1;
+ }
+ trace_dpu_perf_crtc_update(crtc->base.id,
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ new->core_clk_rate, stop_req,
+ update_bus, update_clk);
+
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ if (update_bus & BIT(i)) {
+ ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ if (ret) {
+ DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
+ crtc->base.id, i);
+ return ret;
+ }
+ }
+ }
+
+ /*
+ * Update the clock after bandwidth vote to ensure
+ * bandwidth is available before clock rate is increased.
+ */
+ if (update_clk) {
+ clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+ trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+ ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
+ if (ret) {
+ DPU_ERROR("failed to set %s clock rate %llu\n",
+ kms->perf.core_clk->clk_name, clk_rate);
+ return ret;
+ }
+
+ kms->perf.core_clk_rate = clk_rate;
+ DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+	struct dpu_perf_cfg *cfg;
+	u32 perf_mode = 0;
+	char buf[10];
+
+	/* validate the handle before dereferencing it for the catalog */
+	if (!perf)
+		return -ENODEV;
+
+	cfg = &perf->catalog->perf;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtouint(buf, 0, &perf_mode))
+ return -EFAULT;
+
+ if (perf_mode >= DPU_PERF_MODE_MAX)
+ return -EFAULT;
+
+ if (perf_mode == DPU_PERF_MODE_FIXED) {
+ DRM_INFO("fix performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+ /* run the driver with max clk and BW vote */
+ perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+ perf->perf_tune.min_bus_vote =
+ (u64) cfg->max_bw_high * 1000;
+ DRM_INFO("minimum performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+ /* reset the perf tune params to 0 */
+ perf->perf_tune.min_core_clk = 0;
+ perf->perf_tune.min_bus_vote = 0;
+ DRM_INFO("normal performance mode\n");
+ }
+ perf->perf_tune.mode = perf_mode;
+
+ return count;
+}
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ int len = 0;
+ char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+
+ if (!perf)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf),
+ "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+ perf->perf_tune.mode,
+ perf->perf_tune.min_core_clk,
+ perf->perf_tune.min_bus_vote);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+ .open = simple_open,
+ .read = _dpu_core_perf_mode_read,
+ .write = _dpu_core_perf_mode_write,
+};
+
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+ debugfs_remove_recursive(perf->debugfs_root);
+ perf->debugfs_root = NULL;
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ struct dpu_mdss_cfg *catalog = perf->catalog;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ priv = perf->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+ if (!perf->debugfs_root) {
+ DPU_ERROR("failed to create core perf debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->max_core_clk_rate);
+ debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+ (u32 *)&perf->enable_bw_release);
+ debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_low);
+ debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_high);
+ debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_core_ib);
+ debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_llcc_ib);
+ debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_dram_ib);
+ debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+ (u32 *)perf, &dpu_core_perf_mode_fops);
+ debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->fix_core_clk_rate);
+ debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ib_vote);
+ debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ab_vote);
+
+ return 0;
+}
+#else
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+ if (!perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_core_perf_debugfs_destroy(perf);
+ perf->max_core_clk_rate = 0;
+ perf->core_clk = NULL;
+ perf->phandle = NULL;
+ perf->catalog = NULL;
+ perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk)
+{
+ perf->dev = dev;
+ perf->catalog = catalog;
+ perf->phandle = phandle;
+ perf->core_clk = core_clk;
+
+ perf->max_core_clk_rate = core_clk->max_rate;
+ if (!perf->max_core_clk_rate) {
+ DPU_DEBUG("optional max core clk rate, use default\n");
+ perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 000000000000..fbcbe0c7527a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+#include "dpu_power_handle.h"
+
+#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+ u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 core_clk_rate;
+};
+
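+/*
+ * Illustrative only (units inferred from dpu_core_perf_crtc_check(), which
+ * divides bw_ctl by 1000 before comparing against the catalog's kilobyte
+ * limits): a hypothetical 2 GB/s arbitrated vote on the MNOC path would be
+ * expressed as:
+ *
+ *	perf.bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC] = 2000000000ULL;
+ */
+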
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+ u32 mode;
+ u64 min_core_clk;
+ u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+ struct drm_device *dev;
+ struct dentry *debugfs_root;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_power_handle *phandle;
+ struct dss_clk *core_clk;
+ u64 core_clk_rate;
+ u64 max_core_clk_rate;
+ struct dpu_core_perf_tune perf_tune;
+ u32 enable_bw_release;
+ u64 fix_core_clk_rate;
+ u64 fix_core_ib_vote;
+ u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req);
+
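+/*
+ * Illustrative call sites (a sketch, not taken from driver code): a crtc
+ * commit path would raise votes with dpu_core_perf_crtc_update(crtc, 1,
+ * false), while a disable path would drop them with
+ * dpu_core_perf_crtc_update(crtc, 0, true).
+ */
+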
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @core_clk: pointer to core clock
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk);
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @perf: Pointer to core performance context
+ * @parent: Pointer to parent debugfs directory
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644
index 000000000000..80cbf75bc2ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -0,0 +1,2138 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_rect.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_power_handle.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+#define DPU_DRM_BLEND_OP_NOT_DEFINED 0
+#define DPU_DRM_BLEND_OP_OPAQUE 1
+#define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
+#define DPU_DRM_BLEND_OP_COVERAGE 3
+#define DPU_DRM_BLEND_OP_MAX 4
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+#define MISR_BUFF_SIZE 256
+
+static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return NULL;
+ }
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
+static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_crtc *crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid dpu crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid drm device\n");
+ return -EINVAL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+/**
+ * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
+{
+ if (!rp)
+ return NULL;
+
+ return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
+{
+ struct dpu_crtc_res *res, *next;
+ struct drm_crtc *crtc;
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+ force ? "destroy" : "free_unused");
+
+ list_for_each_entry_safe(res, next, &rp->res_list, list) {
+ if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
+ continue;
+ DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ list_del(&res->list);
+ if (res->ops.put)
+ res->ops.put(res->val);
+ kfree(res);
+ }
+}
+
+/**
+ * _dpu_crtc_rp_free_unused - free unused resources in the pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ _dpu_crtc_rp_reclaim(rp, false);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ list_del_init(&rp->rp_list);
+ _dpu_crtc_rp_reclaim(rp, true);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+ DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+ return dpu_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _dpu_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _dpu_crtc_hw_blk_put(void *val)
+{
+ DPU_DEBUG("res://%pK\n", val);
+ dpu_hw_blk_put(val);
+}
+
+/**
+ * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
+ struct dpu_crtc_respool *dup_rp)
+{
+ struct dpu_crtc_res *res, *dup_res;
+ struct drm_crtc *crtc;
+
+ if (!rp || !dup_rp || !rp->rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+ mutex_lock(rp->rp_lock);
+ dup_rp->sequence_id = rp->sequence_id + 1;
+ INIT_LIST_HEAD(&dup_rp->res_list);
+ dup_rp->ops = rp->ops;
+ list_for_each_entry(res, &rp->res_list, list) {
+ dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
+ if (!dup_res) {
+ mutex_unlock(rp->rp_lock);
+ return;
+ }
+ INIT_LIST_HEAD(&dup_res->list);
+ atomic_set(&dup_res->refcount, 0);
+ dup_res->type = res->type;
+ dup_res->tag = res->tag;
+ dup_res->val = res->val;
+ dup_res->ops = res->ops;
+ dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
+ DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, dup_rp->sequence_id,
+ dup_res->type, dup_res->tag, dup_res->val,
+ atomic_read(&dup_res->refcount));
+ list_add_tail(&dup_res->list, &dup_rp->res_list);
+ if (dup_res->ops.get)
+ dup_res->ops.get(dup_res->val, 0, -1);
+ }
+
+ dup_rp->rp_lock = rp->rp_lock;
+ dup_rp->rp_head = rp->rp_head;
+ INIT_LIST_HEAD(&dup_rp->rp_list);
+ list_add_tail(&dup_rp->rp_list, rp->rp_head);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * @rp_lock: Pointer to serialization resource pool lock
+ * @rp_head: Pointer to crtc resource pool head
+ * return: None
+ */
+static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
+ struct mutex *rp_lock, struct list_head *rp_head)
+{
+ if (!rp || !rp_lock || !rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ mutex_lock(rp_lock);
+ rp->rp_lock = rp_lock;
+ rp->rp_head = rp_head;
+ INIT_LIST_HEAD(&rp->rp_list);
+ rp->sequence_id = 0;
+ INIT_LIST_HEAD(&rp->res_list);
+ rp->ops.get = _dpu_crtc_hw_blk_get;
+ rp->ops.put = _dpu_crtc_hw_blk_put;
+ list_add_tail(&rp->rp_list, rp->rp_head);
+ mutex_unlock(rp_lock);
+}
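+
+/*
+ * Illustrative lifecycle sketch (not a guarantee of the patch's call
+ * flow): the resource pool helpers above are driven by the atomic state
+ * hooks defined later in this file, roughly as:
+ *
+ *	dpu_crtc_reset()           -> _dpu_crtc_rp_reset(&cstate->rp, ...)
+ *	dpu_crtc_duplicate_state() -> _dpu_crtc_rp_duplicate(old, new)
+ *	dpu_crtc_atomic_check()    -> _dpu_crtc_rp_free_unused(&cstate->rp)
+ *	dpu_crtc_destroy_state()   -> _dpu_crtc_rp_destroy(&cstate->rp)
+ *
+ * Each duplicate bumps sequence_id and re-acquires every resource with
+ * refcount 0 and DPU_CRTC_RES_FLAG_FREE set, so anything the new state
+ * does not claim is reclaimed by _dpu_crtc_rp_free_unused().
+ */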
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ DPU_DEBUG("\n");
+
+ dpu_crtc->phandle = NULL;
+
+ drm_crtc_cleanup(crtc);
+ mutex_destroy(&dpu_crtc->crtc_lock);
+ kfree(dpu_crtc);
+}
+
+static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
+ struct dpu_plane_state *pstate)
+{
+ struct dpu_hw_mixer *lm = mixer->hw_lm;
+
+ /* default to opaque blending */
+ lm->ops.setup_blend_config(lm, pstate->stage, 0xff, 0,
+ DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST);
+}
+
+static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *crtc_state;
+ int lm_idx, lm_horiz_position;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ lm_horiz_position = 0;
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
+ struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer_cfg cfg;
+
+ if (!lm_roi || !drm_rect_visible(lm_roi))
+ continue;
+
+ cfg.out_width = drm_rect_width(lm_roi);
+ cfg.out_height = drm_rect_height(lm_roi);
+ cfg.right_mixer = lm_horiz_position++;
+ cfg.flags = 0;
+ hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+ }
+}
+
+static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+{
+ struct drm_plane *plane;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_format *format;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg *stage_cfg;
+
+ u32 flush_mask;
+ uint32_t stage_idx, lm_idx;
+ int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
+ bool bg_alpha_enable = false;
+
+ if (!dpu_crtc || !mixer) {
+ DPU_ERROR("invalid dpu_crtc or mixer\n");
+ return;
+ }
+
+ ctl = mixer->hw_ctl;
+ lm = mixer->hw_lm;
+ stage_cfg = &dpu_crtc->stage_cfg;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ state = plane->state;
+ if (!state)
+ continue;
+
+ pstate = to_dpu_plane_state(state);
+ fb = state->fb;
+
+ dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
+
+ DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+ crtc->base.id,
+ pstate->stage,
+ plane->base.id,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ state->fb ? state->fb->base.id : -1);
+
+ format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+ if (!format) {
+ DPU_ERROR("invalid format\n");
+ return;
+ }
+
+ if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
+ bg_alpha_enable = true;
+
+ stage_idx = zpos_cnt[pstate->stage]++;
+ stage_cfg->stage[pstate->stage][stage_idx] =
+ dpu_plane_pipe(plane);
+ stage_cfg->multirect_index[pstate->stage][stage_idx] =
+ pstate->multirect_index;
+
+ trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
+ state, pstate, stage_idx,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ format->base.pixel_format,
+ fb ? fb->modifier : 0);
+
+ /* blend config update */
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+
+ mixer[lm_idx].flush_mask |= flush_mask;
+
+ if (bg_alpha_enable && !format->alpha_enable)
+ mixer[lm_idx].mixer_op_mode = 0;
+ else
+ mixer[lm_idx].mixer_op_mode |=
+ 1 << pstate->stage;
+ }
+ }
+
+ _dpu_crtc_program_lm_output_roi(crtc);
+}
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_crtc_state;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+
+ int i;
+
+ if (!crtc)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_crtc_state = to_dpu_crtc_state(crtc->state);
+ mixer = dpu_crtc->mixers;
+
+ DPU_DEBUG("%s\n", dpu_crtc->name);
+
+ if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+ DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
+ return;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+ DPU_ERROR("invalid lm or ctl assigned to mixer\n");
+ return;
+ }
+ mixer[i].mixer_op_mode = 0;
+ mixer[i].flush_mask = 0;
+ if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+ mixer[i].hw_ctl->ops.clear_all_blendstages(
+ mixer[i].hw_ctl);
+ }
+
+ /* initialize stage cfg */
+ memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ ctl = mixer[i].hw_ctl;
+ lm = mixer[i].hw_lm;
+
+ lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+ mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+ mixer[i].hw_lm->idx);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+ DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ mixer[i].hw_lm->idx - LM_0,
+ mixer[i].mixer_op_mode,
+ ctl->idx - CTL_0,
+ mixer[i].flush_mask);
+
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &dpu_crtc->stage_cfg);
+ }
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt will signal them.
+ * However, PAGE_FLIP events are not handled through the vblank_event_list;
+ * this API signals any pending PAGE_FLIP events requested through
+ * DRM_IOCTL_MODE_PAGE_FLIP that are cached in dpu_crtc->event.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dpu_crtc->event) {
+ DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ dpu_crtc->event);
+ trace_dpu_crtc_complete_flip(DRMID(crtc));
+ drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+ dpu_crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
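+
+/*
+ * Sketch of the page flip event handoff (illustrative ordering only,
+ * matching the hooks below):
+ *
+ *	atomic_begin/flush: dpu_crtc->event = crtc->state->event
+ *	                    (under dev->event_lock)
+ *	vblank irq:         dpu_crtc_vblank_cb() ->
+ *	                    _dpu_crtc_complete_flip() ->
+ *	                    drm_crtc_send_vblank_event(crtc, dpu_crtc->event)
+ */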
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ if (!crtc || !crtc->dev) {
+ DPU_ERROR("invalid crtc\n");
+ return INTF_MODE_NONE;
+ }
+
+ drm_for_each_encoder(encoder, crtc->dev)
+ if (encoder->crtc == crtc)
+ return dpu_encoder_get_intf_mode(encoder);
+
+ return INTF_MODE_NONE;
+}
+
+static void dpu_crtc_vblank_cb(void *data)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ /* keep statistics on vblank callback - with auto reset via debugfs */
+ if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+ dpu_crtc->vblank_cb_time = ktime_get();
+ else
+ dpu_crtc->vblank_cb_count++;
+ _dpu_crtc_complete_flip(crtc);
+ drm_crtc_handle_vblank(crtc);
+ trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ struct drm_crtc *crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+ unsigned long flags;
+ bool frame_done = false;
+
+ if (!work) {
+ DPU_ERROR("invalid work handle\n");
+ return;
+ }
+
+ fevent = container_of(work, struct dpu_crtc_frame_event, work);
+ if (!fevent->crtc || !fevent->crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ crtc = fevent->crtc;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid kms handle\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+ DPU_ATRACE_BEGIN("crtc_frame_event");
+
+ DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+ ktime_to_ns(fevent->ts));
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+ /* this should not happen */
+ DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
+ crtc->base.id,
+ fevent->event,
+ ktime_to_ns(fevent->ts),
+ atomic_read(&dpu_crtc->frame_pending));
+ } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+ /* release bandwidth and other resources */
+ trace_dpu_crtc_frame_event_done(DRMID(crtc),
+ fevent->event);
+ dpu_core_perf_crtc_release_bw(crtc);
+ } else {
+ trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+ fevent->event);
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
+ dpu_core_perf_crtc_update(crtc, 0, false);
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR))
+ frame_done = true;
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+ DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+ crtc->base.id, ktime_to_ns(fevent->ts));
+
+ if (frame_done)
+ complete_all(&dpu_crtc->frame_done_comp);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+ DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
+ * registers this API to encoder for all frame event callbacks like
+ * frame_error, frame_done, idle_timeout, etc. The encoder may invoke
+ * these callbacks from different contexts - IRQ, user thread,
+ * commit_thread, etc. Each event should be carefully reviewed and
+ * should be processed in the proper task context to avoid scheduling
+ * delay or to properly manage the irq context's bottom half processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ unsigned long flags;
+ u32 crtc_id;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ /* Nothing to do on idle event */
+ if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+ crtc_id = drm_crtc_index(crtc);
+
+ trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+ struct dpu_crtc_frame_event, list);
+ if (fevent)
+ list_del_init(&fevent->list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+ if (!fevent) {
+ DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+ return;
+ }
+
+ fevent->event = event;
+ fevent->crtc = crtc;
+ fevent->ts = ktime_get();
+ kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+}
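+
+/*
+ * Illustrative note on the frame event plumbing above: the fevent
+ * structs are statically allocated at init time and recycled through
+ * dpu_crtc->frame_event_list. The callback (which may run in any
+ * context) pops a free entry under the spinlock and queues it to the
+ * per-crtc event thread; dpu_crtc_frame_event_work() then processes it
+ * in task context and pushes the entry back onto the free list. If the
+ * list is exhausted, the event is dropped with an error log rather than
+ * blocking the caller.
+ */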
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ trace_dpu_crtc_complete_commit(DRMID(crtc));
+}
+
+static void _dpu_crtc_setup_mixer_for_encoder(
+ struct drm_crtc *crtc,
+ struct drm_encoder *enc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_rm *rm = &dpu_kms->rm;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *last_valid_ctl = NULL;
+ int i;
+ struct dpu_rm_hw_iter lm_iter, ctl_iter;
+
+ dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
+ dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
+
+ /* Set up all the mixers and ctls reserved by this encoder */
+ for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
+ mixer = &dpu_crtc->mixers[i];
+
+ if (!dpu_rm_get_hw(rm, &lm_iter))
+ break;
+ mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
+
+ /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
+ if (!dpu_rm_get_hw(rm, &ctl_iter)) {
+ DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
+ mixer->hw_lm->idx - LM_0);
+ mixer->hw_ctl = last_valid_ctl;
+ } else {
+ mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
+ last_valid_ctl = mixer->hw_ctl;
+ }
+
+ /* Shouldn't happen, mixers are always >= ctls */
+ if (!mixer->hw_ctl) {
+ DPU_ERROR("no valid ctls found for lm %d\n",
+ mixer->hw_lm->idx - LM_0);
+ return;
+ }
+
+ mixer->encoder = enc;
+
+ dpu_crtc->num_mixers++;
+ DPU_DEBUG("setup mixer %d: lm %d\n",
+ i, mixer->hw_lm->idx - LM_0);
+ DPU_DEBUG("setup mixer %d: ctl %d\n",
+ i, mixer->hw_ctl->idx - CTL_0);
+ }
+}
+
+static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
+
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ /* Check for mixers on all encoders attached to this crtc */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *adj_mode;
+ u32 crtc_split_width;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ adj_mode = &state->adjusted_mode;
+ crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ struct drm_rect *r = &cstate->lm_bounds[i];
+ r->x1 = crtc_split_width * i;
+ r->y1 = 0;
+ r->x2 = r->x1 + crtc_split_width;
+ r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+
+ trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+ }
+
+ drm_mode_debug_printmodeline(adj_mode);
+}
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ unsigned long flags;
+ struct dpu_crtc_smmu_state_data *smmu_state;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dev = crtc->dev;
+ smmu_state = &dpu_crtc->smmu_state;
+
+ if (!dpu_crtc->num_mixers) {
+ _dpu_crtc_setup_mixers(crtc);
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+ }
+
+ if (dpu_crtc->event) {
+ WARN_ON(dpu_crtc->event);
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ /* encoder will trigger pending mask now */
+ dpu_encoder_trigger_kickoff_pending(encoder);
+ }
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ _dpu_crtc_blend_setup(crtc);
+
+ /*
+ * PP_DONE irq is only used by command mode for now.
+ * It is better to request the pending interrupt before the FLUSH and
+ * START triggers, to make sure no pp_done irq is missed.
+ * This is safe because no pp_done will happen before SW trigger
+ * in command mode.
+ */
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+ unsigned long flags;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
+ if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
+ return;
+ }
+
+ event_thread = &priv->event_thread[crtc->index];
+
+ if (dpu_crtc->event) {
+ DPU_DEBUG("already received dpu_crtc->event\n");
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ /*
+ * For planes without commit update, drm framework will not add
+ * those planes to current state since hardware update is not
+ * required. However, if those planes were power collapsed since
+ * last commit cycle, driver has to restore the hardware state
+ * of those planes explicitly here prior to plane flush.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc)
+ dpu_plane_restore(plane);
+
+ /* update performance setting before crtc kickoff */
+ dpu_core_perf_crtc_update(crtc, 1, false);
+
+ /*
+ * Final plane updates: Give each plane a chance to complete all
+ * required writes/flushing before crtc's "flush
+ * everything" call below.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (dpu_crtc->smmu_state.transition_error)
+ dpu_plane_set_error(plane, true);
+ dpu_plane_flush(plane);
+ }
+
+ /* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * dpu_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ _dpu_crtc_rp_destroy(&cstate->rp);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(cstate);
+}
+
+static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret, rc = 0;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ if (!atomic_read(&dpu_crtc->frame_pending)) {
+ DPU_DEBUG("no frames pending\n");
+ return 0;
+ }
+
+ DPU_ATRACE_BEGIN("frame done completion wait");
+ ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
+ msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+ if (!ret) {
+ DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
+ rc = -ETIMEDOUT;
+ }
+ DPU_ATRACE_END("frame done completion wait");
+
+ return rc;
+}
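+
+/*
+ * Sketch of the frame_done_comp handshake (illustrative; assumes
+ * DPU_FRAME_DONE_TIMEOUT, defined elsewhere in this driver, is in ms):
+ *
+ *	dpu_crtc_commit_kickoff():  _dpu_crtc_wait_for_frame_done();
+ *	                            ...kickoff...;
+ *	                            reinit_completion(&frame_done_comp)
+ *	frame event worker:         frame_pending drops to 0 ->
+ *	                            complete_all(&frame_done_comp)
+ */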
+
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_crtc_state *cstate;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+ dev = crtc->dev;
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ priv = dpu_kms->dev->dev_private;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to start a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ DPU_ATRACE_BEGIN("crtc_commit");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct dpu_encoder_kickoff_params params = { 0 };
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ dpu_encoder_prepare_for_kickoff(encoder, &params);
+ }
+
+ /* wait for frame_event_done completion */
+ DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _dpu_crtc_wait_for_frame_done(crtc);
+ DPU_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+ goto end;
+ }
+
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+ } else {
+ DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+ }
+
+ dpu_crtc->play_count++;
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_kickoff(encoder);
+ }
+
+end:
+ reinit_completion(&dpu_crtc->frame_done_comp);
+ DPU_ATRACE_END("crtc_commit");
+}
+
+/**
+ * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * @dpu_crtc: Pointer to dpu crtc structure
+ * @enable: Whether to enable/disable vblanks
+ *
+ * return: error code
+ */
+static int _dpu_crtc_vblank_enable_no_lock(
+ struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *enc;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ dev = crtc->dev;
+
+ if (enable) {
+ int ret;
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ ret = _dpu_crtc_power_enable(dpu_crtc, true);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc,
+ dpu_crtc_vblank_cb, (void *)crtc);
+ }
+ } else {
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc, NULL, NULL);
+ }
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ }
+
+ return 0;
+}
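+
+/*
+ * Locking note for the helper above: callers must hold
+ * dpu_crtc->crtc_lock (hence "_no_lock"), and the helper itself drops
+ * and re-takes the lock around _dpu_crtc_power_enable() because the
+ * power event callback may try to re-acquire it. A minimal caller
+ * sketch:
+ *
+ *	mutex_lock(&dpu_crtc->crtc_lock);
+ *	ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
+ *	mutex_unlock(&dpu_crtc->crtc_lock);
+ */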
+
+/**
+ * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int ret = 0;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+
+ if (!priv->kms) {
+ DPU_ERROR("invalid crtc kms\n");
+ return;
+ }
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /*
+ * If the vblank is enabled, release a power reference on suspend
+ * and take it back during resume (if it is still enabled).
+ */
+ trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
+ if (dpu_crtc->suspend == enable)
+ DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+ crtc->base.id, enable);
+ else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+
+ dpu_crtc->suspend = enable;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+/**
+ * dpu_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * return: Pointer to new drm_crtc_state structure
+ */
+static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate, *old_cstate;
+
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ old_cstate = to_dpu_crtc_state(crtc->state);
+ cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return NULL;
+ }
+
+ /* duplicate base helper */
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+ _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
+ return &cstate->base;
+}
+
+/**
+ * dpu_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void dpu_crtc_reset(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ /* revert suspend actions, if necessary */
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, false);
+
+ /* remove previous state, if present */
+ if (crtc->state) {
+ dpu_crtc_destroy_state(crtc, crtc->state);
+ crtc->state = NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return;
+ }
+
+ _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
+ &dpu_crtc->rp_head);
+
+ cstate->base.crtc = crtc;
+ crtc->state = &cstate->base;
+}
+
+static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct dpu_crtc_mixer *m;
+ u32 i, misr_status;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
+
+ switch (event_type) {
+ case DPU_POWER_EVENT_POST_ENABLE:
+ /* restore encoder; crtc will be programmed during commit */
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_virt_restore(encoder);
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, true,
+ dpu_crtc->misr_frame_count);
+ }
+ break;
+ case DPU_POWER_EVENT_PRE_DISABLE:
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ }
+ break;
+ case DPU_POWER_EVENT_POST_DISABLE:
+ /*
+ * Nothing to do. All the planes on the CRTC will be
+ * programmed for every frame.
+ */
+ break;
+ default:
+ DPU_DEBUG("event:%d not handled\n", event_type);
+ break;
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void dpu_crtc_disable(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+ unsigned long flags;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ mode = &cstate->base.adjusted_mode;
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, true);
+
+ /* Disable/save vblank irq handling */
+ drm_crtc_vblank_off(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /* wait for frame_event_done completion */
+ if (_dpu_crtc_wait_for_frame_done(crtc))
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+
+ trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = false;
+
+ if (atomic_read(&dpu_crtc->frame_pending)) {
+ trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
+ atomic_read(&dpu_crtc->frame_pending));
+ dpu_core_perf_crtc_release_bw(crtc);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+ }
+
+ dpu_core_perf_crtc_update(crtc, 0, true);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
+ }
+
+ if (dpu_crtc->power_event)
+ dpu_power_handle_unregister_event(dpu_crtc->phandle,
+ dpu_crtc->power_event);
+
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+
+ /* disable clk & bw control until clk & bw properties are set */
+ cstate->bw_control = false;
+ cstate->bw_split_vote = false;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static void dpu_crtc_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder,
+ dpu_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
+ if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = true;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ /* Enable/restore vblank irq handling */
+ drm_crtc_vblank_on(crtc);
+
+ dpu_crtc->power_event = dpu_power_handle_register_event(
+ dpu_crtc->phandle,
+ DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
+ DPU_POWER_EVENT_PRE_DISABLE,
+ dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
+
+}
+
+struct plane_state {
+ struct dpu_plane_state *dpu_pstate;
+ const struct drm_plane_state *drm_pstate;
+ int stage;
+ u32 pipe_id;
+};
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct plane_state *pstates;
+ struct dpu_crtc_state *cstate;
+
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+
+ int cnt = 0, rc = 0, mixer_width, i, z_pos;
+
+ struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+ int multirect_count = 0;
+ const struct drm_plane_state *pipe_staged[SSPP_MAX];
+ int left_zpos_cnt = 0, right_zpos_cnt = 0;
+ struct drm_rect crtc_rect = { 0 };
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
+ if (!pstates)
+ return -ENOMEM;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ if (!state->enable || !state->active) {
+ DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+ crtc->base.id, state->enable, state->active);
+ goto end;
+ }
+
+ mode = &state->adjusted_mode;
+ DPU_DEBUG("%s: check", dpu_crtc->name);
+
+ /* force a full mode set if active state changed */
+ if (state->active_changed)
+ state->mode_changed = true;
+
+ memset(pipe_staged, 0, sizeof(pipe_staged));
+
+ mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ _dpu_crtc_setup_lm_bounds(crtc, state);
+
+ crtc_rect.x2 = mode->hdisplay;
+ crtc_rect.y2 = mode->vdisplay;
+
+ /* get plane state for all drm planes associated with crtc state */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct drm_rect dst, clip = crtc_rect;
+
+ if (IS_ERR_OR_NULL(pstate)) {
+ rc = pstate ? PTR_ERR(pstate) : -EINVAL;
+ DPU_ERROR("%s: failed to get plane%d state, %d\n",
+ dpu_crtc->name, plane->base.id, rc);
+ goto end;
+ }
+ if (cnt >= DPU_STAGE_MAX * 4)
+ continue;
+
+ pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
+ pstates[cnt].drm_pstate = pstate;
+ pstates[cnt].stage = pstate->normalized_zpos;
+ pstates[cnt].pipe_id = dpu_plane_pipe(plane);
+
+ if (pipe_staged[pstates[cnt].pipe_id]) {
+ multirect_plane[multirect_count].r0 =
+ pipe_staged[pstates[cnt].pipe_id];
+ multirect_plane[multirect_count].r1 = pstate;
+ multirect_count++;
+
+ pipe_staged[pstates[cnt].pipe_id] = NULL;
+ } else {
+ pipe_staged[pstates[cnt].pipe_id] = pstate;
+ }
+
+ cnt++;
+
+ dst = drm_plane_state_dest(pstate);
+ if (!drm_rect_intersect(&clip, &dst) ||
+ !drm_rect_equals(&clip, &dst)) {
+ DPU_ERROR("invalid vertical/horizontal destination\n");
+ DPU_ERROR("display: " DRM_RECT_FMT " plane: "
+ DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
+ DRM_RECT_ARG(&dst));
+ rc = -E2BIG;
+ goto end;
+ }
+ }
+
+ for (i = 1; i < SSPP_MAX; i++) {
+ if (pipe_staged[i]) {
+ dpu_plane_clear_multirect(pipe_staged[i]);
+
+ if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
+ DPU_ERROR(
+ "r1 only virt plane:%d not supported\n",
+ pipe_staged[i]->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+ }
+
+ z_pos = -1;
+ for (i = 0; i < cnt; i++) {
+ /* reset counts at every new blend stage */
+ if (pstates[i].stage != z_pos) {
+ left_zpos_cnt = 0;
+ right_zpos_cnt = 0;
+ z_pos = pstates[i].stage;
+ }
+
+ /* verify z_pos setting before using it */
+ if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
+ DPU_ERROR("> %d plane stages assigned\n",
+ DPU_STAGE_MAX - DPU_STAGE_0);
+ rc = -EINVAL;
+ goto end;
+ } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+ if (left_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on left\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ left_zpos_cnt++;
+
+ } else {
+ if (right_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on right\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ right_zpos_cnt++;
+ }
+
+ pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
+ DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
+ }
+
+ for (i = 0; i < multirect_count; i++) {
+ if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
+ DPU_ERROR(
+ "multirect validation failed for planes (%d - %d)\n",
+ multirect_plane[i].r0->plane->base.id,
+ multirect_plane[i].r1->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+ rc = dpu_core_perf_crtc_check(crtc, state);
+ if (rc) {
+ DPU_ERROR("crtc%d failed performance check %d\n",
+ crtc->base.id, rc);
+ goto end;
+ }
+
+ /*
+ * Validate source split:
+ * use pstates sorted by stage to check planes on the same stage.
+ * We assume that all pipes are in source split, so it is valid to
+ * compare without taking left/right mixer placement into account.
+ */
+ for (i = 1; i < cnt; i++) {
+ struct plane_state *prv_pstate, *cur_pstate;
+ struct drm_rect left_rect, right_rect;
+ int32_t left_pid, right_pid;
+ int32_t stage;
+
+ prv_pstate = &pstates[i - 1];
+ cur_pstate = &pstates[i];
+ if (prv_pstate->stage != cur_pstate->stage)
+ continue;
+
+ stage = cur_pstate->stage;
+
+ left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
+ left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
+
+ right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
+ right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
+
+ if (right_rect.x1 < left_rect.x1) {
+ swap(left_pid, right_pid);
+ swap(left_rect, right_rect);
+ }
+
+ /*
+ * - planes are enumerated in pipe-priority order such that
+ * planes with lower drm_id must be left-most in a shared
+ * blend-stage when using source split.
+ * - planes in source split must be contiguous in width
+ * - planes in source split must have same dest yoff and height
+ */
+ if (right_pid < left_pid) {
+ DPU_ERROR(
+ "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+ stage, left_pid, right_pid);
+ rc = -EINVAL;
+ goto end;
+ } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
+ DPU_ERROR("non-contiguous coordinates for src split. "
+ "stage: %d left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ } else if (left_rect.y1 != right_rect.y1 ||
+ drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
+ DPU_ERROR("source split at stage: %d. invalid "
+ "yoff/height: left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+end:
+ _dpu_crtc_rp_free_unused(&cstate->rp);
+ kfree(pstates);
+ return rc;
+}
+
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->vblank_requested = en;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_crtc_mixer *m;
+
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+
+ int i, out_width;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_crtc = s->private;
+ crtc = &dpu_crtc->base;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ mode = &crtc->state->adjusted_mode;
+ out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+ mode->hdisplay, mode->vdisplay);
+
+ seq_puts(s, "\n");
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm)
+ seq_printf(s, "\tmixer[%d] has no lm\n", i);
+ else if (!m->hw_ctl)
+ seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+ else
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
+ }
+
+ seq_puts(s, "\n");
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ state = plane->state;
+
+ if (!pstate || !state)
+ continue;
+
+ seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+ pstate->stage);
+
+ if (plane->state->fb) {
+ fb = plane->state->fb;
+
+ seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
+ fb->base.id, (char *) &fb->format->format,
+ fb->width, fb->height);
+ for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
+ seq_printf(s, "cpp[%d]:%u ",
+ i, fb->format->cpp[i]);
+ seq_puts(s, "\n\t");
+
+ seq_printf(s, "modifier:%8llu ", fb->modifier);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+ seq_printf(s, "pitches[%d]:%8u ", i,
+ fb->pitches[i]);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+ seq_printf(s, "offsets[%d]:%8u ", i,
+ fb->offsets[i]);
+ seq_puts(s, "\n");
+ }
+
+ seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+ state->src_x, state->src_y, state->src_w, state->src_h);
+
+ seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+ state->crtc_x, state->crtc_y, state->crtc_w,
+ state->crtc_h);
+ seq_printf(s, "\tmultirect: mode: %d index: %d\n",
+ pstate->multirect_mode, pstate->multirect_index);
+
+ seq_puts(s, "\n");
+ }
+ if (dpu_crtc->vblank_cb_count) {
+ ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
+ s64 diff_ms = ktime_to_ms(diff);
+ s64 fps = diff_ms ? div_s64(
+ dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+ seq_printf(s,
+ "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+ fps, dpu_crtc->vblank_cb_count,
+ ktime_to_ms(diff), dpu_crtc->play_count);
+
+ /* reset time & count for next measurement */
+ dpu_crtc->vblank_cb_count = 0;
+ dpu_crtc->vblank_cb_time = ktime_set(0, 0);
+ }
+
+ seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, _dpu_debugfs_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_crtc_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ u32 frame_count, enable;
+ size_t buff_copy;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy)) {
+ DPU_ERROR("buffer copy failed\n");
+ return -EINVAL;
+ }
+
+ buf[buff_copy] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ dpu_crtc->misr_enable = enable;
+ dpu_crtc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ dpu_crtc->misr_data[i] = 0;
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
+ }
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_crtc_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ u32 misr_status;
+ ssize_t len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (!dpu_crtc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
+ m->hw_lm->idx - LM_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ dpu_crtc->misr_data[i]);
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ return len;
+}
+
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_res *res;
+ struct dpu_crtc_respool *rp;
+ int i;
+
+ seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
+ seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
+ seq_printf(s, "core_clk_rate: %llu\n",
+ dpu_crtc->cur_perf.core_clk_rate);
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ seq_printf(s, "bw_ctl[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.bw_ctl[i]);
+ seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.max_per_pipe_ib[i]);
+ }
+
+ mutex_lock(&dpu_crtc->rp_lock);
+ list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
+ seq_printf(s, "rp.%d: ", rp->sequence_id);
+ list_for_each_entry(res, &rp->res_list, list)
+ seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&dpu_crtc->rp_lock);
+
+ return 0;
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_crtc_misr_read,
+ .write = _dpu_crtc_misr_setup,
+ };
+
+ if (!crtc)
+ return -EINVAL;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms)
+ return -EINVAL;
+
+ dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
+ crtc->dev->primary->debugfs_root);
+ if (!dpu_crtc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0400,
+ dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_status_fops);
+ debugfs_create_file("state", 0600,
+ dpu_crtc->debugfs_root,
+ &dpu_crtc->base,
+ &dpu_crtc_debugfs_state_fops);
+ debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_misr_fops);
+
+ return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return;
+ dpu_crtc = to_dpu_crtc(crtc);
+ debugfs_remove_recursive(dpu_crtc->debugfs_root);
+}
+#else
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int dpu_crtc_late_register(struct drm_crtc *crtc)
+{
+ return _dpu_crtc_init_debugfs(crtc);
+}
+
+static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
+{
+ _dpu_crtc_destroy_debugfs(crtc);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = dpu_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = dpu_crtc_reset,
+ .atomic_duplicate_state = dpu_crtc_duplicate_state,
+ .atomic_destroy_state = dpu_crtc_destroy_state,
+ .late_register = dpu_crtc_late_register,
+ .early_unregister = dpu_crtc_early_unregister,
+};
+
+static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+ .disable = dpu_crtc_disable,
+ .atomic_enable = dpu_crtc_enable,
+ .atomic_check = dpu_crtc_atomic_check,
+ .atomic_begin = dpu_crtc_atomic_begin,
+ .atomic_flush = dpu_crtc_atomic_flush,
+};
+
+/* initialize crtc */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+{
+ struct drm_crtc *crtc = NULL;
+ struct dpu_crtc *dpu_crtc = NULL;
+ struct msm_drm_private *priv = NULL;
+ struct dpu_kms *kms = NULL;
+ int i;
+
+ priv = dev->dev_private;
+ kms = to_dpu_kms(priv->kms);
+
+ dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
+ if (!dpu_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &dpu_crtc->base;
+ crtc->dev = dev;
+
+ mutex_init(&dpu_crtc->crtc_lock);
+ spin_lock_init(&dpu_crtc->spin_lock);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+
+ mutex_init(&dpu_crtc->rp_lock);
+ INIT_LIST_HEAD(&dpu_crtc->rp_head);
+
+ init_completion(&dpu_crtc->frame_done_comp);
+
+ INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
+
+ for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
+ INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
+ list_add(&dpu_crtc->frame_events[i].list,
+ &dpu_crtc->frame_event_list);
+ kthread_init_work(&dpu_crtc->frame_events[i].work,
+ dpu_crtc_frame_event_work);
+ }
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+ NULL);
+
+ drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+ plane->crtc = crtc;
+
+ /* save user friendly CRTC name for later */
+ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+ /* initialize event handling */
+ spin_lock_init(&dpu_crtc->event_lock);
+
+ dpu_crtc->phandle = &kms->phandle;
+
+ DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 000000000000..e87109e608e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+#include "dpu_hw_blk.h"
+
+#define DPU_CRTC_NAME_SIZE 12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE 4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT: RealTime client like video/cmd mode display
+ * voting through apps rsc
+ * @NRT_CLIENT: Non-RealTime client like WB display
+ * voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+ RT_CLIENT,
+ NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state: smmu state
+ * @ATTACHED: all the context banks are attached.
+ * @DETACHED: all the context banks are detached.
+ * @ATTACH_ALL_REQ: transient state of attaching context banks.
+ * @DETACH_ALL_REQ: transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+ ATTACHED = 0,
+ DETACHED,
+ ATTACH_ALL_REQ,
+ DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+ NONE,
+ PRE_COMMIT,
+ POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether there is error while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+ uint32_t state;
+ uint32_t transition_type;
+ uint32_t transition_error;
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm: LM HW Driver context
+ * @hw_ctl: CTL Path HW driver context
+ * @encoder: Encoder attached to this lm & ctl
+ * @mixer_op_mode: mixer blending operation mode
+ * @flush_mask: mixer flush mask for ctl, mixer and pipe
+ */
+struct dpu_crtc_mixer {
+ struct dpu_hw_mixer *hw_lm;
+ struct dpu_hw_ctl *hw_ctl;
+ struct drm_encoder *encoder;
+ u32 mixer_op_mode;
+ u32 flush_mask;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work: base work structure
+ * @crtc: Pointer to crtc handling this event
+ * @list: event list
+ * @ts: timestamp at queue entry
+ * @event: event identifier
+ */
+struct dpu_crtc_frame_event {
+ struct kthread_work work;
+ struct drm_crtc *crtc;
+ struct list_head list;
+ ktime_t ts;
+ u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT 16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base : Base drm crtc structure
+ * @name : ASCII description of this crtc
+ * @num_ctls : Number of ctl paths in use
+ * @num_mixers : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for left/right update
+ * especially in the case of DSC Merge.
+ * @mixers : List of active mixers
+ * @event : Pointer to last received drm vblank event. If there is a
+ * pending vblank event, this will be non-null.
+ * @vsync_count : Running count of received vsync events
+ * @stage_cfg : H/w mixer stage configuration
+ * @debugfs_root : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count : frame count between crtc enable and disable
+ * @vblank_cb_time : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend : whether or not a suspend operation is in progress
+ * @enabled : whether the DPU CRTC is currently enabled. updated in the
+ * commit-thread, not state-swap time which is earlier, so
+ * safe to make decisions on during VBLANK on/off work
+ * @feature_list : list of color processing features supported on a crtc
+ * @active_list : list of color processing features are active
+ * @dirty_list : list of color processing features are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @crtc_lock : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp : for frame_event_done synchronization
+ * @event_lock : Spinlock around event handling code
+ * @misr_enable : boolean entry indicating misr enable/disable status.
+ * @misr_frame_count : misr frame count provided by client
+ * @misr_data : misr data stored before turning off the clocks.
+ * @phandle: Pointer to power handler
+ * @power_event : registered power event handle
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @rp_lock : serialization lock for resource pool
+ * @rp_head : list of active resource pool
+ * @scl3_lut_cfg : qseed3 lut config
+ */
+struct dpu_crtc {
+ struct drm_crtc base;
+ char name[DPU_CRTC_NAME_SIZE];
+
+ /* HW Resources reserved for the crtc */
+ u32 num_ctls;
+ u32 num_mixers;
+ bool mixers_swapped;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+ struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
+
+ struct drm_pending_vblank_event *event;
+ u32 vsync_count;
+
+ struct dpu_hw_stage_cfg stage_cfg;
+ struct dentry *debugfs_root;
+
+ u32 vblank_cb_count;
+ u64 play_count;
+ ktime_t vblank_cb_time;
+ bool vblank_requested;
+ bool suspend;
+ bool enabled;
+
+ struct list_head feature_list;
+ struct list_head active_list;
+ struct list_head dirty_list;
+ struct list_head ad_dirty;
+ struct list_head ad_active;
+
+ struct mutex crtc_lock;
+
+ atomic_t frame_pending;
+ struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+ struct list_head frame_event_list;
+ spinlock_t spin_lock;
+ struct completion frame_done_comp;
+
+ /* for handling internal event thread */
+ spinlock_t event_lock;
+ bool misr_enable;
+ u32 misr_frame_count;
+ u32 misr_data[CRTC_DUAL_MIXERS];
+
+ struct dpu_power_handle *phandle;
+ struct dpu_power_event *power_event;
+
+ struct dpu_core_perf_params cur_perf;
+
+ struct mutex rp_lock;
+ struct list_head rp_head;
+
+ struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
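+
+/*
+ * Typical use, as in the inline helpers below: recover the DPU wrapper
+ * from a core drm_crtc pointer:
+ *
+ *	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ */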
+
+/**
+ * struct dpu_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct dpu_crtc_res_ops {
+ void *(*get)(void *val, u32 type, u64 tag);
+ void (*put)(void *val);
+};
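+
+/*
+ * Illustrative sketch only; the variable names below are hypothetical and
+ * not part of this header. A pool user resolves a resource through the
+ * registered callbacks and releases it with the matching put:
+ *
+ *	void *hw = rp->ops.get(priv, type, tag);
+ *	if (hw) {
+ *		... use the hw block ...
+ *		rp->ops.put(hw);
+ *	}
+ */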
+
+#define DPU_CRTC_RES_FLAG_FREE BIT(0)
+
+/**
+ * struct dpu_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct dpu_crtc_res {
+ struct list_head list;
+ u32 type;
+ u64 tag;
+ atomic_t refcount;
+ struct dpu_crtc_res_ops ops;
+ void *val;
+ u32 flags;
+};
+
+/**
+ * struct dpu_crtc_respool - crtc resource pool
+ * @rp_lock: pointer to serialization lock
+ * @rp_head: pointer to head of active resource pools of this crtc
+ * @rp_list: list of crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resource managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct dpu_crtc_respool {
+ struct mutex *rp_lock;
+ struct list_head *rp_head;
+ struct list_head rp_list;
+ u32 sequence_id;
+ struct list_head res_list;
+ struct dpu_crtc_res_ops ops;
+};
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @is_ppsplit : Whether current topology requires PPSplit special handling
+ * @bw_control : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
+ * Origin top left of CRTC.
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ * @rp : resource pool of this crtc state
+ */
+struct dpu_crtc_state {
+ struct drm_crtc_state base;
+
+ bool bw_control;
+ bool bw_split_vote;
+
+ bool is_ppsplit;
+ struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+ uint64_t input_fence_timeout_ns;
+
+ struct dpu_core_perf_params new_perf;
+ struct dpu_crtc_respool rp;
+};
+
+#define to_dpu_crtc_state(x) \
+ container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be the same as the panel width (halved for dual-mixer split)
+ */
+static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ u32 mixer_width;
+
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+ mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+ return mixer_width;
+}
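+
+/*
+ * Example: for a 1920x1080 mode driven by CRTC_DUAL_MIXERS, each mixer
+ * covers 1920 / 2 = 960 pixels; a single-mixer topology covers the full
+ * 1920-pixel width.
+ */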
+
+/**
+ * dpu_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be the same as the panel height
+ */
+static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ return mode->vdisplay;
+}
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return -EINVAL;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ return atomic_read(&dpu_crtc->frame_pending);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: drm device
+ * @plane: base plane
+ * Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc type: rt, nrt, etc.
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+ struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *cstate =
+ crtc ? to_dpu_crtc_state(crtc->state) : NULL;
+
+ if (!cstate)
+ return NRT_CLIENT;
+
+ return RT_CLIENT;
+}
+
+/**
+ * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
+{
+ return crtc ? crtc->enabled : false;
+}
+
+#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
new file mode 100644
index 000000000000..ae2aee7ed9e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
@@ -0,0 +1,2393 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm_runtime.h>
+
+#include "dpu_dbg.h"
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+
+#define DEFAULT_DBGBUS_DPU DPU_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT DPU_DBG_DUMP_IN_MEM
+#define REG_BASE_NAME_LEN 80
+
+#define DBGBUS_FLAGS_DSPP BIT(0)
+#define DBGBUS_DSPP_STATUS 0x34C
+
+#define DBGBUS_NAME_DPU "dpu"
+#define DBGBUS_NAME_VBIF_RT "vbif_rt"
+
+/* offsets from dpu top address for the debug buses */
+#define DBGBUS_SSPP0 0x188
+#define DBGBUS_AXI_INTF 0x194
+#define DBGBUS_SSPP1 0x298
+#define DBGBUS_DSPP 0x348
+#define DBGBUS_PERIPH 0x418
+
+#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
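+
+/*
+ * Illustrative encoding: TEST_MASK(63, 7) == (63 << 4) | (7 << 1) | BIT(0)
+ * == 0x3f0 | 0xe | 0x1 == 0x3ff, i.e. the block id lives in bits [31:4],
+ * the test point in bits [3:1], and bit 0 enables the test bus.
+ */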
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON 0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
+#define MMSS_VBIF_TEST_BUS_OUT 0x230
+
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR 0x190
+#define MMSS_VBIF_SRC_ERR 0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
+#define MMSS_VBIF_ERR_INFO 0X1a0
+#define MMSS_VBIF_ERR_INFO_1 0x1a4
+#define MMSS_VBIF_CLIENT_NUM 14
+
+/**
+ * struct dpu_dbg_reg_base - register region base.
+ * may have sub-ranges: sub-ranges are used for dumping,
+ * or may have none: then dumping covers base -> max_offset
+ * @reg_base_head: head of this node
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len: buffer length used for manual register dumping
+ * @cb: callback for external dump function, null if not defined
+ * @cb_ptr: private pointer to callback function
+ */
+struct dpu_dbg_reg_base {
+ struct list_head reg_base_head;
+ char name[REG_BASE_NAME_LEN];
+ void __iomem *base;
+ size_t off;
+ size_t cnt;
+ size_t max_offset;
+ char *buf;
+ size_t buf_len;
+ void (*cb)(void *ptr);
+ void *cb_ptr;
+};
+
+struct dpu_debug_bus_entry {
+ u32 wr_addr;
+ u32 block_id;
+ u32 test_id;
+ void (*analyzer)(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val);
+};
+
+struct vbif_debug_bus_entry {
+ u32 disable_bus_addr;
+ u32 block_bus_addr;
+ u32 bit_offset;
+ u32 block_cnt;
+ u32 test_pnt_start;
+ u32 test_pnt_cnt;
+};
+
+struct dpu_dbg_debug_bus_common {
+ char *name;
+ u32 enable_mask;
+ bool include_in_deferred_work;
+ u32 flags;
+ u32 entries_size;
+ u32 *dumped_content;
+};
+
+struct dpu_dbg_dpu_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct dpu_debug_bus_entry *entries;
+ u32 top_blk_off;
+};
+
+struct dpu_dbg_vbif_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct dpu_dbg_base - global dpu debug base structure
+ * @reg_base_list: list of register dumping regions
+ * @dev: device pointer
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @dbgbus_dpu: debug bus structure for the dpu
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ */
+static struct dpu_dbg_base {
+ struct list_head reg_base_list;
+ struct device *dev;
+
+ struct work_struct dump_work;
+
+ struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
+ struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
+} dpu_dbg_base;
+
+static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & 0xFFF000))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
+
+	/* Unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+ { DBGBUS_SSPP0, 85, 2 },
+
+	/* Unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+ { DBGBUS_SSPP1, 85, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* cursor 1 */
+ { DBGBUS_SSPP1, 80, 0 },
+ { DBGBUS_SSPP1, 80, 1 },
+ { DBGBUS_SSPP1, 80, 2 },
+ { DBGBUS_SSPP1, 80, 3 },
+ { DBGBUS_SSPP1, 80, 4 },
+ { DBGBUS_SSPP1, 80, 5 },
+ { DBGBUS_SSPP1, 80, 6 },
+ { DBGBUS_SSPP1, 80, 7 },
+
+ { DBGBUS_SSPP1, 81, 0 },
+ { DBGBUS_SSPP1, 81, 1 },
+ { DBGBUS_SSPP1, 81, 2 },
+ { DBGBUS_SSPP1, 81, 3 },
+ { DBGBUS_SSPP1, 81, 4 },
+ { DBGBUS_SSPP1, 81, 5 },
+ { DBGBUS_SSPP1, 81, 6 },
+ { DBGBUS_SSPP1, 81, 7 },
+
+ { DBGBUS_SSPP1, 82, 0 },
+ { DBGBUS_SSPP1, 82, 1 },
+ { DBGBUS_SSPP1, 82, 2 },
+ { DBGBUS_SSPP1, 82, 3 },
+ { DBGBUS_SSPP1, 82, 4 },
+ { DBGBUS_SSPP1, 82, 5 },
+ { DBGBUS_SSPP1, 82, 6 },
+ { DBGBUS_SSPP1, 82, 7 },
+
+ { DBGBUS_SSPP1, 83, 0 },
+ { DBGBUS_SSPP1, 83, 1 },
+ { DBGBUS_SSPP1, 83, 2 },
+ { DBGBUS_SSPP1, 83, 3 },
+ { DBGBUS_SSPP1, 83, 4 },
+ { DBGBUS_SSPP1, 83, 5 },
+ { DBGBUS_SSPP1, 83, 6 },
+ { DBGBUS_SSPP1, 83, 7 },
+
+ { DBGBUS_SSPP1, 84, 0 },
+ { DBGBUS_SSPP1, 84, 1 },
+ { DBGBUS_SSPP1, 84, 2 },
+ { DBGBUS_SSPP1, 84, 3 },
+ { DBGBUS_SSPP1, 84, 4 },
+ { DBGBUS_SSPP1, 84, 5 },
+ { DBGBUS_SSPP1, 84, 6 },
+ { DBGBUS_SSPP1, 84, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 0},
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 0},
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 0},
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 0},
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 0},
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 0},
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 0},
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 0},
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 0},
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 0},
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 0},
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 0},
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 0},
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 0},
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 9, 0},
+ { DBGBUS_SSPP0, 9, 1},
+ { DBGBUS_SSPP0, 9, 3},
+ { DBGBUS_SSPP0, 29, 0},
+ { DBGBUS_SSPP0, 29, 1},
+ { DBGBUS_SSPP0, 29, 3},
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 9, 0},
+ { DBGBUS_SSPP1, 9, 1},
+ { DBGBUS_SSPP1, 9, 3},
+ { DBGBUS_SSPP1, 29, 0},
+ { DBGBUS_SSPP1, 29, 1},
+ { DBGBUS_SSPP1, 29, 3},
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ { DBGBUS_PERIPH, 60, 0},
+};
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
+
+	/* Unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+
+	/* Unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 84, 1},
+ { DBGBUS_DSPP, 84, 2},
+ { DBGBUS_DSPP, 84, 3},
+ { DBGBUS_DSPP, 84, 4},
+ { DBGBUS_DSPP, 84, 5},
+ { DBGBUS_DSPP, 84, 6},
+ { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
+
+
+ { DBGBUS_DSPP, 85, 1},
+ { DBGBUS_DSPP, 85, 2},
+ { DBGBUS_DSPP, 85, 3},
+ { DBGBUS_DSPP, 85, 4},
+ { DBGBUS_DSPP, 85, 5},
+ { DBGBUS_DSPP, 85, 6},
+ { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
+
+
+ { DBGBUS_DSPP, 86, 1},
+ { DBGBUS_DSPP, 86, 2},
+ { DBGBUS_DSPP, 86, 3},
+ { DBGBUS_DSPP, 86, 4},
+ { DBGBUS_DSPP, 86, 5},
+ { DBGBUS_DSPP, 86, 6},
+ { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
+
+
+ { DBGBUS_DSPP, 87, 1},
+ { DBGBUS_DSPP, 87, 2},
+ { DBGBUS_DSPP, 87, 3},
+ { DBGBUS_DSPP, 87, 4},
+ { DBGBUS_DSPP, 87, 5},
+ { DBGBUS_DSPP, 87, 6},
+ { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 88, 1},
+ { DBGBUS_DSPP, 88, 2},
+ { DBGBUS_DSPP, 88, 3},
+ { DBGBUS_DSPP, 88, 4},
+ { DBGBUS_DSPP, 88, 5},
+ { DBGBUS_DSPP, 88, 6},
+ { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 89, 1},
+ { DBGBUS_DSPP, 89, 2},
+ { DBGBUS_DSPP, 89, 3},
+ { DBGBUS_DSPP, 89, 4},
+ { DBGBUS_DSPP, 89, 5},
+ { DBGBUS_DSPP, 89, 6},
+ { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 90, 1},
+ { DBGBUS_DSPP, 90, 2},
+ { DBGBUS_DSPP, 90, 3},
+ { DBGBUS_DSPP, 90, 4},
+ { DBGBUS_DSPP, 90, 5},
+ { DBGBUS_DSPP, 90, 6},
+ { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 91, 1},
+ { DBGBUS_DSPP, 91, 2},
+ { DBGBUS_DSPP, 91, 3},
+ { DBGBUS_DSPP, 91, 4},
+ { DBGBUS_DSPP, 91, 5},
+ { DBGBUS_DSPP, 91, 6},
+ { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 92, 1},
+ { DBGBUS_DSPP, 92, 2},
+ { DBGBUS_DSPP, 92, 3},
+ { DBGBUS_DSPP, 92, 4},
+ { DBGBUS_DSPP, 92, 5},
+ { DBGBUS_DSPP, 92, 6},
+ { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 93, 1},
+ { DBGBUS_DSPP, 93, 2},
+ { DBGBUS_DSPP, 93, 3},
+ { DBGBUS_DSPP, 93, 4},
+ { DBGBUS_DSPP, 93, 5},
+ { DBGBUS_DSPP, 93, 6},
+ { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 94, 1},
+ { DBGBUS_DSPP, 94, 2},
+ { DBGBUS_DSPP, 94, 3},
+ { DBGBUS_DSPP, 94, 4},
+ { DBGBUS_DSPP, 94, 5},
+ { DBGBUS_DSPP, 94, 6},
+ { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 95, 1},
+ { DBGBUS_DSPP, 95, 2},
+ { DBGBUS_DSPP, 95, 3},
+ { DBGBUS_DSPP, 95, 4},
+ { DBGBUS_DSPP, 95, 5},
+ { DBGBUS_DSPP, 95, 6},
+ { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM5 */
+ { DBGBUS_DSPP, 110, 1},
+ { DBGBUS_DSPP, 110, 2},
+ { DBGBUS_DSPP, 110, 3},
+ { DBGBUS_DSPP, 110, 4},
+ { DBGBUS_DSPP, 110, 5},
+ { DBGBUS_DSPP, 110, 6},
+ { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 111, 1},
+ { DBGBUS_DSPP, 111, 2},
+ { DBGBUS_DSPP, 111, 3},
+ { DBGBUS_DSPP, 111, 4},
+ { DBGBUS_DSPP, 111, 5},
+ { DBGBUS_DSPP, 111, 6},
+ { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 112, 1},
+ { DBGBUS_DSPP, 112, 2},
+ { DBGBUS_DSPP, 112, 3},
+ { DBGBUS_DSPP, 112, 4},
+ { DBGBUS_DSPP, 112, 5},
+ { DBGBUS_DSPP, 112, 6},
+ { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 113, 1},
+ { DBGBUS_DSPP, 113, 2},
+ { DBGBUS_DSPP, 113, 3},
+ { DBGBUS_DSPP, 113, 4},
+ { DBGBUS_DSPP, 113, 5},
+ { DBGBUS_DSPP, 113, 6},
+ { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 114, 1},
+ { DBGBUS_DSPP, 114, 2},
+ { DBGBUS_DSPP, 114, 3},
+ { DBGBUS_DSPP, 114, 4},
+ { DBGBUS_DSPP, 114, 5},
+ { DBGBUS_DSPP, 114, 6},
+ { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 115, 1},
+ { DBGBUS_DSPP, 115, 2},
+ { DBGBUS_DSPP, 115, 3},
+ { DBGBUS_DSPP, 115, 4},
+ { DBGBUS_DSPP, 115, 5},
+ { DBGBUS_DSPP, 115, 6},
+ { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 116, 1},
+ { DBGBUS_DSPP, 116, 2},
+ { DBGBUS_DSPP, 116, 3},
+ { DBGBUS_DSPP, 116, 4},
+ { DBGBUS_DSPP, 116, 5},
+ { DBGBUS_DSPP, 116, 6},
+ { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 117, 1},
+ { DBGBUS_DSPP, 117, 2},
+ { DBGBUS_DSPP, 117, 3},
+ { DBGBUS_DSPP, 117, 4},
+ { DBGBUS_DSPP, 117, 5},
+ { DBGBUS_DSPP, 117, 6},
+ { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 118, 1},
+ { DBGBUS_DSPP, 118, 2},
+ { DBGBUS_DSPP, 118, 3},
+ { DBGBUS_DSPP, 118, 4},
+ { DBGBUS_DSPP, 118, 5},
+ { DBGBUS_DSPP, 118, 6},
+ { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 119, 1},
+ { DBGBUS_DSPP, 119, 2},
+ { DBGBUS_DSPP, 119, 3},
+ { DBGBUS_DSPP, 119, 4},
+ { DBGBUS_DSPP, 119, 5},
+ { DBGBUS_DSPP, 119, 6},
+ { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 120, 1},
+ { DBGBUS_DSPP, 120, 2},
+ { DBGBUS_DSPP, 120, 3},
+ { DBGBUS_DSPP, 120, 4},
+ { DBGBUS_DSPP, 120, 5},
+ { DBGBUS_DSPP, 120, 6},
+ { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ /* intf0-3 */
+ { DBGBUS_PERIPH, 0, 0},
+ { DBGBUS_PERIPH, 1, 0},
+ { DBGBUS_PERIPH, 2, 0},
+ { DBGBUS_PERIPH, 3, 0},
+
+ /* te counter wrapper */
+ { DBGBUS_PERIPH, 60, 0},
+
+ /* dsc0 */
+ { DBGBUS_PERIPH, 47, 0},
+ { DBGBUS_PERIPH, 47, 1},
+ { DBGBUS_PERIPH, 47, 2},
+ { DBGBUS_PERIPH, 47, 3},
+ { DBGBUS_PERIPH, 47, 4},
+ { DBGBUS_PERIPH, 47, 5},
+ { DBGBUS_PERIPH, 47, 6},
+ { DBGBUS_PERIPH, 47, 7},
+
+ /* dsc1 */
+ { DBGBUS_PERIPH, 48, 0},
+ { DBGBUS_PERIPH, 48, 1},
+ { DBGBUS_PERIPH, 48, 2},
+ { DBGBUS_PERIPH, 48, 3},
+ { DBGBUS_PERIPH, 48, 4},
+ { DBGBUS_PERIPH, 48, 5},
+ { DBGBUS_PERIPH, 48, 6},
+ { DBGBUS_PERIPH, 48, 7},
+
+ /* dsc2 */
+ { DBGBUS_PERIPH, 51, 0},
+ { DBGBUS_PERIPH, 51, 1},
+ { DBGBUS_PERIPH, 51, 2},
+ { DBGBUS_PERIPH, 51, 3},
+ { DBGBUS_PERIPH, 51, 4},
+ { DBGBUS_PERIPH, 51, 5},
+ { DBGBUS_PERIPH, 51, 6},
+ { DBGBUS_PERIPH, 51, 7},
+
+ /* dsc3 */
+ { DBGBUS_PERIPH, 52, 0},
+ { DBGBUS_PERIPH, 52, 1},
+ { DBGBUS_PERIPH, 52, 2},
+ { DBGBUS_PERIPH, 52, 3},
+ { DBGBUS_PERIPH, 52, 4},
+ { DBGBUS_PERIPH, 52, 5},
+ { DBGBUS_PERIPH, 52, 6},
+ { DBGBUS_PERIPH, 52, 7},
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* cdwn */
+ { DBGBUS_PERIPH, 80, 0},
+ { DBGBUS_PERIPH, 80, 1},
+ { DBGBUS_PERIPH, 80, 2},
+
+ { DBGBUS_PERIPH, 81, 0},
+ { DBGBUS_PERIPH, 81, 1},
+ { DBGBUS_PERIPH, 81, 2},
+
+ { DBGBUS_PERIPH, 82, 0},
+ { DBGBUS_PERIPH, 82, 1},
+ { DBGBUS_PERIPH, 82, 2},
+ { DBGBUS_PERIPH, 82, 3},
+ { DBGBUS_PERIPH, 82, 4},
+ { DBGBUS_PERIPH, 82, 5},
+ { DBGBUS_PERIPH, 82, 6},
+ { DBGBUS_PERIPH, 82, 7},
+
+ /* hdmi */
+ { DBGBUS_PERIPH, 68, 0},
+ { DBGBUS_PERIPH, 68, 1},
+ { DBGBUS_PERIPH, 68, 2},
+ { DBGBUS_PERIPH, 68, 3},
+ { DBGBUS_PERIPH, 68, 4},
+ { DBGBUS_PERIPH, 68, 5},
+
+ /* edp */
+ { DBGBUS_PERIPH, 69, 0},
+ { DBGBUS_PERIPH, 69, 1},
+ { DBGBUS_PERIPH, 69, 2},
+ { DBGBUS_PERIPH, 69, 3},
+ { DBGBUS_PERIPH, 69, 4},
+ { DBGBUS_PERIPH, 69, 5},
+
+ /* dsi0 */
+ { DBGBUS_PERIPH, 70, 0},
+ { DBGBUS_PERIPH, 70, 1},
+ { DBGBUS_PERIPH, 70, 2},
+ { DBGBUS_PERIPH, 70, 3},
+ { DBGBUS_PERIPH, 70, 4},
+ { DBGBUS_PERIPH, 70, 5},
+
+ /* dsi1 */
+ { DBGBUS_PERIPH, 71, 0},
+ { DBGBUS_PERIPH, 71, 1},
+ { DBGBUS_PERIPH, 71, 2},
+ { DBGBUS_PERIPH, 71, 3},
+ { DBGBUS_PERIPH, 71, 4},
+ { DBGBUS_PERIPH, 71, 5},
+};
+
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
+ {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+ {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+ {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
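+
+/*
+ * Reading the table above against struct vbif_debug_bus_entry: the first
+ * row {0x214, 0x21c, 16, 2, 0x0, 0xd} means disable_bus_addr 0x214,
+ * block_bus_addr 0x21c, bit_offset 16, block_cnt 2 and test points
+ * 0x0..0xc (test_pnt_start 0x0, test_pnt_cnt 0xd).
+ */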
+
+/**
+ * _dpu_dbg_enable_power - enable/disable runtime PM for hw register access
+ * @enable: whether to turn power on or off
+ */
+static inline void _dpu_dbg_enable_power(int enable)
+{
+ if (enable)
+ pm_runtime_get_sync(dpu_dbg_base.dev);
+ else
+ pm_runtime_put_sync(dpu_dbg_base.dev);
+}
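+
+/*
+ * Callers bracket raw register access with this helper, as the dump
+ * routines below do:
+ *
+ *	_dpu_dbg_enable_power(true);
+ *	... readl_relaxed()/writel_relaxed() on the debug bus ...
+ *	_dpu_dbg_enable_power(false);
+ */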
+
+static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 status = 0;
+ struct dpu_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int list_size;
+ int i;
+ u32 offset;
+ void __iomem *mem_base = NULL;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base + bus->top_blk_off;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dump_mem = &bus->cmn.dumped_content;
+
+	/* for each bus entry, keep 4 u32 values (16 bytes) in memory */
+ list_size = (bus->cmn.entries_size * 4 * 4);
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
+ for (i = 0; i < bus->cmn.entries_size; i++) {
+ head = bus->entries + i;
+ writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+ mem_base + head->wr_addr);
+ wmb(); /* make sure test bits were written */
+
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
+ offset = DBGBUS_DSPP_STATUS;
+ /* keep DSPP test point enabled */
+ if (head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+ } else {
+ offset = head->wr_addr + 0x4;
+ }
+
+ status = readl_relaxed(mem_base + offset);
+
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ head->wr_addr, head->block_id,
+ head->test_id, status);
+
+ if (dump_addr && in_mem) {
+ dump_addr[i*4] = head->wr_addr;
+ dump_addr[i*4 + 1] = head->block_id;
+ dump_addr[i*4 + 2] = head->test_id;
+ dump_addr[i*4 + 3] = status;
+ }
+
+ if (head->analyzer)
+ head->analyzer(mem_base, head, status);
+
+ /* Disable debug bus once we are done */
+ writel_relaxed(0, mem_base + head->wr_addr);
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
+ head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
+ }
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+static void _dpu_dbg_dump_vbif_debug_bus_entry(
+ struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+ u32 *dump_addr, bool in_log)
+{
+ int i, j;
+ u32 val;
+
+ if (!dump_addr && !in_log)
+ return;
+
+ for (i = 0; i < head->block_cnt; i++) {
+ writel_relaxed(1 << (i + head->bit_offset),
+ mem_base + head->block_bus_addr);
+ /* make sure that the current bus block is enabled */
+ wmb();
+ for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+ writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+ /* make sure that test point is enabled */
+ wmb();
+ val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+ if (dump_addr) {
+ *dump_addr++ = head->block_bus_addr;
+ *dump_addr++ = i;
+ *dump_addr++ = j;
+ *dump_addr++ = val;
+ }
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+ head->block_bus_addr, i, j, val);
+ }
+ }
+}
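+
+/*
+ * Decoding sketch (illustrative, not part of this patch): each record
+ * written above is likewise four u32 values, e.g. for a record pointer
+ * "rec" into the dumped buffer:
+ *
+ *	pr_info("bus=0x%x arb/xin=%u tp=%u val=0x%x\n",
+ *		rec[0], rec[1], rec[2], rec[3]);
+ */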
+
+static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 value, d0, d1;
+ unsigned long reg, reg1, reg2;
+ struct vbif_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int i, list_size = 0;
+ void __iomem *mem_base = NULL;
+ struct vbif_debug_bus_entry *dbg_bus;
+ u32 bus_size;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dbg_bus = bus->entries;
+ bus_size = bus->cmn.entries_size;
+ list_size = bus->cmn.entries_size;
+ dump_mem = &bus->cmn.dumped_content;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+ return;
+
+ /* allocate memory for each test point */
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+ list_size += (head->block_cnt * head->test_pnt_cnt);
+ }
+
+ /* 4 bytes * 4 entries for each test point */
+ list_size *= 16;
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
+
+ value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+ writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+ /* make sure that vbif core is on */
+ wmb();
+
+ /*
+ * Extract VBIF error info based on XIN halt and error status.
+ * If the XIN client is not in HALT state, or an error is detected,
+ * then retrieve the VBIF error info for it.
+ */
+ reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+ reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+ reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+ dev_err(dpu_dbg_base.dev,
+ "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+ reg, reg1, reg2);
+ reg >>= 16;
+ reg &= ~(reg1 | reg2);
+ for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+ if (!test_bit(0, &reg)) {
+ writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+ /* make sure reg write goes through */
+ wmb();
+
+ d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+ d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+ dev_err(dpu_dbg_base.dev,
+ "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+ i, d0, d1);
+ }
+ reg >>= 1;
+ }
+
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+
+ writel_relaxed(0, mem_base + head->disable_bus_addr);
+ writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+ /* make sure that other bus is off */
+ wmb();
+
+ _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+ in_log);
+ if (dump_addr)
+ dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+ }
+
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+/**
+ * _dpu_dump_array - dump array of register bases
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
+ if (dump_dbgbus_dpu)
+ _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
+
+ if (dump_dbgbus_vbif_rt)
+ _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
+}
+
+/**
+ * _dpu_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _dpu_dump_work(struct work_struct *work)
+{
+ _dpu_dump_array("dpudump_workitem",
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+}
+
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
+ if (queue_work && work_pending(&dpu_dbg_base.dump_work))
+ return;
+
+ if (!queue_work) {
+ _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
+ return;
+ }
+
+ /* schedule work to dump later */
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+ dump_dbgbus_vbif_rt;
+ schedule_work(&dpu_dbg_base.dump_work);
+}
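+
+/*
+ * Usage sketch (illustrative): callers that must not sleep can defer the
+ * dump to the work item, while normal-context callers can dump
+ * synchronously, as dpu_encoder_helper_hw_reset() does:
+ *
+ *	dpu_dbg_dump(true, __func__, true, true);	// deferred
+ *	dpu_dbg_dump(false, __func__, true, true);	// immediate
+ */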
+
+/**
+ * dpu_dbg_debugfs_open - debugfs open handler for debug dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+/**
+ * dpu_dbg_dump_write - debugfs write handler for debug dump
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t dpu_dbg_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ _dpu_dump_array("dump_debugfs", true, true);
+ return count;
+}
+
+static const struct file_operations dpu_dbg_dump_fops = {
+ .open = dpu_dbg_debugfs_open,
+ .write = dpu_dbg_dump_write,
+};
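+
+/*
+ * With the "dump" file registered below, both debug buses can be dumped
+ * from userspace (the exact path depends on where the caller rooted the
+ * debugfs tree), e.g.:
+ *
+ *	echo 1 > /sys/kernel/debug/dri/0/debug/dump
+ */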
+
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ struct dpu_dbg_base *dbg = &dpu_dbg_base;
+ char debug_name[80] = "";
+
+ if (!debugfs_root)
+ return -EINVAL;
+
+ debugfs_create_file("dump", 0600, debugfs_root, NULL,
+ &dpu_dbg_dump_fops);
+
+ if (dbg->dbgbus_dpu.entries) {
+ dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_dpu.cmn.name);
+ dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_dpu.cmn.enable_mask);
+ }
+
+ if (dbg->dbgbus_vbif_rt.entries) {
+ dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_vbif_rt.cmn.name);
+ dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_vbif_rt.cmn.enable_mask);
+ }
+
+ return 0;
+}
+
+static void _dpu_dbg_debugfs_destroy(void)
+{
+}
+
+void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+ struct dpu_dbg_base *dbg = &dpu_dbg_base;
+
+ memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
+ memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+ if (IS_MSM8998_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
+ dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
+ dbg->dbgbus_dpu.cmn.entries_size =
+ ARRAY_SIZE(dbg_bus_dpu_sdm845);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ /* vbif is unchanged vs 8998 */
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else {
+ pr_err("unsupported chipset id %X\n", hwversion);
+ }
+}
+
+int dpu_dbg_init(struct device *dev)
+{
+ if (!dev) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
+ dpu_dbg_base.dev = dev;
+
+ INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
+
+ return 0;
+}
+
+/**
+ * dpu_dbg_destroy - destroy dpu debug facilities
+ */
+void dpu_dbg_destroy(void)
+{
+ _dpu_dbg_debugfs_destroy();
+}
+
+void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+ dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
new file mode 100644
index 000000000000..1e6fa945f98b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DPU_DBG_H_
+#define DPU_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+enum dpu_dbg_dump_flag {
+ DPU_DBG_DUMP_IN_LOG = BIT(0),
+ DPU_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion: Chipset revision
+ */
+void dpu_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * dpu_dbg_init - initialize global dpu debug facilities: regdump
+ * @dev: device handle
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_init(struct device *dev);
+
+/**
+ * dpu_dbg_debugfs_register - register entries at the given debugfs dir
+ * @debugfs_root: debugfs root in which to create dpu debug entries
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
+
+/**
+ * dpu_dbg_destroy - destroy the global dpu debug facilities
+ * Returns: none
+ */
+void dpu_dbg_destroy(void);
+
+/**
+ * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
+ * @queue_work: whether to queue the dumping work to the work_struct
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: dump the vbif rt debug bus
+ * Returns: none
+ */
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt);
+
+/**
+ * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
+ * address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void dpu_dbg_set_dpu_top_offset(u32 blk_off);
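+
+/*
+ * Typical bring-up order (illustrative sketch, error handling omitted):
+ *
+ *	dpu_dbg_init(dev);
+ *	dpu_dbg_init_dbg_buses(hwversion);
+ *	dpu_dbg_set_dpu_top_offset(top_blk_off);
+ *	dpu_dbg_debugfs_register(debugfs_root);
+ */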
+
+#else
+
+static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int dpu_dbg_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ return 0;
+}
+
+static inline void dpu_dbg_destroy(void)
+{
+}
+
+static inline void dpu_dbg_dump(bool queue_work, const char *name,
+ bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
+{
+}
+
+static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
new file mode 100644
index 000000000000..1b4de3486ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -0,0 +1,2500 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_formats.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_crtc.h"
+#include "dpu_trace.h"
+#include "dpu_core_irq.h"
+
+#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+/*
+ * Two to anticipate panels that can do cmd/vid dynamic switching; the
+ * plan is to create all possible physical encoder types and switch
+ * between them at runtime.
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+#define MISR_BUFF_SIZE 256
+
+#define IDLE_SHORT_TIMEOUT 1
+
+#define MAX_VDISPLAY_SPLIT 1080
+
+/**
+ * enum dpu_enc_rc_events - events for resource control state machine
+ * @DPU_ENC_RC_EVENT_KICKOFF:
+ * This event happens at NORMAL priority.
+ * Event that signals the start of the transfer. When this event is
+ * received, enable MDP/DSI core clocks. Regardless of the previous
+ * state, the resource should be in ON state at the end of this event.
+ * @DPU_ENC_RC_EVENT_FRAME_DONE:
+ * This event happens at INTERRUPT level.
+ * Event signals the end of the data transfer after the PP FRAME_DONE
+ * event. At the end of this event, a delayed work is scheduled to go to
+ * IDLE_PC state after IDLE_TIMEOUT time.
+ * @DPU_ENC_RC_EVENT_PRE_STOP:
+ * This event happens at NORMAL priority.
+ * When received during the ON state, this event leaves the RC state
+ * in the PRE_OFF state. It should be followed by the STOP event as
+ * part of encoder disable.
+ * If received during IDLE or OFF states, it will do nothing.
+ * @DPU_ENC_RC_EVENT_STOP:
+ * This event happens at NORMAL priority.
+ * When this event is received, disable all the MDP/DSI core clocks, and
+ * disable IRQs. It should be called from the PRE_OFF or IDLE states.
+ * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
+ * PRE_OFF is expected when PRE_STOP was executed during the ON state.
+ * Resource state should be in OFF at the end of the event.
+ * @DPU_ENC_RC_EVENT_ENTER_IDLE:
+ * This event happens at NORMAL priority from a work item.
+ * Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ * This would disable MDP/DSI core clocks and change the resource state
+ * to IDLE.
+ */
+enum dpu_enc_rc_events {
+ DPU_ENC_RC_EVENT_KICKOFF = 1,
+ DPU_ENC_RC_EVENT_FRAME_DONE,
+ DPU_ENC_RC_EVENT_PRE_STOP,
+ DPU_ENC_RC_EVENT_STOP,
+ DPU_ENC_RC_EVENT_ENTER_IDLE
+};
+
+/**
+ * enum dpu_enc_rc_states - states that the resource control maintains
+ * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
+ * @DPU_ENC_RC_STATE_ON: Resource is in ON state
+ * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum dpu_enc_rc_states {
+ DPU_ENC_RC_STATE_OFF,
+ DPU_ENC_RC_STATE_PRE_OFF,
+ DPU_ENC_RC_STATE_ON,
+ DPU_ENC_RC_STATE_IDLE
+};
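+
+/*
+ * Illustrative summary of the transitions described above:
+ *
+ *	OFF/IDLE --KICKOFF--> ON
+ *	ON --PRE_STOP--> PRE_OFF --STOP--> OFF
+ *	ON --FRAME_DONE, then ENTER_IDLE after idle_timeout--> IDLE
+ *	IDLE --STOP--> OFF
+ */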
+
+/**
+ * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
+ * encoders. Virtual encoder manages one "logical" display. Physical
+ * encoders manage one intf block, tied to a specific panel/sub-panel.
+ * Virtual encoder defers as much as possible to the physical encoders.
+ * Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client: Client handle to the bus scaling interface
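+ * @display_num_of_h_tiles: Number of horizontal tiles across the display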
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master: Pointer to the current master in this mode. An
+ * optimization; only valid after enable, cleared on disable.
+ * @hw_pp: Handle to the pingpong blocks used for the display. The
+ * number of pingpong blocks can differ from num_phys_encs.
+ * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
+ * for partial update right-only cases, such as pingpong
+ * split where virtual pingpong does not generate IRQs
+ * @crtc_vblank_cb: Callback into the upper layer / CRTC for
+ * notification of the VBLANK
+ * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @crtc_kickoff_cb: Callback into CRTC that will flush & start
+ * all CTL paths
+ * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
+ * @debugfs_root: Debug file system root file node
+ * @enc_lock: Lock around physical encoder create/destroy and
+ * access.
+ * @frame_busy_mask: Bitmask tracking which phys_encs are still busy
+ * processing the current command.
+ * Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb: callback handler for frame event
+ * @crtc_frame_event_cb_data: callback handler private data
+ * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timer: watchdog timer for frame done event
+ * @vsync_event_timer: vsync timer
+ * @disp_info: local copy of msm_display_info struct
+ * @misr_enable: misr enable/disable status
+ * @misr_frame_count: misr frame count before start capturing the data
+ * @idle_pc_supported: indicates whether idle power collapse is supported
+ * @rc_lock: resource control mutex lock to protect
+ * virt encoder over various state changes
+ * @rc_state: resource controller state
+ * @delayed_off_work: delayed worker to schedule disabling of
+ * clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work: worker to handle vsync event for autorefresh
+ * @topology: topology of the display
+ * @mode_set_complete: flag to indicate modeset completion
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+struct dpu_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t enc_spinlock;
+ uint32_t bus_scaling_client;
+
+ uint32_t display_num_of_h_tiles;
+
+ unsigned int num_phys_encs;
+ struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+ struct dpu_encoder_phys *cur_master;
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+ bool intfs_swapped;
+
+ void (*crtc_vblank_cb)(void *);
+ void *crtc_vblank_cb_data;
+
+ struct dentry *debugfs_root;
+ struct mutex enc_lock;
+ DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+ void (*crtc_frame_event_cb)(void *, u32 event);
+ void *crtc_frame_event_cb_data;
+
+ atomic_t frame_done_timeout;
+ struct timer_list frame_done_timer;
+ struct timer_list vsync_event_timer;
+
+ struct msm_display_info disp_info;
+ bool misr_enable;
+ u32 misr_frame_count;
+
+ bool idle_pc_supported;
+ struct mutex rc_lock;
+ enum dpu_enc_rc_states rc_state;
+ struct kthread_delayed_work delayed_off_work;
+ struct kthread_work vsync_event_work;
+ struct msm_display_topology topology;
+ bool mode_set_complete;
+
+ u32 idle_timeout;
+};
+
+#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+
+static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
+ bool enable)
+{
+ struct drm_encoder *drm_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu enc\n");
+ return -EINVAL;
+ }
+
+ drm_enc = &dpu_enc->base;
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_ERROR);
+}
+
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+ int32_t hw_id, struct dpu_encoder_wait_info *info);
+
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info)
+{
+ struct dpu_encoder_irq *irq;
+ u32 irq_status;
+ int ret;
+
+ if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* note: do master / slave checking outside */
+
+ /* return EWOULDBLOCK since we know the wait isn't necessary */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return -EWOULDBLOCK;
+ }
+
+ if (irq->irq_idx < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->name);
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+
+ ret = dpu_encoder_helper_wait_event_timeout(
+ DRMID(phys_enc->parent),
+ irq->hw_idx,
+ wait_info);
+
+ if (ret <= 0) {
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
+ irq->irq_idx, true);
+ if (irq_status) {
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ local_irq_save(flags);
+ irq->cb.func(phys_enc, irq->irq_idx);
+ local_irq_restore(flags);
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+ } else {
+ ret = 0;
+ trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+ intr_idx, irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+
+ return ret;
+}
+
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret = 0;
+
+ if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ if (irq->irq_idx >= 0) {
+ DPU_DEBUG_PHYS(phys_enc,
+ "skipping already registered irq %s type %d\n",
+ irq->name, irq->intr_type);
+ return 0;
+ }
+
+ irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
+ irq->intr_type, irq->hw_idx);
+ if (irq->irq_idx < 0) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to lookup IRQ index for %s type:%d\n",
+ irq->name, irq->intr_type);
+ return -EINVAL;
+ }
+
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to register IRQ callback for %s\n",
+ irq->name);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ irq->irq_idx, &irq->cb);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ return ret;
+}
+
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* silently skip irqs that weren't registered */
+ if (irq->irq_idx < 0) {
+ DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return 0;
+ }
+
+ ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ irq->irq_idx = -EINVAL;
+
+ return 0;
+}
+
+void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!hw_res || !drm_enc || !conn_state) {
+ DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+ drm_enc != 0, hw_res != 0, conn_state != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /* Query resources used by phys encs, expected to be without overlap */
+ memset(hw_res, 0, sizeof(*hw_res));
+ hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.get_hw_resources)
+ phys->ops.get_hw_resources(phys, hw_res, conn_state);
+ }
+}
+
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ mutex_lock(&dpu_enc->enc_lock);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.destroy) {
+ phys->ops.destroy(phys);
+ --dpu_enc->num_phys_encs;
+ dpu_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ if (dpu_enc->num_phys_encs)
+ DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
+ dpu_enc->num_phys_encs);
+ dpu_enc->num_phys_encs = 0;
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ drm_encoder_cleanup(drm_enc);
+ mutex_destroy(&dpu_enc->enc_lock);
+
+ kfree(dpu_enc);
+}
+
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct split_pipe_cfg cfg = { 0 };
+ struct dpu_hw_mdp *hw_mdptop;
+ struct msm_display_info *disp_info;
+
+ if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ hw_mdptop = phys_enc->hw_mdptop;
+ disp_info = &dpu_enc->disp_info;
+
+ if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+ return;
+
+ /*
+ * Disable split modes since the encoder will be operating as the only
+ * encoder, either for the entire use case (for example, single DSI),
+ * or for this frame in the case of left/right-only partial update.
+ */
+ if (phys_enc->split_role == ENC_ROLE_SOLO) {
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ return;
+ }
+
+ cfg.en = true;
+ cfg.mode = phys_enc->intf_mode;
+ cfg.intf = interface;
+
+ if (cfg.en && phys_enc->ops.needs_single_flush &&
+ phys_enc->ops.needs_single_flush(phys_enc))
+ cfg.split_flush_en = true;
+
+ if (phys_enc->split_role == ENC_ROLE_MASTER) {
+ DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
+
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ }
+}
+
+static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode)
+{
+ struct drm_display_mode *cur_mode;
+
+ if (!connector || !adj_mode)
+ return;
+
+ list_for_each_entry(cur_mode, &connector->modes, head) {
+ if (cur_mode->vdisplay == adj_mode->vdisplay &&
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ cur_mode->vrefresh == adj_mode->vrefresh) {
+ adj_mode->private = cur_mode->private;
+ adj_mode->private_flags |= cur_mode->private_flags;
+ }
+ }
+}
+
+static struct msm_display_topology dpu_encoder_get_topology(
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct drm_display_mode *mode)
+{
+ struct msm_display_topology topology;
+ int i, intf_count = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+ /* Use split topology for vdisplay > 1080 */
+ topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+ topology.num_enc = 0;
+ topology.num_intf = intf_count;
+
+ return topology;
+}
+
+static int dpu_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+ struct msm_display_topology topology;
+ int i = 0;
+ int ret = 0;
+
+ if (!drm_enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+ drm_enc != 0, crtc_state != 0, conn_state != 0);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ mode = &crtc_state->mode;
+ adj_mode = &crtc_state->adjusted_mode;
+ trace_dpu_enc_atomic_check(DRMID(drm_enc));
+
+ /*
+ * display drivers may populate private fields of the drm display mode
+ * structure while registering possible modes of a connector with DRM.
+ * These private fields are not populated back while DRM invokes
+ * the mode_set callbacks. This module retrieves and populates the
+ * private fields of the given mode.
+ */
+ _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
+
+ /* perform atomic check on the first physical encoder (master) */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.atomic_check)
+ ret = phys->ops.atomic_check(phys, crtc_state,
+ conn_state);
+ else if (phys && phys->ops.mode_fixup)
+ if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+ ret = -EINVAL;
+
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "mode unsupported, phys idx %d\n", i);
+ break;
+ }
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ if (!ret) {
+ /*
+ * Avoid reserving resources when mode set is pending. Topology
+ * info may not be available to complete reservation.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)
+ && dpu_enc->mode_set_complete) {
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
+ conn_state, topology, true);
+ dpu_enc->mode_set_complete = false;
+ }
+ }
+
+ if (!ret)
+ drm_mode_set_crtcinfo(adj_mode, 0);
+
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
+ adj_mode->private_flags);
+
+ return ret;
+}
+
+static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
+ struct msm_display_info *disp_info)
+{
+ struct dpu_vsync_source_cfg vsync_cfg = { 0 };
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct drm_encoder *drm_enc;
+ int i;
+
+ if (!dpu_enc || !disp_info) {
+ DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
+ dpu_enc != NULL, disp_info != NULL);
+ return;
+ } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
+ DPU_ERROR("invalid num phys enc %d/%d\n",
+ dpu_enc->num_phys_encs,
+ (int) ARRAY_SIZE(dpu_enc->hw_pp));
+ return;
+ }
+
+ drm_enc = &dpu_enc->base;
+ /* these pointers are checked in virt_enable_helper */
+ priv = drm_enc->dev->dev_private;
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ hw_mdptop = dpu_kms->hw_mdp;
+ if (!hw_mdptop) {
+ DPU_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ if (hw_mdptop->ops.setup_vsync_source &&
+ disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
+
+ vsync_cfg.pp_count = dpu_enc->num_phys_encs;
+ if (disp_info->is_te_using_watchdog_timer)
+ vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
+ else
+ vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
+
+ hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+ }
+}
+
+static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.irq_control)
+ phys->ops.irq_control(phys, enable);
+ }
+
+}
+
+static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+ bool enable)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("encoder master not set\n");
+ return;
+ }
+
+ if (enable) {
+ /* enable DPU core clks */
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* enable all the irq */
+ _dpu_encoder_irq_control(drm_enc, true);
+
+ } else {
+ /* disable all the irq */
+ _dpu_encoder_irq_control(drm_enc, false);
+
+ /* disable DPU core clks */
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
+
+}
+
+static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
+ u32 sw_event)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *disp_thread;
+ bool is_vid_mode = false;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ is_vid_mode = dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return -EINVAL;
+ }
+ disp_thread = &priv->disp_thread[drm_enc->crtc->index];
+
+ /*
+ * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
+ * STOP events and return early for other events (ie wb display).
+ */
+ if (!dpu_enc->idle_pc_supported &&
+ (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
+ sw_event != DPU_ENC_RC_EVENT_STOP &&
+ sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
+ return 0;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+ dpu_enc->rc_state, "begin");
+
+ switch (sw_event) {
+ case DPU_ENC_RC_EVENT_KICKOFF:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in ON state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
+ dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
+ _dpu_encoder_irq_control(drm_enc, true);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, true);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "kickoff");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_FRAME_DONE:
+ /*
+ * mutex lock is not used as this event happens at interrupt
+ * context, and locking is not required as the other events
+ * like KICKOFF and STOP do a wait-for-idle before executing
+ * the resource_control
+ */
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ return -EINVAL;
+ }
+
+ /*
+ * schedule off work item only when there are no
+ * frames pending
+ */
+ if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
+ DRM_DEBUG_KMS("id:%d skip schedule work\n",
+ DRMID(drm_enc));
+ return 0;
+ }
+
+ kthread_queue_delayed_work(
+ &disp_thread->worker,
+ &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "frame done");
+ break;
+
+ case DPU_ENC_RC_EVENT_PRE_STOP:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (is_vid_mode &&
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ _dpu_encoder_irq_control(drm_enc, true);
+ }
+ /* skip if is already OFF or IDLE, resources are off already */
+ else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "pre stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_STOP:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in OFF state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
+ DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ /*
+ * expect to arrive here only from the IDLE or PRE_OFF states;
+ * in the IDLE state the resources are already disabled
+ */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_ENTER_IDLE:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * if we are in ON but a frame was just kicked off,
+ * ignore the IDLE event, it's probably a stale timer event
+ */
+ if (dpu_enc->frame_busy_mask[0]) {
+ DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ if (is_vid_mode)
+ _dpu_encoder_irq_control(drm_enc, false);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "idle");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ default:
+ DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+ sw_event);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "error");
+ break;
+ }
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "end");
+ return 0;
+}
+
+static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct list_head *connector_list;
+ struct drm_connector *conn = NULL, *conn_iter;
+ struct dpu_rm_hw_iter pp_iter;
+ struct msm_display_topology topology;
+ enum dpu_rm_topology_name topology_name;
+ int i = 0, ret;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ connector_list = &dpu_kms->dev->mode_config.connector_list;
+
+ trace_dpu_enc_mode_set(DRMID(drm_enc));
+
+ list_for_each_entry(conn_iter, connector_list, head)
+ if (conn_iter->encoder == drm_enc)
+ conn = conn_iter;
+
+ if (!conn) {
+ DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
+ return;
+ } else if (!conn->state) {
+ DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
+ return;
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+ conn->state, topology, false);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "failed to reserve hw resources, %d\n", ret);
+ return;
+ }
+
+ dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ dpu_enc->hw_pp[i] = NULL;
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ break;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ }
+
+ topology_name = dpu_rm_get_topology_name(topology);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid pingpong block for the encoder\n");
+ return;
+ }
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->connector = conn->state->connector;
+ phys->topology_name = topology_name;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
+ }
+ }
+
+ dpu_enc->mode_set_complete = true;
+}
+
+static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ if (!dpu_enc || !dpu_enc->cur_master) {
+ DPU_ERROR("invalid dpu encoder/master\n");
+ return;
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+ dpu_enc->cur_master->hw_mdptop);
+
+ if (dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+ dpu_enc->cur_master->hw_mdptop,
+ dpu_kms->catalog);
+
+ _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+}
+
+void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
+ phys->ops.restore(phys);
+ }
+
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
+ dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+ struct drm_display_mode *cur_mode = NULL;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+ trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+ cur_mode->vdisplay);
+
+ dpu_enc->cur_master = NULL;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
+ DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
+ dpu_enc->cur_master = phys;
+ break;
+ }
+ }
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
+ return;
+ }
+
+ ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
+ ret);
+ return;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ if (phys != dpu_enc->cur_master) {
+ if (phys->ops.enable)
+ phys->ops.enable(phys);
+ }
+
+ if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
+ phys->ops.setup_misr(phys, true,
+ dpu_enc->misr_frame_count);
+ }
+
+ if (dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode *mode;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ } else if (!drm_enc->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_disable(DRMID(drm_enc));
+
+ /* wait for idle */
+ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.disable)
+ phys->ops.disable(phys);
+ }
+
+ /* after phys waits for frame-done, should be no more frames pending */
+ if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+ del_timer_sync(&dpu_enc->frame_done_timer);
+ }
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i])
+ dpu_enc->phys_encs[i]->connector = NULL;
+ }
+
+ dpu_enc->cur_master = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+
+ dpu_rm_release(&dpu_kms->rm, drm_enc);
+}
+
+static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
+ }
+
+ return INTF_MAX;
+}
+
+static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ unsigned long lock_flags;
+
+ if (!drm_enc || !phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_vblank_callback");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ if (dpu_enc->crtc_vblank_cb)
+ dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ atomic_inc(&phy_enc->vsync_cnt);
+ DPU_ATRACE_END("encoder_vblank_callback");
+}
+
+static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ if (!phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_underrun_callback");
+ atomic_inc(&phy_enc->underrun_cnt);
+ trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
+ DPU_ATRACE_END("encoder_underrun_callback");
+}
+
+void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+ void (*vbl_cb)(void *), void *vbl_data)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ bool enable;
+ int i;
+
+ enable = vbl_cb ? true : false;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_vblank_cb = vbl_cb;
+ dpu_enc->crtc_vblank_cb_data = vbl_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.control_vblank_irq)
+ phys->ops.control_vblank_irq(phys, enable);
+ }
+}
+
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+ void (*frame_event_cb)(void *, u32 event),
+ void *frame_event_cb_data)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ bool enable;
+
+ enable = frame_event_cb ? true : false;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_frame_event_cb = frame_event_cb;
+ dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+static void dpu_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *ready_phys, u32 event)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned int i;
+
+ if (event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ /*
+ * suppress frame_done without waiter,
+ * likely autorefresh
+ */
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
+ event, ready_phys->intf_idx);
+ return;
+ }
+
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] == ready_phys) {
+ clear_bit(i, dpu_enc->frame_busy_mask);
+ trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+ dpu_enc->frame_busy_mask[0]);
+ }
+ }
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ del_timer(&dpu_enc->frame_done_timer);
+
+ dpu_encoder_resource_control(drm_enc,
+ DPU_ENC_RC_EVENT_FRAME_DONE);
+
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data,
+ event);
+ }
+ } else {
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data, event);
+ }
+}
+
+static void dpu_encoder_off_work(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, delayed_off_work.work);
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ dpu_encoder_resource_control(&dpu_enc->base,
+ DPU_ENC_RC_EVENT_ENTER_IDLE);
+
+ dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
+ DPU_ENCODER_FRAME_EVENT_IDLE);
+}
+
+/**
+ * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+ struct dpu_hw_ctl *ctl;
+ int pending_kickoff_cnt;
+ u32 ret = UINT_MAX;
+
+ if (!drm_enc || !phys) {
+ DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+ drm_enc != 0, phys != 0);
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ ctl = phys->hw_ctl;
+ if (!ctl || !ctl->ops.trigger_flush) {
+ DPU_ERROR("missing trigger cb\n");
+ return;
+ }
+
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+
+ if (extra_flush_bits && ctl->ops.update_pending_flush)
+ ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+ ctl->ops.trigger_flush(ctl);
+
+ if (ctl->ops.get_pending_flush)
+ ret = ctl->ops.get_pending_flush(ctl);
+
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
+ pending_kickoff_cnt, ctl->idx, ret);
+}
+
+/**
+ * _dpu_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+{
+ if (!phys) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
+ phys->ops.trigger_start(phys);
+}
+
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ ctl = phys_enc->hw_ctl;
+ if (ctl && ctl->ops.trigger_start) {
+ ctl->ops.trigger_start(ctl);
+ trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
+ }
+}
+
+static int dpu_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ int32_t hw_id,
+ struct dpu_encoder_wait_info *info)
+{
+ int rc = 0;
+ s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+ /* local name avoids shadowing the kernel's global jiffies */
+ s64 wait_jiffies = msecs_to_jiffies(info->timeout_ms);
+ s64 time;
+
+ do {
+ rc = wait_event_timeout(*(info->wq),
+ atomic_read(info->atomic_cnt) == 0, wait_jiffies);
+ time = ktime_to_ms(ktime_get());
+
+ trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+ expected_time,
+ atomic_read(info->atomic_cnt));
+ /* if we timed out while the counter is still set and time remains, wait again */
+ } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+ (time < expected_time));
+
+ return rc;
+}
+
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ ctl = phys_enc->hw_ctl;
+
+ if (!ctl || !ctl->ops.reset)
+ return;
+
+ DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+ ctl->idx);
+
+ rc = ctl->ops.reset(ctl);
+ if (rc) {
+ DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ * @dpu_enc: Pointer to virtual encoder structure
+ */
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ uint32_t i, pending_flush;
+ unsigned long lock_flags;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ pending_flush = 0x0;
+
+ /* update pending counts and trigger kickoff ctl flush atomically */
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
+ /* don't perform flush/start operations for slave encoders */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+ continue;
+
+ ctl = phys->hw_ctl;
+ if (!ctl)
+ continue;
+
+ if (phys->split_role != ENC_ROLE_SLAVE)
+ set_bit(i, dpu_enc->frame_busy_mask);
+ if (!phys->ops.needs_single_flush ||
+ !phys->ops.needs_single_flush(phys))
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ else if (ctl->ops.get_pending_flush)
+ pending_flush |= ctl->ops.get_pending_flush(ctl);
+ }
+
+ /* for split flush, combine pending flush masks and send to master */
+ if (pending_flush && dpu_enc->cur_master) {
+ _dpu_encoder_trigger_flush(
+ &dpu_enc->base,
+ dpu_enc->cur_master,
+ pending_flush);
+ }
+
+ _dpu_encoder_trigger_start(dpu_enc->cur_master);
+
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ unsigned int i;
+ struct dpu_hw_ctl *ctl;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->hw_ctl) {
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
+
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
+ }
+ }
+}
+
+static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
+ struct drm_display_mode *mode)
+{
+ u64 pclk_rate;
+ u32 pclk_period;
+ u32 line_time;
+
+ /*
+ * For linetime calculation, only operate on master encoder.
+ */
+ if (!dpu_enc->cur_master)
+ return 0;
+
+ if (!dpu_enc->cur_master->ops.get_line_count) {
+ DPU_ERROR("get_line_count function not defined\n");
+ return 0;
+ }
+
+ pclk_rate = mode->clock; /* pixel clock in kHz */
+ if (pclk_rate == 0) {
+ DPU_ERROR("pclk is 0, cannot calculate line time\n");
+ return 0;
+ }
+
+ pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+ if (pclk_period == 0) {
+ DPU_ERROR("pclk period is 0\n");
+ return 0;
+ }
+
+ /*
+ * Line time calculation based on Pixel clock and HTOTAL.
+ * Final unit is in ns.
+ */
+ line_time = (pclk_period * mode->htotal) / 1000;
+ if (line_time == 0) {
+ DPU_ERROR("line time calculation is 0\n");
+ return 0;
+ }
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+ pclk_rate, pclk_period, line_time);
+
+ return line_time;
+}
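Worked example for the arithmetic above (illustrative numbers for a standard 1080p60 mode): mode->clock = 148500 kHz and htotal = 2200, so pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 (picoseconds per pixel) and line_time = 6735 * 2200 / 1000 = 14817 ns, i.e. roughly 14.8 us per line, matching the 67.5 kHz line rate of that mode.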
+
+static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
+ ktime_t *wakeup_time)
+{
+ struct drm_display_mode *mode;
+ struct dpu_encoder_virt *dpu_enc;
+ u32 cur_line;
+ u32 line_time;
+ u32 vtotal, time_to_vsync;
+ ktime_t cur_time;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (!drm_enc->crtc || !drm_enc->crtc->state) {
+ DPU_ERROR("crtc/crtc state object is NULL\n");
+ return -EINVAL;
+ }
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
+ if (!line_time)
+ return -EINVAL;
+
+ cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
+
+ vtotal = mode->vtotal;
+ if (cur_line >= vtotal)
+ time_to_vsync = line_time * vtotal;
+ else
+ time_to_vsync = line_time * (vtotal - cur_line);
+
+ if (time_to_vsync == 0) {
+ DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
+ vtotal);
+ return -EINVAL;
+ }
+
+ cur_time = ktime_get();
+ *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+ cur_line, vtotal, time_to_vsync,
+ ktime_to_ms(cur_time),
+ ktime_to_ms(*wakeup_time));
+ return 0;
+}
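Continuing the same 1080p60 example: with vtotal = 1125 and a current line count of 500, time_to_vsync = 14817 * (1125 - 500) ns, which is about 9.26 ms, so the vsync timer is armed to fire just after the next vsync boundary.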
+
+static void dpu_encoder_vsync_event_handler(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ vsync_event_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return;
+ }
+ event_thread = &priv->event_thread[drm_enc->crtc->index];
+ if (!event_thread) {
+ DPU_ERROR("event_thread not found for crtc:%d\n",
+ drm_enc->crtc->index);
+ return;
+ }
+
+ del_timer(&dpu_enc->vsync_event_timer);
+}
+
+static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, vsync_event_work);
+ ktime_t wakeup_time;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
+ return;
+
+ trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ bool needs_hw_reset = false;
+ unsigned int i;
+
+ if (!drm_enc || !params) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
+
+ /* prepare for next kickoff, may include waiting on previous kickoff */
+ DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys) {
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys, params);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
+ }
+ }
+ DPU_ATRACE_END("enc_prepare_for_kickoff");
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+
+ /* if any phys needs reset, reset all phys, in-order */
+ if (needs_hw_reset) {
+ trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.hw_reset)
+ phys->ops.hw_reset(phys);
+ }
+ }
+}
+
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ ktime_t wakeup_time;
+ unsigned int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DPU_ATRACE_BEGIN("encoder_kickoff");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_kickoff(DRMID(drm_enc));
+
+ atomic_set(&dpu_enc->frame_done_timeout,
+ DPU_FRAME_DONE_TIMEOUT * 1000 /
+ drm_enc->crtc->state->adjusted_mode.vrefresh);
+ mod_timer(&dpu_enc->frame_done_timer, jiffies +
+ ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+
+ /* All phys encs are ready to go, trigger the kickoff */
+ _dpu_encoder_kickoff_phys(dpu_enc);
+
+ /* allow phys encs to handle any post-kickoff business */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.handle_post_kickoff)
+ phys->ops.handle_post_kickoff(phys);
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+ !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+ trace_dpu_enc_early_kickoff(DRMID(drm_enc),
+ ktime_to_ms(wakeup_time));
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+ }
+
+ DPU_ATRACE_END("encoder_kickoff");
+}
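A note on the watchdog arithmetic above: the timeout is expressed in frames and scaled by the refresh rate. Assuming DPU_FRAME_DONE_TIMEOUT (defined elsewhere in the driver) is a frame count of 60, a 60 Hz mode arms frame_done_timer for 60 * 1000 / 60 = 1000 ms, while a 120 Hz mode halves that to 500 ms.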
+
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.prepare_commit)
+ phys->ops.prepare_commit(phys);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_encoder_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_enc = s->private;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
+ phys->intf_idx - INTF_0,
+ atomic_read(&phys->vsync_cnt),
+ atomic_read(&phys->underrun_cnt));
+
+ switch (phys->intf_mode) {
+ case INTF_MODE_VIDEO:
+ seq_puts(s, "mode: video\n");
+ break;
+ case INTF_MODE_CMD:
+ seq_puts(s, "mode: command\n");
+ break;
+ default:
+ seq_puts(s, "mode: ???\n");
+ break;
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return 0;
+}
+
+static int _dpu_encoder_debugfs_status_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_encoder_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_encoder_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ size_t buff_copy;
+ u32 frame_count, enable;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy))
+ return -EINVAL;
+
+ buf[buff_copy] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->misr_enable = enable;
+ dpu_enc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.setup_misr)
+ continue;
+
+ phys->ops.setup_misr(phys, enable, frame_count);
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_encoder_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+ int rc;
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ if (!dpu_enc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ } else if (dpu_enc->disp_info.capabilities &
+ ~MSM_DISPLAY_CAP_VID_MODE) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "unsupported\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.collect_misr)
+ continue;
+
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "Intf idx:%d\n", phys->intf_idx - INTF_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ phys->ops.collect_misr(phys));
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+ return len;
+}
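Taken together, the two debugfs hooks give a simple MISR workflow: writing two integers, e.g. "1 5", to misr_data enables MISR collection over 5 frames, and a later read of the same file returns one 0x-prefixed signature per interface (or "disabled"/"unsupported" as appropriate).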
+
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int i;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_encoder_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_encoder_misr_read,
+ .write = _dpu_encoder_misr_setup,
+ };
+
+ char name[DPU_NAME_SIZE];
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid encoder or kms\n");
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+ /* create overall sub-directory for the encoder */
+ dpu_enc->debugfs_root = debugfs_create_dir(name,
+ drm_enc->dev->primary->debugfs_root);
+ if (!dpu_enc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+
+ debugfs_create_file("misr_data", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ if (dpu_enc->phys_encs[i] &&
+ dpu_enc->phys_encs[i]->ops.late_register)
+ dpu_enc->phys_encs[i]->ops.late_register(
+ dpu_enc->phys_encs[i],
+ dpu_enc->debugfs_root);
+
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ if (!drm_enc)
+ return;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
+}
+#else
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+}
+#endif
+
+static int dpu_encoder_late_register(struct drm_encoder *encoder)
+{
+ return _dpu_encoder_init_debugfs(encoder);
+}
+
+static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+{
+ _dpu_encoder_destroy_debugfs(encoder);
+}
+
+static int dpu_encoder_virt_add_phys_encs(
+ u32 display_caps,
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_enc_phys_init_params *params)
+{
+ struct dpu_encoder_phys *enc = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /*
+ * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
+ * in this function, check up-front.
+ */
+ if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+ ARRAY_SIZE(dpu_enc->phys_encs)) {
+ DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
+ dpu_enc->num_phys_encs);
+ return -EINVAL;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+ enc = dpu_encoder_phys_vid_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+ PTR_ERR(enc));
+ return !enc ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+ enc = dpu_encoder_phys_cmd_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+ PTR_ERR(enc));
+ return !enc ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ return 0;
+}
+
+static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
+ .handle_vblank_virt = dpu_encoder_vblank_callback,
+ .handle_underrun_virt = dpu_encoder_underrun_callback,
+ .handle_frame_done = dpu_encoder_frame_done_callback,
+};
+
+static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct msm_display_info *disp_info,
+ int *drm_enc_mode)
+{
+ int ret = 0;
+ int i = 0;
+ enum dpu_intf_type intf_type;
+ struct dpu_enc_phys_init_params phys_params;
+
+ if (!dpu_enc || !dpu_kms) {
+ DPU_ERROR("invalid arg(s), enc %d kms %d\n",
+ dpu_enc != NULL, dpu_kms != NULL);
+ return -EINVAL;
+ }
+
+ memset(&phys_params, 0, sizeof(phys_params));
+ phys_params.dpu_kms = dpu_kms;
+ phys_params.parent = &dpu_enc->base;
+ phys_params.parent_ops = &dpu_encoder_parent_ops;
+ phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
+
+ DPU_DEBUG("\n");
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+ *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ intf_type = INTF_DSI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_HDMI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_DP;
+ } else {
+ DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
+ return -EINVAL;
+ }
+
+ WARN_ON(disp_info->num_of_h_tiles < 1);
+
+ dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+ DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+ if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
+ (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
+ dpu_enc->idle_pc_supported =
+ dpu_kms->catalog->caps->has_idle_pc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+ /*
+ * Left-most tile is at index 0, content is controller id
+ * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+ * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+ */
+ u32 controller_id = disp_info->h_tile_instance[i];
+
+ if (disp_info->num_of_h_tiles > 1) {
+ if (i == 0)
+ phys_params.split_role = ENC_ROLE_MASTER;
+ else
+ phys_params.split_role = ENC_ROLE_SLAVE;
+ } else {
+ phys_params.split_role = ENC_ROLE_SOLO;
+ }
+
+ DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+ i, controller_id, phys_params.split_role);
+
+ phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
+ intf_type,
+ controller_id);
+ if (phys_params.intf_idx == INTF_MAX) {
+ DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
+ dpu_enc,
+ &phys_params);
+ if (ret)
+ DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
+ }
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return ret;
+}
+
+static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ frame_done_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ u32 event;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+ priv = drm_enc->dev->dev_private;
+
+ if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
+ DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+ DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
+ return;
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
+ return;
+ }
+
+ DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+
+ event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+ dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
+}
+
+static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
+ .mode_set = dpu_encoder_virt_mode_set,
+ .disable = dpu_encoder_virt_disable,
+ .enable = dpu_kms_encoder_enable,
+ .atomic_check = dpu_encoder_virt_atomic_check,
+
+ /* This is called by dpu_kms_encoder_enable */
+ .commit = dpu_encoder_virt_enable,
+};
+
+static const struct drm_encoder_funcs dpu_encoder_funcs = {
+ .destroy = dpu_encoder_destroy,
+ .late_register = dpu_encoder_late_register,
+ .early_unregister = dpu_encoder_early_unregister,
+};
+
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_encoder *drm_enc = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+ int ret = 0;
+
+ dpu_enc = to_dpu_encoder_virt(enc);
+
+ mutex_init(&dpu_enc->enc_lock);
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
+ &drm_enc_mode);
+ if (ret)
+ goto fail;
+
+ dpu_enc->cur_master = NULL;
+ spin_lock_init(&dpu_enc->enc_spinlock);
+
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ timer_setup(&dpu_enc->frame_done_timer,
+ dpu_encoder_frame_done_timeout, 0);
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+ timer_setup(&dpu_enc->vsync_event_timer,
+ dpu_encoder_vsync_event_handler,
+ 0);
+
+ mutex_init(&dpu_enc->rc_lock);
+ kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+ dpu_encoder_off_work);
+ dpu_enc->idle_timeout = IDLE_TIMEOUT;
+
+ kthread_init_work(&dpu_enc->vsync_event_work,
+ dpu_encoder_vsync_event_work_handler);
+
+ memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
+
+ DPU_DEBUG_ENC(dpu_enc, "created\n");
+
+ return ret;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (drm_enc)
+ dpu_encoder_destroy(drm_enc);
+
+ return ret;
+}
+
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ int drm_enc_mode)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int rc = 0;
+
+ dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+ if (!dpu_enc)
+ return ERR_PTR(-ENOMEM);
+
+ rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+ drm_enc_mode, NULL);
+ if (rc) {
+ devm_kfree(dev->dev, dpu_enc);
+ return ERR_PTR(rc);
+ }
+
+ drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
+ return &dpu_enc->base;
+}
+
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+ enum msm_event_wait event)
+{
+ int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ if (!phys)
+ continue;
+
+ switch (event) {
+ case MSM_ENC_COMMIT_DONE:
+ fn_wait = phys->ops.wait_for_commit_done;
+ break;
+ case MSM_ENC_TX_COMPLETE:
+ fn_wait = phys->ops.wait_for_tx_complete;
+ break;
+ case MSM_ENC_VBLANK:
+ fn_wait = phys->ops.wait_for_vblank;
+ break;
+ default:
+ DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+ event);
+ return -EINVAL;
+ }
+
+ if (fn_wait) {
+ DPU_ATRACE_BEGIN("wait_for_completion_event");
+ ret = fn_wait(phys);
+ DPU_ATRACE_END("wait_for_completion_event");
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!encoder) {
+ DPU_ERROR("invalid encoder\n");
+ return INTF_MODE_NONE;
+ }
+ dpu_enc = to_dpu_encoder_virt(encoder);
+
+ if (dpu_enc->cur_master)
+ return dpu_enc->cur_master->intf_mode;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys)
+ return phys->intf_mode;
+ }
+
+ return INTF_MODE_NONE;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 000000000000..60f809fc7c13
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE BIT(3)
+
+#define IDLE_TIMEOUT (66 - 16/2)
+
+/**
+ * Encoder functions and data types
+ * @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ * interface
+ */
+struct dpu_encoder_hw_resources {
+ enum dpu_intf_mode intfs[INTF_MAX];
+ bool needs_cdm;
+ u32 display_num_of_h_tiles;
+};
+
+/**
+ * dpu_encoder_kickoff_params - info encoder requires at kickoff
+ * @affected_displays: bitmask, bit set means the ROI of the commit lies within
+ * the bounds of the physical display at the bit index
+ */
+struct dpu_encoder_kickoff_params {
+ unsigned long affected_displays;
+};
+
+/**
+ * dpu_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder: encoder pointer
+ * @hw_res: resource table to populate with encoder required resources
+ * @conn_state: report hw reqs based on this proposed connector state
+ */
+void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+
+/**
+ * dpu_encoder_register_vblank_callback - provide callback to encoder that
+ * will be called on the next vblank.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister and disable IRQs
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
+ void (*cb)(void *), void *data);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ * will be called after the request is complete, or other events.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+ void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @encoder: encoder pointer
+ * @params: kickoff time parameters
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+ struct dpu_encoder_kickoff_params *params);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ * kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @encoder: encoder pointer
+ * @event: event to wait for
+ * MSM_ENC_COMMIT_DONE - Wait for the hardware to have flushed the
+ *                       current pending frames at a vblank or ctl_start
+ * Encoders will map this differently depending on the
+ * panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
+ * the panel. Encoders will map this differently
+ * depending on the panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+ enum msm_event_wait event);
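For orientation, a minimal sketch of how a caller might sequence the entry points declared above during an atomic commit; the function and the single-display params value are illustrative assumptions, not part of the patch (the real wiring lives in the dpu crtc/kms code elsewhere in this series):

/* Illustrative only: the kickoff sequence as seen from a caller. */
static void example_commit_kickoff(struct drm_encoder *encoder)
{
	struct dpu_encoder_kickoff_params params = {
		.affected_displays = BIT(0),	/* assumption: one display */
	};

	/* program flush/start for the frame; may wait on the previous one */
	dpu_encoder_prepare_for_kickoff(encoder, &params);

	/* trigger ctl flush and start immediately */
	dpu_encoder_kickoff(encoder);

	/* block until the frame has been flushed to the hardware */
	dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
}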
+
+/**
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_restore - restore the encoder configs
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @drm_enc_mode: corresponding DRM_MODE_ENCODER_* mode for the new encoder
+ * Returns: Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+ struct drm_device *dev,
+ int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev: Pointer to drm device structure
+ * @enc: Pointer to the drm_encoder
+ * @disp_info: Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ * atomic commit, before any registers are written
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ * and command mode encoders.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+ u32 idle_timeout);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 000000000000..c7df8aad6613
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_encoder.h"
+
+#define DPU_ENCODER_NAME_MAX 16
+
+/* wait for at most 2 vsyncs at the lowest supported refresh rate (24 Hz) */
+#define KICKOFF_TIMEOUT_MS 84
+#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ * split-panel configuration, where one panel is master, and others slaves.
+ * Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+ ENC_ROLE_SOLO,
+ ENC_ROLE_MASTER,
+ ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING: Encoder transitioning to disable state
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED: Encoder is disabled
+ * @DPU_ENC_ENABLING: Encoder transitioning to enabled
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED: Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET: Encoder is enabled, but requires a hw_reset
+ * to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+ DPU_ENC_DISABLING,
+ DPU_ENC_DISABLED,
+ DPU_ENC_ENABLING,
+ DPU_ENC_ENABLED,
+ DPU_ENC_ERR_NEEDS_HW_RESET
+};
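The DPU_ENC_ERR_NEEDS_HW_RESET state ties several pieces of this patch together: the cmd-mode ppdone-timeout handler sets it, dpu_encoder_prepare_for_kickoff() sees it and invokes hw_reset on every phys encoder, and dpu_encoder_helper_hw_reset() moves the encoder back to DPU_ENC_ENABLED.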
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ * provides for the physical encoders to use to callback.
+ * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_frame_done: Notify virtual encoder that this phys encoder
+ * has completed the last requested frame.
+ */
+struct dpu_encoder_virt_ops {
+ void (*handle_vblank_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_underrun_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_frame_done)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ * the containing virtual encoder.
+ * @late_register: DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit: MSM Atomic Call, start of atomic commit sequence
+ * @is_master: Whether this phys_enc is the current master
+ * encoder. Can be switched at enable time. Based
+ * on split_role and current mode (CMD/VID).
+ * @mode_fixup: DRM Call. Fixup a DRM mode.
+ * @mode_set: DRM Call. Set a DRM mode.
+ * This likely caches the mode, for use at enable.
+ * @enable: DRM Call. Enable a DRM mode.
+ * @disable: DRM Call. Disable mode.
+ * @atomic_check: DRM Call. Atomic check new DRM state.
+ * @destroy: DRM Call. Destroy and release resources.
+ * @get_hw_resources: Populate the structure with the hardware
+ * resources that this phys_enc is using.
+ * Expect no overlap between phys_encs.
+ * @control_vblank_irq: Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done: Wait for hardware to have flushed the
+ * current pending frames to hardware
+ * @wait_for_tx_complete: Wait for hardware to transfer the pixels
+ * to the panel
+ * @wait_for_vblank: Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff: Do any work necessary prior to a kickoff
+ * For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff: Do any necessary post-kickoff work
+ * @trigger_start: Process start event on physical encoder
+ * @needs_single_flush: Whether encoder slaves need to be flushed
+ * @setup_misr: Sets up MISR, enabling/disabling it based on debugfs input
+ * @collect_misr: Collects MISR data on frame update
+ * @hw_reset: Issue HW recovery such as CTL reset and clear
+ * DPU_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control: Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc: phys encoder can update the vsync_enable status
+ * on idle power collapse prepare
+ * @restore: Restore all the encoder configs.
+ * @get_line_count: Obtain current vertical line count
+ */
+
+struct dpu_encoder_phys_ops {
+ int (*late_register)(struct dpu_encoder_phys *encoder,
+ struct dentry *debugfs_root);
+ void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+ bool (*is_master)(struct dpu_encoder_phys *encoder);
+ bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*mode_set)(struct dpu_encoder_phys *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*enable)(struct dpu_encoder_phys *encoder);
+ void (*disable)(struct dpu_encoder_phys *encoder);
+ int (*atomic_check)(struct dpu_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*destroy)(struct dpu_encoder_phys *encoder);
+ void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+ int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+ int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params);
+ void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+ bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+
+ void (*setup_misr)(struct dpu_encoder_phys *phys_enc,
+ bool enable, u32 frame_count);
+ u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
+ void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
+ void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+ void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+ void (*restore)(struct dpu_encoder_phys *phys);
+ int (*get_line_count)(struct dpu_encoder_phys *phys);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
+ */
+enum dpu_intr_idx {
+ INTR_IDX_VSYNC,
+ INTR_IDX_PINGPONG,
+ INTR_IDX_UNDERRUN,
+ INTR_IDX_CTL_START,
+ INTR_IDX_RDPTR,
+ INTR_IDX_MAX,
+};
+
+/**
+ * dpu_encoder_irq - tracking structure for interrupts
+ * @name: string name of interrupt
+ * @intr_type: Encoder interrupt type
+ * @intr_idx: Encoder interrupt enumeration
+ * @hw_idx: HW Block ID
+ * @irq_idx: IRQ interface lookup index from DPU IRQ framework
+ * will be -EINVAL if IRQ is not registered
+ * @cb: interrupt callback
+ */
+struct dpu_encoder_irq {
+ const char *name;
+ enum dpu_intr_type intr_type;
+ enum dpu_intr_idx intr_idx;
+ int hw_idx;
+ int irq_idx;
+ struct dpu_irq_callback cb;
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ * tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ * phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent: Pointer to the containing virtual encoder
+ * @connector: If a mode is set, cached pointer to the active connector
+ * @ops: Operations exposed to the virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop: Hardware interface to the top registers
+ * @hw_ctl: Hardware interface to the ctl registers
+ * @hw_cdm: Hardware interface to the cdm registers
+ * @cdm_cfg: Chroma-down hardware configuration
+ * @hw_pp: Hardware interface to the ping pong registers
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @cached_mode: DRM mode cached at mode_set time, acted on in enable
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_mode: Interface mode
+ * @intf_idx: Interface index on dpu hardware
+ * @topology_name: topology selected for the display
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state: Enable state tracking
+ * @vblank_refcount: Reference count of vblank request
+ * @vsync_cnt: Vsync count for the physical encoder
+ * @underrun_cnt: Underrun count for the physical encoder
+ * @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs
+ * vs. the number of done/vblank irqs. Should hover
+ * between 0-2. Incremented when a new kickoff is
+ * scheduled; decremented in the irq handler.
+ * @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
+ * pending.
+ * @pending_kickoff_wq: Wait queue for blocking until kickoff completes
+ * @irq: IRQ tracking structures
+ */
+struct dpu_encoder_phys {
+ struct drm_encoder *parent;
+ struct drm_connector *connector;
+ struct dpu_encoder_phys_ops ops;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_cdm *hw_cdm;
+ struct dpu_hw_cdm_cfg cdm_cfg;
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode cached_mode;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf_mode intf_mode;
+ enum dpu_intf intf_idx;
+ enum dpu_rm_topology_name topology_name;
+ spinlock_t *enc_spinlock;
+ enum dpu_enc_enable_state enable_state;
+ atomic_t vblank_refcount;
+ atomic_t vsync_cnt;
+ atomic_t underrun_cnt;
+ atomic_t pending_ctlstart_cnt;
+ atomic_t pending_kickoff_cnt;
+ wait_queue_head_t pending_kickoff_wq;
+ struct dpu_encoder_irq irq[INTR_IDX_MAX];
+};
+
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+ atomic_inc_return(&phys->pending_ctlstart_cnt);
+ return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
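Both counters bumped here are drained from irq context: the cmd-mode pp_done handler decrements pending_kickoff_cnt and the ctl_start handler decrements pending_ctlstart_cnt, each followed by wake_up_all() on pending_kickoff_wq, which is what the dpu_encoder_helper_wait_event_timeout() loop earlier in this patch sleeps on.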
+
+/**
+ * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @hw_intf: Hardware interface to the intf registers
+ * @timing_params: Current timing parameter
+ */
+struct dpu_encoder_phys_vid {
+ struct dpu_encoder_phys base;
+ struct dpu_hw_intf *hw_intf;
+ struct intf_timing_params timing_params;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @serialize_wait4pp: when set, wait for the pp_done interrupt right after
+ * ctl_start instead of before the next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+ struct dpu_encoder_phys base;
+ int stream_sel;
+ bool serialize_wait4pp;
+ int pp_timeout_report_cnt;
+ atomic_t pending_vblank_cnt;
+ wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @parent: Pointer to the containing virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_idx: Interface index this phys_enc will control
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+ struct dpu_kms *dpu_kms;
+ struct drm_encoder *parent;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf intf_idx;
+ spinlock_t *enc_spinlock;
+};
+
+/**
+ * dpu_encoder_wait_info - container for passing arguments to irq wait functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+ wait_queue_head_t *wq;
+ atomic_t *atomic_cnt;
+ s64 timeout_ms;
+};
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_hw_reset - issue ctl hw reset
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl hw reset. If state is currently
+ * DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+ return BLEND_3D_NONE;
+
+ if (phys_enc->split_role == ENC_ROLE_SOLO &&
+ phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+ return BLEND_3D_H_ROW_INT;
+
+ return BLEND_3D_NONE;
+}
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ * timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ * note: returns -ETIMEDOUT if the irq does not arrive within
+ * wait_info->timeout_ms
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+#endif /* __DPU_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 000000000000..3084675ed425
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+ container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS 10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
+ struct dpu_encoder_phys_cmd *cmd_enc)
+{
+ return KICKOFF_TIMEOUT_MS;
+}
+
+static inline bool dpu_encoder_phys_cmd_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool dpu_encoder_phys_cmd_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+ return true;
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc)
+ return;
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.setup_intf_cfg)
+ return;
+
+ intf_cfg.intf = phys_enc->intf_idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+ intf_cfg.stream_sel = cmd_enc->stream_sel;
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ unsigned long lock_flags;
+ int new_cnt;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("pp_done_irq");
+ /* notify all synchronous clients first, then asynchronous clients */
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ new_cnt, event);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("rd_ptr_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+ wake_up_all(&cmd_enc->pending_vblank_wq);
+ DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("ctl_start_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+ /* Signal any waiting ctl start interrupt */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->hw_idx = phys_enc->hw_ctl->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->hw_idx = phys_enc->intf_idx;
+ irq->irq_idx = -EINVAL;
+}
+
+static void dpu_encoder_phys_cmd_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+
+ if (!phys_enc || !mode || !adj_mode) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+ phys_enc->cached_mode = *adj_mode;
+ DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+ drm_mode_debug_printmodeline(adj_mode);
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ bool do_log = false;
+
+ if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+ return -EINVAL;
+
+ cmd_enc->pp_timeout_report_cnt++;
+ if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+ frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+ do_log = true;
+ } else if (cmd_enc->pp_timeout_report_cnt == 1) {
+ do_log = true;
+ }
+
+ trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ frame_event);
+
+ /* to avoid flooding, only log first time, and "dead" time */
+ if (do_log) {
+ DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->hw_ctl->idx - CTL_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc, frame_event);
+
+ return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+ &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+ else if (!ret)
+ cmd_enc->pp_timeout_report_cnt = 0;
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ int refcount;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable ? "true" : "false", refcount);
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_RDPTR);
+
+end:
+ if (ret) {
+ DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0, ret,
+ enable ? "true" : "false", refcount);
+ }
+
+ return ret;
+}
+
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_register_irq(phys_enc,
+ INTR_IDX_CTL_START);
+ } else {
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_CTL_START);
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+ }
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_tear_check tc_cfg = { 0 };
+ struct drm_display_mode *mode;
+ bool tc_enable = true;
+ u32 vsync_hz;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ mode = &phys_enc->cached_mode;
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+ !phys_enc->hw_pp->ops.enable_tearcheck) {
+ DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ /*
+ * TE default: dsi byte clock calculated based on 70 fps;
+ * around 14 ms to complete a kickoff cycle if te disabled;
+ * vclk_line based on 60 fps; write is faster than read;
+ * init == start == rdptr;
+ *
+ * vsync_count is the ratio of the MDP VSYNC clock frequency to the
+ * LCD panel frequency, divided by the number of rows (lines) in
+ * the LCD panel.
+ */
+ vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+ if (!vsync_hz) {
+ DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
+ vsync_hz);
+ return;
+ }
+
+ tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
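+
+ /*
+ * Worked example (hypothetical numbers): with a 19.2 MHz vsync
+ * clock, vtotal = 1600 and vrefresh = 60, vsync_count =
+ * 19200000 / (1600 * 60) = 200 vsync clocks per panel line.
+ */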
+
+ /* enable external TE after kickoff to avoid premature autorefresh */
+ tc_cfg.hw_vsync_mode = 0;
+
+ /*
+ * By setting sync_cfg_height to near the max register value, we
+ * essentially disable the dpu hw generated TE signal, since the
+ * external hw TE will always arrive first. The only caveat is that,
+ * due to an error, we could hit the wrap-around.
+ */
+ tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.vsync_init_val = mode->vdisplay;
+ tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+ tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+ tc_cfg.start_pos = mode->vdisplay;
+ tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
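+
+ /*
+ * E.g. for a hypothetical 1080-line panel: vsync_init_val and
+ * start_pos are both 1080, and rd_ptr_irq fires at line 1081,
+ * one line past the active region.
+ */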
+
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+ mode->vtotal, mode->vrefresh);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+ tc_cfg.rd_ptr_irq);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+ tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+ tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+ phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+static void _dpu_encoder_phys_cmd_pingpong_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+ || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+ _dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+ dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ /*
+ * we do separate flush for each CTL and let
+ * CTL_START synchronize them
+ */
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+ _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ return;
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid phys encoder\n");
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+ DPU_ERROR("already enabled\n");
+ return;
+ }
+
+ dpu_encoder_phys_cmd_enable_helper(phys_enc);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+ struct dpu_encoder_phys *phys_enc, bool enable)
+{
+ if (!phys_enc || !phys_enc->hw_pp ||
+ !phys_enc->hw_pp->ops.connect_external_te)
+ return;
+
+ trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+ phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+ struct dpu_encoder_phys *phys_enc)
+{
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pingpong *hw_pp;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return -EINVAL;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp->ops.get_line_count)
+ return -EINVAL;
+
+ return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->enable_state);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+ return;
+ }
+
+ if (phys_enc->hw_pp->ops.enable_tearcheck)
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+ DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "\n");
+ hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ /*
+ * Mark the kickoff request as outstanding. If there is more than
+ * one outstanding, we have to wait for the previous one to complete.
+ */
+ ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (ret) {
+ /* force pending_kickoff_cnt 0 to discard failed kickoff */
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+ DRMID(phys_enc->parent), ret,
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid argument(s)\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+ &wait_info);
+ if (ret == -ETIMEDOUT) {
+ DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (rc) {
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+ DRMID(phys_enc->parent), rc,
+ phys_enc->intf_idx - INTF_0);
+ }
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+ /* required for both controllers */
+ if (!rc && cmd_enc->serialize_wait4pp)
+ dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+ struct dpu_encoder_wait_info wait_info;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return rc;
+
+ wait_info.wq = &cmd_enc->pending_vblank_wq;
+ wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+ wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+ atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+ rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+ &wait_info);
+
+ return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ /*
+ * re-enable external TE, either for the first time after enabling
+ * or if disabled for Autorefresh
+ */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+ struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_cmd_is_master;
+ ops->mode_set = dpu_encoder_phys_cmd_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
+ ops->enable = dpu_encoder_phys_cmd_enable;
+ ops->disable = dpu_encoder_phys_cmd_disable;
+ ops->destroy = dpu_encoder_phys_cmd_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+ ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+ ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+ ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+ ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+ ops->restore = dpu_encoder_phys_cmd_enable_helper;
+ ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+ ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+ ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+ cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ if (!cmd_enc) {
+ ret = -ENOMEM;
+ DPU_ERROR("failed to allocate\n");
+ goto fail;
+ }
+ phys_enc = &cmd_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = PTR_ERR(hw_mdp);
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail_mdp_init;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_CMD;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ cmd_enc->stream_sel = 0;
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->name = "ctl_start";
+ irq->intr_type = DPU_IRQ_TYPE_CTL_START;
+ irq->intr_idx = INTR_IDX_CTL_START;
+ irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->name = "pp_done";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+ irq->intr_idx = INTR_IDX_PINGPONG;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->name = "pp_rd_ptr";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
+ irq->intr_idx = INTR_IDX_RDPTR;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+ atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+ DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+ return phys_enc;
+
+fail_mdp_init:
+ kfree(cmd_enc);
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 000000000000..14fc7c2a6bb7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+ container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ bool ret = false;
+
+ if (phys_enc->split_role != ENC_ROLE_SLAVE)
+ ret = true;
+
+ return ret;
+}
+
+static void drm_mode_to_intf_timing_params(
+ const struct dpu_encoder_phys_vid *vid_enc,
+ const struct drm_display_mode *mode,
+ struct intf_timing_params *timing)
+{
+ memset(timing, 0, sizeof(*timing));
+
+ if ((mode->htotal < mode->hsync_end)
+ || (mode->hsync_start < mode->hdisplay)
+ || (mode->vtotal < mode->vsync_end)
+ || (mode->vsync_start < mode->vdisplay)
+ || (mode->hsync_end < mode->hsync_start)
+ || (mode->vsync_end < mode->vsync_start)) {
+ DPU_ERROR(
+ "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal, mode->hdisplay);
+ DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->vdisplay);
+ return;
+ }
+
+ /*
+ * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+ * Active Region Front Porch Sync Back Porch
+ * <-----------------><------------><-----><----------->
+ * <- [hv]display --->
+ * <--------- [hv]sync_start ------>
+ * <----------------- [hv]sync_end ------->
+ * <---------------------------- [hv]total ------------->
+ */
+ timing->width = mode->hdisplay; /* active width */
+ timing->height = mode->vdisplay; /* active height */
+ timing->xres = timing->width;
+ timing->yres = timing->height;
+ timing->h_back_porch = mode->htotal - mode->hsync_end;
+ timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+ timing->v_back_porch = mode->vtotal - mode->vsync_end;
+ timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+ timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+ timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ timing->border_clr = 0;
+ timing->underflow_clr = 0xff;
+ timing->hsync_skew = mode->hskew;
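+
+ /*
+ * Example with standard CEA 1080p60 timings: hdisplay = 1920,
+ * hsync_start = 2008, hsync_end = 2052, htotal = 2200 gives
+ * h_front_porch = 88, hsync_pulse_width = 44, h_back_porch = 148;
+ * vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125
+ * gives v_front_porch = 4, vsync_pulse_width = 5, v_back_porch = 36.
+ */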
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ timing->hsync_polarity = 0;
+ timing->vsync_polarity = 0;
+ }
+
+ /*
+ * For edp only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ /*
+ * if (vid_enc->hw->cap->type == INTF_EDP) {
+ * display_v_start += mode->htotal - mode->hsync_start;
+ * display_v_end -= mode->hsync_start - mode->hdisplay;
+ * }
+ */
+}
+
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->xres;
+ u32 inactive =
+ timing->h_back_porch + timing->h_front_porch +
+ timing->hsync_pulse_width;
+ return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->yres;
+ u32 inactive =
+ timing->v_back_porch + timing->v_front_porch +
+ timing->vsync_pulse_width;
+ return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, we only need the total
+ * number of lines dictated by the chip's worst-case latencies.
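+ *
+ * Worked example (hypothetical chip latency): if
+ * prog_fetch_lines_worst_case = 25, v_back_porch = 10 and
+ * vsync_pulse_width = 5, then start_of_frame_lines = 15 and
+ * needed_vfp_lines = 10; with v_front_porch >= 10 the fetch starts
+ * 10 lines into the VFP, otherwise the entire VFP is used.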
+ */
+static u32 programmable_fetch_get_num_lines(
+ struct dpu_encoder_phys_vid *vid_enc,
+ const struct intf_timing_params *timing)
+{
+ u32 worst_case_needed_lines =
+ vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ u32 start_of_frame_lines =
+ timing->v_back_porch + timing->vsync_pulse_width;
+ u32 needed_vfp_lines = worst_case_needed_lines > start_of_frame_lines ?
+ worst_case_needed_lines - start_of_frame_lines : 0;
+ u32 actual_vfp_lines = 0;
+
+ /* Fetch must be outside active lines, otherwise undefined. */
+ if (start_of_frame_lines >= worst_case_needed_lines) {
+ DPU_DEBUG_VIDENC(vid_enc,
+ "prog fetch is not needed, large vbp+vsw\n");
+ actual_vfp_lines = 0;
+ } else if (timing->v_front_porch < needed_vfp_lines) {
+ /* Warn fetch needed, but not enough porch in panel config */
+ pr_warn_once("low vbp+vfp may lead to perf issues in some cases\n");
+ DPU_DEBUG_VIDENC(vid_enc,
+ "less vfp than fetch req, using entire vfp\n");
+ actual_vfp_lines = timing->v_front_porch;
+ } else {
+ DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+ actual_vfp_lines = needed_vfp_lines;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+ timing->v_front_porch, timing->v_back_porch,
+ timing->vsync_pulse_width);
+ DPU_DEBUG_VIDENC(vid_enc,
+ "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+ worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+ return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch for cases where the
+ * vsync pulse width and vertical back porch time is insufficient
+ *
+ * Gets the number of lines to prefetch, then calculates the VSYNC counter
+ * value. The HW layer requires the VSYNC counter of the first pixel of the
+ * target VFP line.
+ *
+ * @phys_enc: Pointer to the physical encoder
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ struct dpu_encoder_phys_vid *vid_enc =
+ to_dpu_encoder_phys_vid(phys_enc);
+ struct intf_prog_fetch f = { 0 };
+ u32 vfp_fetch_lines = 0;
+ u32 horiz_total = 0;
+ u32 vert_total = 0;
+ u32 vfp_fetch_start_vsync_counter = 0;
+ unsigned long lock_flags;
+
+ if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ return;
+
+ vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ if (vfp_fetch_lines) {
+ vert_total = get_vertical_total(timing);
+ horiz_total = get_horizontal_total(timing);
+ vfp_fetch_start_vsync_counter =
+ (vert_total - vfp_fetch_lines) * horiz_total + 1;
+ f.enable = 1;
+ f.fetch_start = vfp_fetch_start_vsync_counter;
+ }
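+
+ /*
+ * For example, with hypothetical vert_total = 1125, horiz_total = 2200
+ * and vfp_fetch_lines = 2, fetch_start = (1125 - 2) * 2200 + 1 =
+ * 2470601 vsync counter ticks, i.e. two lines before the end of the
+ * frame, inside the vertical front porch.
+ */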
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+ vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
+static bool dpu_encoder_phys_vid_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+
+ /*
+ * Modifying the adjusted mode here has consequences later, when the
+ * same mode comes back to us in mode_set()
+ */
+ return true;
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct drm_display_mode mode;
+ struct intf_timing_params timing_params = { 0 };
+ const struct dpu_format *fmt = NULL;
+ u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ unsigned long lock_flags;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc || !phys_enc->hw_ctl ||
+ !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
+ return;
+ }
+
+ mode = phys_enc->cached_mode;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ DPU_ERROR("timing engine setup is not supported\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ drm_mode_debug_printmodeline(&mode);
+
+ if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ mode.hdisplay >>= 1;
+ mode.htotal >>= 1;
+ mode.hsync_start >>= 1;
+ mode.hsync_end >>= 1;
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "split_role %d, halve horizontal %d %d %d %d\n",
+ phys_enc->split_role,
+ mode.hdisplay, mode.htotal,
+ mode.hsync_start, mode.hsync_end);
+ }
+
+ drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+ fmt = dpu_get_dpu_format(fmt_fourcc);
+ DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+ intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ &timing_params, fmt);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ programmable_fetch_config(phys_enc, &timing_params);
+
+ vid_enc->timing_params = timing_params;
+}
+
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_hw_ctl *hw_ctl;
+ unsigned long lock_flags;
+ u32 flush_register = 0;
+ int new_cnt = -1, old_cnt = -1;
+
+ if (!phys_enc)
+ return;
+
+ hw_ctl = phys_enc->hw_ctl;
+ if (!hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("vblank_irq");
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+ /*
+ * only decrement the pending flush count if we've actually flushed
+ * hardware. due to sw irq latency, vblank may have already happened
+ * so we need to double-check with hw that it accepted the flush bits
+ */
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ if (hw_ctl->ops.get_flush_register)
+ flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+ if (flush_register == 0)
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+ -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("vblank_irq");
+}
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return false;
+
+ if (phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE)
+ return true;
+
+ return false;
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+}
+
+static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ /*
+ * Initialize irq->hw_idx only when irq is not registered.
+ * Prevent invalidating irq->irq_idx as modeset may be
+ * called many times during dfps.
+ */
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+}
+
+static void dpu_encoder_phys_vid_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_rm *rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !phys_enc->dpu_kms) {
+ DPU_ERROR("invalid encoder/kms\n");
+ return;
+ }
+
+ rm = &phys_enc->dpu_kms->rm;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (adj_mode) {
+ phys_enc->cached_mode = *adj_mode;
+ DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ drm_mode_debug_printmodeline(adj_mode);
+ }
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ struct dpu_encoder_phys_vid *vid_enc;
+ int refcount;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_VSYNC);
+
+end:
+ if (ret) {
+ DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret, enable,
+ refcount);
+ }
+ return ret;
+}
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_intf *intf;
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ intf = vid_enc->hw_intf;
+ ctl = phys_enc->hw_ctl;
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+ dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+ /*
+ * For single flush cases (dual-ctl or pp-split), skip setting the
+ * flush bit for the slave intf, since both intfs use same ctl
+ * and HW will only flush the master.
+ */
+ if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+ !dpu_encoder_phys_vid_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
+
+ /* ctl_flush & timing engine enable will be triggered by framework */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED)
+ phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ kfree(vid_enc);
+}
+
+static void dpu_encoder_phys_vid_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !hw_res) {
+ DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+ phys_enc != NULL, hw_res != NULL, conn_state != NULL);
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf) {
+ DPU_ERROR("invalid arg(s), hw_intf\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+static int _dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc, bool notify)
+{
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ pr_err("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
+ if (notify && phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+ return 0;
+ }
+
+ /* Wait for kickoff to complete */
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+ &wait_info);
+
+ if (ret == -ETIMEDOUT) {
+ dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+ } else if (!ret && notify && phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+
+ return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc || !params) {
+ DPU_ERROR("invalid encoder/parameters\n");
+ return;
+ }
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.wait_reset_status)
+ return;
+
+ /*
+ * hw supports hardware initiated ctl reset, so before we kickoff a new
+ * frame, need to check and wait for hw initiated ctl reset completion
+ */
+ rc = ctl->ops.wait_reset_status(ctl);
+ if (rc) {
+ DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+ ctl->idx, rc);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ unsigned long lock_flags;
+ int ret;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("already disabled\n");
+ return;
+ }
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+ if (dpu_encoder_phys_vid_is_master(phys_enc))
+ dpu_encoder_phys_inc_pending(phys_enc);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 latched before
+ * the (connector) source of the vsync gets disabled,
+ * otherwise we end up in a funny state if we re-enable
+ * before the disable latches, which results in some of
+ * the settings changes for the new modeset (like a new
+ * scanout buffer) not latching properly.
+ */
+ if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+ ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret);
+ }
+ }
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+ /*
+ * Video mode must flush CTL before enabling timing engine
+ * Video encoders need to turn on their interfaces now
+ */
+ if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+ trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0);
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+ }
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ int ret;
+
+ if (!phys_enc)
+ return;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0,
+ enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (ret)
+ return;
+
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ } else {
+ dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ }
+}
+
+static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
+ bool enable, u32 frame_count)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+ vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+ enable, frame_count);
+}
+
+static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return 0;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+ vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ return -EINVAL;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+ return -EINVAL;
+
+ return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_vid_is_master;
+ ops->mode_set = dpu_encoder_phys_vid_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
+ ops->enable = dpu_encoder_phys_vid_enable;
+ ops->disable = dpu_encoder_phys_vid_disable;
+ ops->destroy = dpu_encoder_phys_vid_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+ ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
+ ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_vid *vid_enc = NULL;
+ struct dpu_rm_hw_iter iter;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ if (!p) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+ if (!vid_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phys_enc = &vid_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = PTR_ERR(hw_mdp);
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ /*
+ * The hw_intf resource is permanently assigned to this encoder;
+ * other resources are allocated at atomic commit time by use case
+ */
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
+ while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
+ struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+ if (hw_intf->idx == p->intf_idx) {
+ vid_enc->hw_intf = hw_intf;
+ break;
+ }
+ }
+
+ if (!vid_enc->hw_intf) {
+ ret = -EINVAL;
+ DPU_ERROR("failed to get hw_intf\n");
+ goto fail;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_VIDEO;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ irq->name = "vsync_irq";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
+ irq->intr_idx = INTR_IDX_VSYNC;
+ irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+ return phys_enc;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (vid_enc)
+ dpu_encoder_phys_vid_destroy(phys_enc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644
index 000000000000..bfcd165e96df
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -0,0 +1,1173 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+
+#include "msm_media_info.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H 16
+#define DPU_UBWC_META_BLOCK_SIZE 256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
+
+#define DPU_TILE_HEIGHT_DEFAULT 1
+#define DPU_TILE_HEIGHT_TILED 4
+#define DPU_TILE_HEIGHT_UBWC 4
+#define DPU_TILE_HEIGHT_NV12 8
+
+#define DPU_MAX_IMG_WIDTH 0x3FFF
+#define DPU_MAX_IMG_HEIGHT 0x3FFF
+
+/*
+ * DPU supported format packing, bpp, and other format
+ * information.
+ * DPU currently only supports interleaved RGB formats.
+ * UBWC support for a pixel format is indicated by the flag;
+ * there is an additional metadata plane for such formats.
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
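+/*
+ * For example, the INTERLEAVED_RGB_FMT(ARGB8888, ...) entry in
+ * dpu_format_map below expands to a single-plane, 4 bytes-per-pixel,
+ * linear-fetch format whose components unpack in B/G/R/A element
+ * order with alpha blending enabled.
+ */
+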
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+/**
+ * struct dpu_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct dpu_media_color_map {
+ uint32_t format;
+ uint32_t color;
+};
+
+static const struct dpu_format dpu_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ PSEUDO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+};
+
+/*
+ * A5x tile format table:
+ * This table holds the A5x tile formats supported.
+ */
+static const struct dpu_format dpu_format_map_tile[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+
+ PSEUDO_YUV_FMT_TILED(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static const struct dpu_format dpu_format_map_ubwc[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
+ DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_p010[] = {
+ PSEUDO_YUV_FMT_LOOSE(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
+ DPU_FETCH_LINEAR, 2),
+};
+
+static const struct dpu_format dpu_format_map_p010_ubwc[] = {
+ PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_tp10_ubwc[] = {
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+/* _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
+ * Note: not using the drm_format_*_subsampling helpers since we support
+ * formats they do not describe
+ */
+static void _dpu_get_v_h_subsample_rate(
+ enum dpu_chroma_samp_type chroma_sample,
+ uint32_t *v_sample,
+ uint32_t *h_sample)
+{
+ if (!v_sample || !h_sample)
+ return;
+
+ switch (chroma_sample) {
+ case DPU_CHROMA_H2V1:
+ *v_sample = 1;
+ *h_sample = 2;
+ break;
+ case DPU_CHROMA_H1V2:
+ *v_sample = 2;
+ *h_sample = 1;
+ break;
+ case DPU_CHROMA_420:
+ *v_sample = 2;
+ *h_sample = 2;
+ break;
+ default:
+ *v_sample = 1;
+ *h_sample = 1;
+ break;
+ }
+}
+
+static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+{
+ static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
+ {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+ };
+ int color_fmt = -1;
+ int i;
+
+ if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+ if (DPU_FORMAT_IS_DX(fmt)) {
+ if (fmt->unpack_tight)
+ color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+ else
+ color_fmt = COLOR_FMT_P010_UBWC;
+		} else {
+			color_fmt = COLOR_FMT_NV12_UBWC;
+		}
+ return color_fmt;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
+ if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+ color_fmt = dpu_media_ubwc_map[i].color;
+ break;
+ }
+ return color_fmt;
+}
+
+static int _dpu_format_get_plane_sizes_ubwc(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout)
+{
+ int i;
+ int color;
+ bool meta = DPU_FORMAT_IS_UBWC(fmt);
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ color = _dpu_format_get_media_color_ubwc(fmt);
+ if (color < 0) {
+ DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+ (char *)&fmt->base.pixel_format);
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ uint32_t y_sclines, uv_sclines;
+ uint32_t y_meta_scanlines = 0;
+ uint32_t uv_meta_scanlines = 0;
+
+ layout->num_planes = 2;
+ layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+ y_sclines = VENUS_Y_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+ uv_sclines = VENUS_UV_SCANLINES(color, height);
+ layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+ uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+ layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+ uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ } else {
+ uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+ layout->num_planes = 1;
+
+ layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+ }
+
+done:
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+static int _dpu_format_get_plane_sizes_linear(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ /* Due to memset above, only need to set planes of interest */
+ if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+ layout->num_planes = 1;
+ layout->plane_size[0] = width * height * layout->format->bpp;
+ layout->plane_pitch[0] = width * layout->format->bpp;
+ } else {
+ uint32_t v_subsample, h_subsample;
+ uint32_t chroma_samp;
+ uint32_t bpp = 1;
+
+ chroma_samp = fmt->chroma_sample;
+ _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+ &h_subsample);
+
+ if (width % h_subsample || height % v_subsample) {
+ DRM_ERROR("mismatch in subsample vs dimensions\n");
+ return -EINVAL;
+ }
+
+ if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+ (DPU_FORMAT_IS_DX(fmt)))
+ bpp = 2;
+ layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+ layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[1] = layout->plane_pitch[1] *
+ (height / v_subsample);
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ layout->num_planes = 2;
+ layout->plane_size[1] *= 2;
+ layout->plane_pitch[1] *= 2;
+ } else {
+ /* planar */
+ layout->num_planes = 3;
+ layout->plane_size[2] = layout->plane_size[1];
+ layout->plane_pitch[2] = layout->plane_pitch[1];
+ }
+ }
+
+ /*
+ * linear format: allow user allocated pitches if they are greater than
+ * the requirement.
+ * ubwc format: pitch values are computed uniformly across
+ * all the components based on ubwc specifications.
+ */
+ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
+ if (pitches && layout->plane_pitch[i] < pitches[i])
+ layout->plane_pitch[i] = pitches[i];
+ }
+
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
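[Reviewer note] Worked example: for linear NV12 the pseudo-planar branch above reduces to the familiar 1.5 bytes-per-pixel layout. A minimal standalone sketch, not driver code; the 1920x1080 dimensions are assumed:

#include <stdio.h>

/* Mirrors _dpu_format_get_plane_sizes_linear() for NV12: bpp = 1,
 * DPU_CHROMA_420 gives h_subsample = v_subsample = 2, and the
 * pseudo-planar branch doubles the CbCr pitch and size. */
int main(void)
{
        unsigned width = 1920, height = 1080;
        unsigned pitch_y = width * 1;                   /* plane_pitch[0] */
        unsigned pitch_uv = (pitch_y / 2) * 2;          /* halved, then doubled */
        unsigned size_y = pitch_y * height;             /* 2073600 */
        unsigned size_uv = (pitch_y / 2) * (height / 2) * 2; /* 1036800 */

        /* total: 3110400 bytes = width * height * 1.5 */
        printf("pitch Y %u, CbCr %u; size Y %u + CbCr %u = %u\n",
               pitch_y, pitch_uv, size_y, size_uv, size_y + size_uv);
        return 0;
}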
+static int dpu_format_get_plane_sizes(
+ const struct dpu_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ if (!layout || !fmt) {
+ DRM_ERROR("invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+ return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+ return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+}
+
+static int _dpu_format_populate_addrs_ubwc(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t base_addr = 0;
+ bool meta;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (aspace)
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ if (!base_addr) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+
+ meta = DPU_FORMAT_IS_UBWC(layout->format);
+
+ /* Per-format logic for verifying active planes */
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+		/* |   CbCr meta       | ** |      Y meta       | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+ /* configure CbCr bitstream plane */
+ layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2] + layout->plane_size[3];
+
+ if (!meta)
+ goto done;
+
+ /* configure Y metadata plane */
+ layout->plane_addr[2] = base_addr;
+
+ /* configure CbCr metadata plane */
+ layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2];
+
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+ layout->plane_addr[1] = 0;
+
+ if (!meta)
+ goto done;
+
+ layout->plane_addr[2] = base_addr;
+ layout->plane_addr[3] = 0;
+ }
+done:
+ return 0;
+}
+
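[Reviewer note] To make the YUV offset arithmetic above concrete, here is a standalone sketch with made-up plane sizes (indices follow the diagram: 0 = Y bitstream, 1 = CbCr bitstream, 2 = Y meta, 3 = CbCr meta); illustrative only, not driver code:

#include <stdio.h>

int main(void)
{
        /* Hypothetical values from _dpu_format_get_plane_sizes_ubwc() */
        unsigned base = 0x10000000;
        unsigned size[4] = { 0x200000, 0x100000, 0x8000, 0x4000 };
        unsigned addr[4];

        addr[2] = base;                               /* Y meta leads the buffer */
        addr[0] = base + size[2];                     /* Y bitstream */
        addr[3] = base + size[0] + size[2];           /* CbCr meta */
        addr[1] = base + size[0] + size[2] + size[3]; /* CbCr bitstream */

        printf("Y %#x, CbCr %#x, Y meta %#x, CbCr meta %#x\n",
               addr[0], addr[1], addr[2], addr[3]);
        return 0;
}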
+static int _dpu_format_populate_addrs_linear(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ unsigned int i;
+
+ /* Can now check the pitches given vs pitches expected */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (layout->plane_pitch[i] > fb->pitches[i]) {
+ DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
+ }
+
+ /* Populate addresses for simple formats here */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (aspace)
+ layout->plane_addr[i] =
+ msm_framebuffer_iova(fb, aspace, i);
+ if (!layout->plane_addr[i]) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ int i, ret;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if ((fb->width > DPU_MAX_IMG_WIDTH) ||
+ (fb->height > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ layout->format = to_dpu_format(msm_framebuffer_format(fb));
+
+ /* Populate the plane sizes etc via get_format */
+ ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
+ layout, fb->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DPU_MAX_PLANES; ++i)
+ plane_addr[i] = layout->plane_addr[i];
+
+ /* Populate the addresses given the fb */
+ if (DPU_FORMAT_IS_UBWC(layout->format) ||
+ DPU_FORMAT_IS_TILE(layout->format))
+ ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ else
+ ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
+
+ /* check if anything changed */
+ if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
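[Reviewer note] Callers should treat -EAGAIN as "addresses unchanged, nothing to reprogram" rather than as a failure. A hedged usage sketch; the surrounding plane-update context is hypothetical:

struct dpu_hw_fmt_layout layout;
int ret;

ret = dpu_format_populate_layout(aspace, fb, &layout);
if (ret == -EAGAIN)
        return 0;       /* same addresses as last call; skip reprogramming */
if (ret)
        return ret;     /* real failure */

/* program the SSPP source addresses from layout.plane_addr[] here */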
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos)
+{
+ int ret, i, num_base_fmt_planes;
+ const struct dpu_format *fmt;
+ struct dpu_hw_fmt_layout layout;
+ uint32_t bos_total_size = 0;
+
+ if (!msm_fmt || !cmd || !bos) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ fmt = to_dpu_format(msm_fmt);
+ num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+ ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+ &layout, cmd->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_base_fmt_planes; i++) {
+ if (!bos[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ if ((i == 0) || (bos[i] != bos[0]))
+ bos_total_size += bos[i]->size;
+ }
+
+ if (bos_total_size < layout.total_size) {
+ DRM_ERROR("buffers total size too small %u expected %u\n",
+ bos_total_size, layout.total_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier)
+{
+ uint32_t i = 0;
+ const struct dpu_format *fmt = NULL;
+ const struct dpu_format *map = NULL;
+ ssize_t map_size = 0;
+
+ /*
+ * Currently only support exactly zero or one modifier.
+ * All planes use the same modifier.
+ */
+ DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+
+ switch (modifier) {
+ case 0:
+ map = dpu_format_map;
+ map_size = ARRAY_SIZE(dpu_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = dpu_format_map_ubwc;
+ map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+ DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ (char *)&format);
+ break;
+ default:
+ DPU_ERROR("unsupported format modifier %llX\n", modifier);
+ return NULL;
+ }
+
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+ (char *)&format, modifier);
+ else
+ DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ (char *)&format, modifier,
+ DPU_FORMAT_IS_UBWC(fmt),
+ DPU_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
+
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers)
+{
+ const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+ modifiers);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
+
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max)
+{
+ uint32_t i, fourcc_format;
+
+ if (!format_list || !pixel_formats)
+ return 0;
+
+ for (i = 0, fourcc_format = 0;
+ format_list->fourcc_format && i < pixel_formats_max;
+ ++format_list) {
+ /* verify if listed format is in dpu_format_map? */
+
+ /* optionally return modified formats */
+ if (pixel_modifiers) {
+ /* assume same modifier for all fb planes */
+ pixel_formats[i] = format_list->fourcc_format;
+ pixel_modifiers[i++] = format_list->modifier;
+ } else {
+ /* assume base formats grouped together */
+ if (fourcc_format != format_list->fourcc_format) {
+ fourcc_format = format_list->fourcc_format;
+ pixel_formats[i++] = fourcc_format;
+ }
+ }
+ }
+
+ return i;
+}
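[Reviewer note] A sketch of how a plane init path might consume this helper; the array length and the sblk variable are assumptions, not part of this patch:

uint32_t formats[64];
uint64_t modifiers[64];
uint32_t count;

count = dpu_populate_formats(sblk->format_list, formats, modifiers,
                             ARRAY_SIZE(formats));
/* the first `count` entries of formats[]/modifiers[] are now valid and
 * can be handed to drm_universal_plane_init() or similar */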
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644
index 000000000000..a54451d8d011
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifier: 64-bit drm format modifier, applied to all framebuffer planes
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
+
+/**
+ * dpu_get_msm_format - get a dpu_format by its msm_format base
+ * callback function registered with the msm_kms layer
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+
+/**
+ * dpu_populate_formats - populate the given array with fourcc codes supported
+ * @format_list: pointer to list of possible formats
+ * @pixel_formats: array to populate with fourcc codes
+ * @pixel_modifiers: array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max);
+
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ * dpu non-standard, i.e. modified format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_fmt base pointer of a dpu_format
+ * @cmd: fb_cmd2 structure user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN on success when the addresses
+ * are unchanged from the previous call, or 0 when new addresses were populated
+ */
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *fmtl);
+
+#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
new file mode 100644
index 000000000000..58d29e43faef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_blk.h"
+
+/* Serialization lock for dpu_hw_blk_list */
+static DEFINE_MUTEX(dpu_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(dpu_hw_blk_list);
+
+/**
+ * dpu_hw_blk_init - initialize hw block object
+ * @hw_blk: pointer to hw block object
+ * @type: hw block type - enum dpu_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&hw_blk->list);
+ hw_blk->type = type;
+ hw_blk->id = id;
+ atomic_set(&hw_blk->refcount, 0);
+
+ if (ops)
+ hw_blk->ops = *ops;
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_add(&hw_blk->list, &dpu_hw_blk_list);
+ mutex_unlock(&dpu_hw_blk_lock);
+
+ return 0;
+}
+
+/**
+ * dpu_hw_blk_destroy - destroy hw block object.
+ * @hw_blk: pointer to hw block object
+ * return: none
+ */
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ if (atomic_read(&hw_blk->refcount))
+ pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+ hw_blk->id);
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_del(&hw_blk->list);
+ mutex_unlock(&dpu_hw_blk_lock);
+}
+
+/**
+ * dpu_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object
+ */
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
+{
+ struct dpu_hw_blk *curr;
+ int rc, refcount;
+
+ if (!hw_blk) {
+ mutex_lock(&dpu_hw_blk_lock);
+ list_for_each_entry(curr, &dpu_hw_blk_list, list) {
+ if ((curr->type != type) ||
+ (id >= 0 && curr->id != id) ||
+ (id < 0 &&
+ atomic_read(&curr->refcount)))
+ continue;
+
+ hw_blk = curr;
+ break;
+ }
+ mutex_unlock(&dpu_hw_blk_lock);
+ }
+
+ if (!hw_blk) {
+ pr_debug("no hw_blk:%d\n", type);
+ return NULL;
+ }
+
+ refcount = atomic_inc_return(&hw_blk->refcount);
+
+ if (refcount == 1 && hw_blk->ops.start) {
+ rc = hw_blk->ops.start(hw_blk);
+ if (rc) {
+ pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
+ goto error_start;
+ }
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+ hw_blk->id, refcount);
+ return hw_blk;
+
+error_start:
+ dpu_hw_blk_put(hw_blk);
+ return ERR_PTR(rc);
+}
+
+/**
+ * dpu_hw_blk_put - put hw_blk back to the pool if decremented refcount is zero
+ * @hw_blk: hw block to be released
+ */
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+ atomic_read(&hw_blk->refcount));
+
+ if (!atomic_read(&hw_blk->refcount)) {
+ pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+ return;
+ }
+
+ if (atomic_dec_return(&hw_blk->refcount))
+ return;
+
+ if (hw_blk->ops.stop)
+ hw_blk->ops.stop(hw_blk);
+}
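[Reviewer note] The intended lifecycle is init -> get (ops.start fires on the first reference) -> put (ops.stop fires on the last) -> destroy. A minimal sketch; my_blk, blk, and MY_BLK_TYPE are hypothetical names, not part of this patch:

static int my_blk_start(struct dpu_hw_blk *blk)
{
        /* e.g. enable clocks/power for the block */
        return 0;
}

static void my_blk_stop(struct dpu_hw_blk *blk)
{
        /* undo whatever start did */
}

static struct dpu_hw_blk_ops my_ops = {
        .start = my_blk_start,
        .stop = my_blk_stop,
};

/* at probe: register the block on the global list */
dpu_hw_blk_init(&my_blk, MY_BLK_TYPE, 0, &my_ops);

/* first get of any free instance (id < 0) starts the block */
blk = dpu_hw_blk_get(NULL, MY_BLK_TYPE, -1);

/* last put stops it; destroy removes it from the global list */
dpu_hw_blk_put(blk);
dpu_hw_blk_destroy(blk);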
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
new file mode 100644
index 000000000000..0f4ca8af1ec5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_BLK_H
+#define _DPU_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct dpu_hw_blk;
+
+/**
+ * struct dpu_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct dpu_hw_blk_ops {
+ int (*start)(struct dpu_hw_blk *);
+ void (*stop)(struct dpu_hw_blk *);
+};
+
+/**
+ * struct dpu_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct dpu_hw_blk {
+ struct list_head list;
+ u32 type;
+ int id;
+ atomic_t refcount;
+ struct dpu_hw_blk_ops ops;
+};
+
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops);
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
+
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
+#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 000000000000..44ee06398b1d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_kms.h"
+
+#define VIG_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define MIXER_SDM845_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH 2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
+
+#define MAX_HORZ_DECIMATION 4
+#define MAX_VERT_DECIMATION 4
+
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DOWNSCALE_RATIO 4
+#define SSPP_UNITY_SCALE 1
+
+#define STRCAT(X, Y) (X Y)
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps sdm845_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+};
+
+static struct dpu_mdp_cfg sdm845_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x45C,
+ .features = 0,
+ .highest_bank_bit = 0x2,
+ .has_dest_scaler = true,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ },
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static struct dpu_ctl_cfg sdm845_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0xE4,
+ .features = 0
+ },
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+static const struct dpu_sspp_blks_common sdm845_sspp_common = {
+ .maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .maxhdeciexp = MAX_HORZ_DECIMATION,
+ .maxvdeciexp = MAX_VERT_DECIMATION,
+};
+
+#define _VIG_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+ .id = DPU_SSPP_SCALER_QSEED3, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = STRCAT("sspp_csc", num), \
+ .id = DPU_SSPP_CSC_10BIT, \
+ .base = 0x1a00, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .virt_format_list = plane_formats, \
+ }
+
+#define _DMA_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .format_list = plane_formats, \
+ .virt_format_list = plane_formats, \
+ }
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = VIG_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_VIG, \
+ .clk_ctrl = _clkctrl \
+ }
+
+#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = DMA_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_DMA, \
+ .clk_ctrl = _clkctrl \
+ }
+
+static struct dpu_sspp_cfg sdm845_sspp[] = {
+ SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
+ sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
+ SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
+ sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
+ SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
+ sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
+ SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
+ sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
+ SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
+ sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
+ SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
+ sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
+ SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
+ sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
+ SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
+ sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+};
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+ 0xb0, 0xc8, 0xe0, 0xf8, 0x110
+ },
+};
+
+#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x320, \
+ .features = MIXER_SDM845_MASK, \
+ .sblk = &sdm845_lm_sblk, \
+ .ds = _ds, \
+ .pingpong = _pp, \
+ .lm_pair_mask = (1 << _lmpair) \
+ }
+
+static struct dpu_lm_cfg sdm845_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
+};
+
+/*************************************************************
+ * DS sub blocks config
+ *************************************************************/
+static const struct dpu_ds_top_cfg sdm845_ds_top = {
+ .name = "ds_top_0", .id = DS_TOP,
+ .base = 0x60000, .len = 0xc,
+ .maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
+ .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxupscale = MAX_UPSCALE_RATIO,
+};
+
+#define DS_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x800, \
+ .features = DPU_SSPP_SCALER_QSEED3, \
+ .top = &sdm845_ds_top \
+ }
+
+static struct dpu_ds_cfg sdm845_ds[] = {
+ DS_BLK("ds_0", DS_0, 0x800),
+ DS_BLK("ds_1", DS_1, 0x1000),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+ .te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+#define PP_BLK_TE(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_SPLIT_MASK, \
+ .sblk = &sdm845_pp_sblk_te \
+ }
+#define PP_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_MASK, \
+ .sblk = &sdm845_pp_sblk \
+ }
+
+static struct dpu_pingpong_cfg sdm845_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x280, \
+ .type = _type, \
+ .controller_id = _ctrl_id, \
+ .prog_fetch_lines_worst_case = 24 \
+ }
+
+static struct dpu_intf_cfg sdm845_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+};
+
+/*************************************************************
+ * CDM sub blocks config
+ *************************************************************/
+static struct dpu_cdm_cfg sdm845_cdm[] = {
+ {
+ .name = "cdm_0", .id = CDM_0,
+ .base = 0x79200, .len = 0x224,
+ .features = 0,
+ .intf_connect = BIT(INTF_3),
+ },
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static struct dpu_vbif_cfg sdm845_vbif[] = {
+ {
+ .name = "vbif_0", .id = VBIF_0,
+ .base = 0, .len = 0x1040,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+ .priority_lvl = sdm845_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 14,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
+static struct dpu_reg_dma_cfg sdm845_regdma = {
+ .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+ {.fl = 4, .lut = 0x357},
+ {.fl = 5, .lut = 0x3357},
+ {.fl = 6, .lut = 0x23357},
+ {.fl = 7, .lut = 0x223357},
+ {.fl = 8, .lut = 0x2223357},
+ {.fl = 9, .lut = 0x22223357},
+ {.fl = 10, .lut = 0x222223357},
+ {.fl = 11, .lut = 0x2222223357},
+ {.fl = 12, .lut = 0x22222223357},
+ {.fl = 13, .lut = 0x222222223357},
+ {.fl = 14, .lut = 0x1222222223357},
+ {.fl = 0, .lut = 0x11222222223357}
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x344556677},
+ {.fl = 11, .lut = 0x3344556677},
+ {.fl = 12, .lut = 0x23344556677},
+ {.fl = 13, .lut = 0x223344556677},
+ {.fl = 14, .lut = 0x1223344556677},
+ {.fl = 0, .lut = 0x112233344556677},
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static struct dpu_perf_cfg sdm845_perf_data = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .core_ib_ff = "6.0",
+ .core_clk_ff = "1.0",
+ .comp_ratio_rt =
+ "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
+ .comp_ratio_nrt =
+ "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sdm845_qos_linear),
+ .entries = sdm845_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+ .entries = sdm845_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+ .entries = sdm845_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
+
+/*************************************************************
+ * Hardware catalog init
+ *************************************************************/
+
+/*
+ * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sdm845_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .ds_count = ARRAY_SIZE(sdm845_ds),
+ .ds = sdm845_ds,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .cdm_count = ARRAY_SIZE(sdm845_cdm),
+ .cdm = sdm845_cdm,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sdm845_regdma,
+ .perf = sdm845_perf_data,
+ };
+}
+
+static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+ { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
+ { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+};
+
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+{
+ kfree(dpu_cfg);
+}
+
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+ int i;
+ struct dpu_mdss_cfg *dpu_cfg;
+
+ dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
+ if (!dpu_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+ if (cfg_handler[i].hw_rev == hw_rev) {
+ cfg_handler[i].cfg_init(dpu_cfg);
+ dpu_cfg->hwversion = hw_rev;
+ return dpu_cfg;
+ }
+ }
+
+ DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+ dpu_hw_catalog_deinit(dpu_cfg);
+ return ERR_PTR(-ENODEV);
+}
+
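[Reviewer note] Callers own the returned catalog and release it with dpu_hw_catalog_deinit(); a usage sketch:

struct dpu_mdss_cfg *cat;

cat = dpu_hw_catalog_init(DPU_HW_VER_400);      /* sdm845 v1.0 */
if (IS_ERR(cat))
        return PTR_ERR(cat);

/* ... consult cat->caps, cat->sspp, cat->perf, ... */

dpu_hw_catalog_deinit(cat);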
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 000000000000..f0cb0d4fc80e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,804 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <drm/drmP.h>
+
+/**
+ * Maximum hardware block count per type: for example, at most 12 SSPP
+ * pipes or 5 CTL paths. The current design caps every block type at 12
+ * instances.
+ */
+#define MAX_BLOCKS 12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\
+ ((MINOR & 0xFFF) << 16) |\
+ (STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev) ((rev) >> 28)
+#define DPU_HW_MINOR(rev) (((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev) ((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev) ((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2) \
+ (DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
+#define DPU_HW_VER_170 DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171 DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172 DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300 DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301 DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+
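[Reviewer note] A quick standalone check of the version packing; the macro is reproduced with extra parentheses for safety, values per the table above:

#include <assert.h>
#include <stdint.h>

#define DPU_HW_VER(MAJOR, MINOR, STEP) ((((MAJOR) & 0xF) << 28) | \
                (((MINOR) & 0xFFF) << 16) | ((STEP) & 0xFFFF))

int main(void)
{
        uint32_t rev = DPU_HW_VER(4, 0, 1);     /* sdm845 v2.0 */

        assert(rev == 0x40000001);
        assert((rev >> 28) == 4);               /* DPU_HW_MAJOR() */
        assert(((rev >> 16) & 0xFFF) == 0);     /* DPU_HW_MINOR() */
        assert((rev & 0xFFFF) == 1);            /* DPU_HW_STEP() */
        /* same major.minor => IS_SDM845_TARGET() is true for both revs */
        assert((rev >> 16) == (DPU_HW_VER(4, 0, 0) >> 16));
        return 0;
}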
+
+#define DPU_HW_BLK_NAME_LEN 16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS 2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+ DPU_HW_UBWC_VER_10 = 0x100,
+ DPU_HW_UBWC_VER_20 = 0x200,
+ DPU_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev) ((rev) >= DPU_HW_UBWC_VER_20)
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0, This chipset supports the initial revision of
+ * Universal Bandwidth Compression
+ * @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX Maximum value
+ *
+ */
+enum {
+ DPU_MDP_PANIC_PER_PIPE = 0x1,
+ DPU_MDP_10BIT_SUPPORT,
+ DPU_MDP_BWC,
+ DPU_MDP_UBWC_1_0,
+ DPU_MDP_UBWC_1_5,
+ DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC, Support of color space conversion
+ * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS, SSPP support QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL, SSPP support 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_MAX maximum value
+ */
+enum {
+ DPU_SSPP_SRC = 0x1,
+ DPU_SSPP_SCALER_QSEED2,
+ DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_RGB,
+ DPU_SSPP_CSC,
+ DPU_SSPP_CSC_10BIT,
+ DPU_SSPP_CURSOR,
+ DPU_SSPP_QOS,
+ DPU_SSPP_QOS_8LVL,
+ DPU_SSPP_EXCL_RECT,
+ DPU_SSPP_SMART_DMA_V1,
+ DPU_SSPP_SMART_DMA_V2,
+ DPU_SSPP_TS_PREFILL,
+ DPU_SSPP_TS_PREFILL_REC1,
+ DPU_SSPP_CDP,
+ DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC Gamma correction block
+ * @DPU_DIM_LAYER Layer mixer supports dim layer
+ * @DPU_MIXER_MAX maximum value
+ */
+enum {
+ DPU_MIXER_LAYER = 0x1,
+ DPU_MIXER_SOURCESPLIT,
+ DPU_MIXER_GC,
+ DPU_DIM_LAYER,
+ DPU_MIXER_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE Tear check block
+ * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER, Dither block
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+ DPU_PINGPONG_TE = 0x1,
+ DPU_PINGPONG_TE2,
+ DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SLAVE,
+ DPU_PINGPONG_DITHER,
+ DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY CTL supports video mode split display
+ * @DPU_CTL_MAX
+ */
+enum {
+ DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX maximum value
+ */
+enum {
+ DPU_VBIF_QOS_OTLIM = 0x1,
+ DPU_VBIF_QOS_REMAP,
+ DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @len: length of hardware block
+ * @features bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len; \
+ unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block
+ * offset
+ * @len register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info: HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+struct dpu_csc_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * struct dpu_format_extended - define dpu specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ * framebuffer planes
+ */
+struct dpu_format_extended {
+ uint32_t fourcc_format;
+ uint64_t modifier;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+ DPU_QOS_LUT_USAGE_LINEAR,
+ DPU_QOS_LUT_USAGE_MACROTILE,
+ DPU_QOS_LUT_USAGE_NRT,
+ DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if the requested fill level is less than or equal to @fl
+ */
+struct dpu_qos_lut_entry {
+ u32 fl;
+ u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entries in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+ u32 nentry;
+ struct dpu_qos_lut_entry *entries;
+};
+
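[Reviewer note] Lookup semantics implied by the two structs above: take the first entry whose fill level covers the requested value, with the trailing fl == 0 entry acting as the default. A hypothetical helper (not part of this patch) making that concrete:

static u64 dpu_qos_lut_lookup(const struct dpu_qos_lut_tbl *tbl, u32 fl)
{
        u32 i;

        for (i = 0; i < tbl->nentry; i++) {
                if (tbl->entries[i].fl == 0)    /* default (last) entry */
                        return tbl->entries[i].lut;
                if (fl <= tbl->entries[i].fl)
                        return tbl->entries[i].lut;
        }
        return 0;
}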
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ * supported z order
+ * @qseed_type qseed2 or qseed3 support.
+ * @smart_dma_rev Supported version of SmartDMA feature.
+ * @ubwc_version UBWC feature version (0x0 for not supported)
+ * @has_src_split source split feature status
+ * @has_dim_layer dim layer feature status
+ * @has_idle_pc indicate if idle power collapse feature is supported
+ */
+struct dpu_caps {
+ u32 max_mixer_width;
+ u32 max_mixer_blendstages;
+ u32 qseed_type;
+ u32 smart_dma_rev;
+ u32 ubwc_version;
+ bool has_src_split;
+ bool has_dim_layer;
+ bool has_idle_pc;
+};
+
+/**
+ * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
+ * @maxlinewidth: max line width supported by this pipe
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxhdeciexp: max horizontal decimation supported by this pipe
+ * (max is 2^value)
+ * @maxvdeciexp: max vertical decimation supported by this pipe
+ * (max is 2^value)
+ */
+struct dpu_sspp_blks_common {
+ u32 maxlinewidth;
+ u32 pixel_ram_size;
+ u32 maxhdeciexp;
+ u32 maxvdeciexp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @common: Pointer to common configurations shared by sub blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported (without decimation)
+ * @maxupscale: max upscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @src_blk: source/fetch sub-block information
+ * @scaler_blk: scaler sub-block information
+ * @csc_blk: color space conversion sub-block information
+ * @hsic_blk: HSIC sub-block information
+ * @memcolor_blk: memory color sub-block information
+ * @pcc_blk: polynomial color correction sub-block information
+ * @igc_blk: inverse gamma correction sub-block information
+ * @format_list: Pointer to list of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ */
+struct dpu_sspp_sub_blks {
+ const struct dpu_sspp_blks_common *common;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ u32 maxdwnscale;
+ u32 maxupscale;
+ u32 smart_dma_priority;
+ u32 max_per_pipe_bw;
+ struct dpu_src_blk src_blk;
+ struct dpu_scaler_blk scaler_blk;
+ struct dpu_pp_blk csc_blk;
+ struct dpu_pp_blk hsic_blk;
+ struct dpu_pp_blk memcolor_blk;
+ struct dpu_pp_blk pcc_blk;
+ struct dpu_pp_blk igc_blk;
+
+ const struct dpu_format_extended *format_list;
+ const struct dpu_format_extended *virt_format_list;
+};
+
+/**
+ * struct dpu_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+ u32 maxwidth;
+ u32 maxblendstages;
+ u32 blendstage_base[MAX_BLOCKS];
+ struct dpu_pp_blk gc;
+};
+
+struct dpu_pingpong_sub_blks {
+ struct dpu_pp_blk te;
+ struct dpu_pp_blk te2;
+ struct dpu_pp_blk dither;
+};
+
+/**
+ * enum dpu_clk_ctrl_type - defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+ DPU_CLK_CTRL_NONE,
+ DPU_CLK_CTRL_VIG0,
+ DPU_CLK_CTRL_VIG1,
+ DPU_CLK_CTRL_VIG2,
+ DPU_CLK_CTRL_VIG3,
+ DPU_CLK_CTRL_VIG4,
+ DPU_CLK_CTRL_RGB0,
+ DPU_CLK_CTRL_RGB1,
+ DPU_CLK_CTRL_RGB2,
+ DPU_CLK_CTRL_RGB3,
+ DPU_CLK_CTRL_DMA0,
+ DPU_CLK_CTRL_DMA1,
+ DPU_CLK_CTRL_CURSOR0,
+ DPU_CLK_CTRL_CURSOR1,
+ DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+ DPU_CLK_CTRL_MAX,
+};
+
+/**
+ * struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off: register offset
+ * @bit_off: bit offset
+ */
+struct dpu_clk_ctrl_reg {
+ u32 reg_off;
+ u32 bit_off;
+};
+
+/**
+ * struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features: bit mask identifying sub-blocks/features
+ * @highest_bank_bit: UBWC parameter
+ * @ubwc_static: ubwc static configuration
+ * @ubwc_swizzle: ubwc default swizzle setting
+ * @has_dest_scaler: indicates support of destination scaler
+ * @clk_ctrls: clock control register definition
+ */
+struct dpu_mdp_cfg {
+ DPU_HW_BLK_INFO;
+ u32 highest_bank_bit;
+ u32 ubwc_static;
+ u32 ubwc_swizzle;
+ bool has_dest_scaler;
+ struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
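+
+/*
+ * Illustrative example only: a target catalog would describe its MDP
+ * top block and per-pipe clock controls roughly like this. The offsets,
+ * bit positions and the MDP_TOP id used below are hypothetical.
+ */
+static struct dpu_mdp_cfg example_mdp[] = {
+ {
+ .id = MDP_TOP, .base = 0x0, .len = 0x45c,
+ .features = 0,
+ .highest_bank_bit = 0x2,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {.reg_off = 0x2ac, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {.reg_off = 0x2ac, .bit_off = 8},
+ },
+};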
+
+/**
+ * struct dpu_ctl_cfg : CTL instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features: bit mask identifying sub-blocks/features
+ */
+struct dpu_ctl_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: SSPP sub-blocks information
+ * @xin_id: bus client identifier
+ * @clk_ctrl: clock control identifier
+ * @type: sspp type identifier
+ */
+struct dpu_sspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_sspp_sub_blks *sblk;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+ u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: LM Sub-blocks information
+ * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds: ID of connected DS, DS_MAX if unsupported
+ * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_lm_sub_blks *sblk;
+ u32 pingpong;
+ u32 ds;
+ unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_ds_top_cfg - information of dest scaler top
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying features
+ * @version: hw version of dest scaler
+ * @maxinputwidth: maximum input line width
+ * @maxoutputwidth: maximum output line width
+ * @maxupscale: maximum upscale ratio
+ */
+struct dpu_ds_top_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 maxinputwidth;
+ u32 maxoutputwidth;
+ u32 maxupscale;
+};
+
+/**
+ * struct dpu_ds_cfg - information of dest scaler blocks
+ * @id: enum identifying this block
+ * @base: register offset wrt DS top offset
+ * @features: bit mask identifying features
+ * @version: hw version of the qseed block
+ * @top: DS top information
+ */
+struct dpu_ds_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ const struct dpu_ds_top_cfg *top;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: sub-blocks information
+ */
+struct dpu_pingpong_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @intf_connect: bitmask of INTF IDs this CDM can connect to
+ */
+struct dpu_cdm_cfg {
+ DPU_HW_BLK_INFO;
+ unsigned long intf_connect;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @type: interface type (DSI, DP, HDMI)
+ * @controller_id: controller instance ID, for intf types with multiple instances
+ * @prog_fetch_lines_worst_case: worst case latency, in lines, needed to prefetch
+ */
+struct dpu_intf_cfg {
+ DPU_HW_BLK_INFO;
+ u32 type; /* interface type */
+ u32 controller_id;
+ u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps: pixels per second
+ * @ot_limit: OT limit to use up to the specified pixel rate
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+ u64 pps;
+ u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count: length of @cfg
+ * @cfg: pointer to array of configuration settings with
+ *       ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+ u32 count;
+ struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
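+
+/*
+ * Illustrative example only (rates and limits are hypothetical):
+ * dynamic OT entries are kept in ascending pixel-rate order, and the
+ * first entry whose @pps covers the workload supplies the OT limit.
+ */
+static struct dpu_vbif_dynamic_ot_cfg example_dynamic_ot_rd[] = {
+ {.pps = 1088 * 1920 * 30ULL, .ot_limit = 2},
+ {.pps = 1088 * 1920 * 60ULL, .ot_limit = 6},
+ {.pps = 3840 * 2160 * 30ULL, .ot_limit = 16},
+};
+
+static struct dpu_vbif_dynamic_ot_tbl example_dynamic_ot_rd_tbl = {
+ .count = ARRAY_SIZE(example_dynamic_ot_rd),
+ .cfg = example_dynamic_ot_rd,
+};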
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl: number of priority levels
+ * @priority_lvl: pointer to array of priority levels in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+ u32 npriority_lvl;
+ u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit: default OT read limit
+ * @default_ot_wr_limit: default OT write limit
+ * @xin_halt_timeout: maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl: dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl: dynamic OT write configuration table
+ * @qos_rt_tbl: real-time QoS priority table
+ * @qos_nrt_tbl: non-real-time QoS priority table
+ * @memtype_count: number of defined memtypes
+ * @memtype: array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+ DPU_HW_BLK_INFO;
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+ u32 xin_halt_timeout;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+ struct dpu_vbif_qos_tbl qos_rt_tbl;
+ struct dpu_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
+};
+
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @version: version of lutdma hw block
+ * @trigger_sel_off: offset to trigger select registers of lutdma
+ */
+struct dpu_reg_dma_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 trigger_sel_off;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+ DPU_PERF_CDP_USAGE_RT,
+ DPU_PERF_CDP_USAGE_NRT,
+ DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low: low threshold of maximum bandwidth (kbps)
+ * @max_bw_high: high threshold of maximum bandwidth (kbps)
+ * @min_core_ib: minimum mnoc ib vote in kbps
+ * @min_llcc_ib: minimum llcc ib vote in kbps
+ * @min_dram_ib: minimum dram ib vote in kbps
+ * @core_ib_ff: core instantaneous bandwidth fudge factor
+ * @core_clk_ff: core clock fudge factor
+ * @comp_ratio_rt: string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt: string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines: undersized prefill in lines
+ * @xtra_prefill_lines: extra prefill latency in lines
+ * @dest_scale_prefill_lines: destination scaler latency in lines
+ * @macrotile_prefill_lines: macrotile latency in lines
+ * @yuv_nv12_prefill_lines: yuv_nv12 latency in lines
+ * @linear_prefill_lines: linear latency in lines
+ * @downscaling_prefill_lines: downscaling latency in lines
+ * @amortizable_threshold: minimum y position for traffic shaping prefill
+ * @min_prefill_lines: minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg: cdp use case configurations
+ */
+struct dpu_perf_cfg {
+ u32 max_bw_low;
+ u32 max_bw_high;
+ u32 min_core_ib;
+ u32 min_llcc_ib;
+ u32 min_dram_ib;
+ const char *core_ib_ff;
+ const char *core_clk_ff;
+ const char *comp_ratio_rt;
+ const char *comp_ratio_nrt;
+ u32 undersized_prefill_lines;
+ u32 xtra_prefill_lines;
+ u32 dest_scale_prefill_lines;
+ u32 macrotile_prefill_lines;
+ u32 yuv_nv12_prefill_lines;
+ u32 linear_prefill_lines;
+ u32 downscaling_prefill_lines;
+ u32 amortizable_threshold;
+ u32 min_prefill_lines;
+ u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
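+
+/*
+ * Illustrative example only (the bandwidth numbers are placeholders,
+ * not tuning data): a target catalog would wire the safe/danger/QoS
+ * LUTs into the perf configuration roughly like this, reusing the
+ * example LUT entries sketched earlier.
+ */
+static struct dpu_perf_cfg example_perf = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ [DPU_QOS_LUT_USAGE_LINEAR] = {
+ .nentry = ARRAY_SIZE(example_qos_linear),
+ .entries = example_qos_linear,
+ },
+ },
+};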
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains the number of instances,
+ * register offsets, and capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats Supported formats for dma pipe
+ * @cursor_formats Supported formats for cursor pipe
+ * @vig_formats Supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+ u32 hwversion;
+
+ const struct dpu_caps *caps;
+
+ u32 mdp_count;
+ struct dpu_mdp_cfg *mdp;
+
+ u32 ctl_count;
+ struct dpu_ctl_cfg *ctl;
+
+ u32 sspp_count;
+ struct dpu_sspp_cfg *sspp;
+
+ u32 mixer_count;
+ struct dpu_lm_cfg *mixer;
+
+ u32 ds_count;
+ struct dpu_ds_cfg *ds;
+
+ u32 pingpong_count;
+ struct dpu_pingpong_cfg *pingpong;
+
+ u32 cdm_count;
+ struct dpu_cdm_cfg *cdm;
+
+ u32 intf_count;
+ struct dpu_intf_cfg *intf;
+
+ u32 vbif_count;
+ struct dpu_vbif_cfg *vbif;
+
+ u32 reg_dma_count;
+ struct dpu_reg_dma_cfg dma_cfg;
+
+ u32 ad_count;
+
+ /* Add additional block data structures here */
+
+ struct dpu_perf_cfg perf;
+ struct dpu_format_extended *dma_formats;
+ struct dpu_format_extended *cursor_formats;
+ struct dpu_format_extended *vig_formats;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+ u32 hw_rev;
+ void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+};
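+
+/*
+ * Illustrative example only: dpu_hw_catalog_init() below is expected to
+ * walk a handler table like this and run the cfg_init hook matching the
+ * hardware revision. The handler name and raw revision value used here
+ * are hypothetical.
+ */
+static void example_sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ /* fill *dpu_cfg with target data: caps, mdp[], ctl[], sspp[], ... */
+}
+
+static struct dpu_mdss_hw_cfg_handler example_cfg_handlers[] = {
+ {.hw_rev = 0x40000000, .cfg_init = example_sdm845_cfg_init},
+};
+
+static inline int example_catalog_dispatch(struct dpu_mdss_cfg *cfg,
+ u32 hw_rev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(example_cfg_handlers); i++) {
+ if (example_cfg_handlers[i].hw_rev == hw_rev) {
+ example_cfg_handlers[i].cfg_init(cfg);
+ cfg->hwversion = hw_rev;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}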
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DS(s) ((s)->ds)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API; retrieves
+ * hardcoded target specific catalog information in a config structure
+ * @hw_rev: hardware revision, to be provided by the caller
+ *
+ * Return: dpu config structure
+ */
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+/**
+ * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
+ * @dpu_cfg: pointer returned from init function
+ */
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
+
+/**
+ * dpu_hw_sspp_multirect_enabled - check whether multirect is enabled for the sspp
+ * @cfg: pointer to sspp cfg
+ */
+static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
+{
+ return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
+}
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
new file mode 100644
index 000000000000..3c9f028628ef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+
+static const struct dpu_format_extended plane_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended plane_formats_yuv[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV21, 0},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_NV61, 0},
+ {DRM_FORMAT_VYUY, 0},
+ {DRM_FORMAT_UYVY, 0},
+ {DRM_FORMAT_YUYV, 0},
+ {DRM_FORMAT_YVYU, 0},
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_YVU420, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended cursor_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended wb2_formats[] = {
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_YUYV, 0},
+
+ {0, 0},
+};
+
+static const struct dpu_format_extended rgb_10bit_formats[] = {
+ {DRM_FORMAT_BGRA1010102, 0},
+ {DRM_FORMAT_BGRX1010102, 0},
+ {DRM_FORMAT_RGBA1010102, 0},
+ {DRM_FORMAT_RGBX1010102, 0},
+ {DRM_FORMAT_ABGR2101010, 0},
+ {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XBGR2101010, 0},
+ {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB2101010, 0},
+ {DRM_FORMAT_XRGB2101010, 0},
+};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 000000000000..554874ba0c3b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE 0x000
+#define CDM_CSC_10_BASE 0x004
+
+#define CDM_CDWN2_OP_MODE 0x100
+#define CDM_CDWN2_CLAMP_OUT 0x104
+#define CDM_CDWN2_PARAMS_3D_0 0x108
+#define CDM_CDWN2_PARAMS_3D_1 0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define CDM_CDWN2_COEFF_COSITE_V 0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define CDM_CDWN2_OUT_SIZE 0x130
+
+#define CDM_HDMI_PACK_OP_MODE 0x200
+#define CDM_CSC_10_MATRIX_COEFF_0 0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct dpu_csc_cfg rgb2yuv_cfg = {
+ {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ { 0x00, 0x00, 0x00 },
+ { 0x0040, 0x0200, 0x0200 },
+ { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+ { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
+static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->cdm_count; i++) {
+ if (cdm == m->cdm[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->cdm[i].base;
+ b->length = m->cdm[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CDM;
+ return &m->cdm[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
+ struct dpu_csc_cfg *data)
+{
+ dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+ return 0;
+}
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 opmode = 0;
+ u32 out_size = 0;
+
+ if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+ opmode &= ~BIT(7);
+ else
+ opmode |= BIT(7);
+
+ /* ENABLE DWNS_H bit */
+ opmode |= BIT(1);
+
+ switch (cfg->h_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_H field */
+ opmode &= ~(0x18);
+ /* CLEAR DWNS_H bit */
+ opmode &= ~BIT(1);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_H field (pixel drop is 0) */
+ opmode &= ~(0x18);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_H field (Average is 0x1) */
+ opmode &= ~(0x18);
+ opmode |= (0x1 << 0x3);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_H field (cosite is 0x2) */
+ opmode &= ~(0x18);
+ opmode |= (0x2 << 0x3);
+ /* Co-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+ cosite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+ cosite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+ cosite_h_coeff[2]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_H field (offsite is 0x3) */
+ opmode &= ~(0x18);
+ opmode |= (0x3 << 0x3);
+
+ /* Off-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+ offsite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+ offsite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+ offsite_h_coeff[2]);
+ break;
+ default:
+ pr_err("%s invalid horz down sampling type\n", __func__);
+ return -EINVAL;
+ }
+
+ /* ENABLE DWNS_V bit */
+ opmode |= BIT(2);
+
+ switch (cfg->v_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_V field */
+ opmode &= ~(0x60);
+ /* CLEAR DWNS_V bit */
+ opmode &= ~BIT(2);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_V field (pixel drop is 0) */
+ opmode &= ~(0x60);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_V field (Average is 0x1) */
+ opmode &= ~(0x60);
+ opmode |= (0x1 << 0x5);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_V field (cosite is 0x2) */
+ opmode &= ~(0x60);
+ opmode |= (0x2 << 0x5);
+ /* Co-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_COSITE_V,
+ cosite_v_coeff[0]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_V field (offsite is 0x3) */
+ opmode &= ~(0x60);
+ opmode |= (0x3 << 0x5);
+
+ /* Off-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_OFFSITE_V,
+ offsite_v_coeff[0]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+ opmode |= BIT(0); /* EN CDWN module */
+ else
+ opmode &= ~BIT(0);
+
+ out_size = (cfg->output_width & 0xFFFF) |
+ ((cfg->output_height & 0xFFFF) << 16);
+ DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+ DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+ DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+ ((0x3FF << 16) | 0x0));
+
+ return 0;
+}
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cdm)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt = cdm->output_fmt;
+ struct cdm_output_cfg cdm_cfg = { 0 };
+ u32 opmode = 0;
+ u32 csc = 0;
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return -EINVAL;
+
+ if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+ if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+ return -EINVAL; /* unsupported format */
+ opmode = BIT(0);
+ opmode |= (fmt->chroma_sample << 1);
+ cdm_cfg.intf_en = true;
+ }
+
+ csc |= BIT(2);
+ csc &= ~BIT(1);
+ csc |= BIT(0);
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+ DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+ DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+ return 0;
+}
+
+static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
+{
+ struct cdm_output_cfg cdm_cfg = { 0 };
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
+ ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
+ ops->enable = dpu_hw_cdm_enable;
+ ops->disable = dpu_hw_cdm_disable;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp)
+{
+ struct dpu_hw_cdm *c;
+ struct dpu_cdm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _cdm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_cdm_ops(&c->ops, c->caps->features);
+ c->hw_mdp = hw_mdp;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ /*
+ * Perform any default initialization for the chroma down module:
+ * program the default CSC coefficients.
+ */
+ dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
+{
+ if (cdm)
+ dpu_hw_blk_destroy(&cdm->base);
+ kfree(cdm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 000000000000..5cceb1ecb8e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_cdm;
+
+struct dpu_hw_cdm_cfg {
+ u32 output_width;
+ u32 output_height;
+ u32 output_bit_depth;
+ u32 h_cdwn_type;
+ u32 v_cdwn_type;
+ const struct dpu_format *output_fmt;
+ u32 output_type;
+ int flags;
+};
+
+enum dpu_hw_cdwn_type {
+ CDM_CDWN_DISABLE,
+ CDM_CDWN_PIXEL_DROP,
+ CDM_CDWN_AVG,
+ CDM_CDWN_COSITE,
+ CDM_CDWN_OFFSITE,
+};
+
+enum dpu_hw_cdwn_output_type {
+ CDM_CDWN_OUTPUT_HDMI,
+ CDM_CDWN_OUTPUT_WB,
+};
+
+enum dpu_hw_cdwn_output_bit_depth {
+ CDM_CDWN_OUTPUT_8BIT,
+ CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down Hw driver functions
+ * Assumption is these functions will be called after
+ * clocks are enabled
+ * @setup_csc_data: Programs the csc matrix
+ * @setup_cdwn: Sets up the chroma down sub module
+ * @enable: Enables the output to interface and programs the
+ * output packer
+ * @disable: Puts the cdm in bypass mode
+ */
+struct dpu_hw_cdm_ops {
+ /**
+ * Programs the CSC matrix for conversion from RGB space to YUV space.
+ * It is optional to call this function as a default matrix is
+ * programmed automatically during initialization; call it only to
+ * program a matrix different from the default one.
+ * @cdm: Pointer to the chroma down context structure
+ * @data: Pointer to CSC configuration data
+ * return: 0 if success; error code otherwise
+ */
+ int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
+ struct dpu_csc_cfg *data);
+
+ /**
+ * Programs the chroma downsample part.
+ * @cdm: Pointer to chroma down context
+ * @cfg: Pointer to chroma down configuration
+ */
+ int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Enable the CDM module
+ * @cdm: Pointer to chroma down context
+ * @cfg: Pointer to chroma down configuration
+ */
+ int (*enable)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Disable the CDM module
+ * @cdm: Pointer to chroma down context
+ */
+ void (*disable)(struct dpu_hw_cdm *cdm);
+};
+
+struct dpu_hw_cdm {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* chroma down */
+ const struct dpu_cdm_cfg *caps;
+ enum dpu_cdm idx;
+
+ /* mdp top hw driver */
+ struct dpu_hw_mdp *hw_mdp;
+
+ /* ops */
+ struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * to_dpu_hw_cdm - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * Should be called once before accessing any cdm registers.
+ * @idx: cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp);
+
+/**
+ * dpu_hw_cdm_destroy - destroys CDM driver context
+ * @cdm: pointer to CDM driver context
+ */
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
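+
+/*
+ * Illustrative usage sketch only, assuming clocks are enabled and @fmt
+ * describes a YUV format (for the HDMI path, one with H1V2 chroma
+ * subsampling); real callers live in the encoder code. The "mdp_base",
+ * "catalog" and "hw_mdp" parameters are placeholders.
+ */
+static inline int example_cdm_setup(void __iomem *mdp_base,
+ struct dpu_mdss_cfg *catalog, struct dpu_hw_mdp *hw_mdp,
+ const struct dpu_format *fmt)
+{
+ struct dpu_hw_cdm_cfg cfg = {
+ .output_width = 1920,
+ .output_height = 1080,
+ .output_bit_depth = CDM_CDWN_OUTPUT_8BIT,
+ .h_cdwn_type = CDM_CDWN_COSITE,
+ .v_cdwn_type = CDM_CDWN_OFFSITE,
+ .output_fmt = fmt,
+ .output_type = CDM_CDWN_OUTPUT_HDMI,
+ };
+ struct dpu_hw_cdm *cdm;
+ int rc;
+
+ cdm = dpu_hw_cdm_init(CDM_0, mdp_base, catalog, hw_mdp);
+ if (IS_ERR(cdm))
+ return PTR_ERR(cdm);
+
+ rc = cdm->ops.setup_cdwn(cdm, &cfg);
+ if (!rc)
+ rc = cdm->ops.enable(cdm, &cfg);
+
+ /* on teardown, release with dpu_hw_cdm_destroy(cdm) */
+ return rc;
+}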
+
+#endif /*_DPU_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 000000000000..06be7cf7ce50
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,540 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT3(lm) \
+ (0xA0 + (((lm) - LM_0) * 0x004))
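+/*
+ * e.g. CTL_LAYER(LM_1) resolves to 0x004 and CTL_LAYER_EXT(LM_1) to
+ * 0x044; LM_5 is the one special case, mapped to 0x024 in the base
+ * range.
+ */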
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_PREPARE 0x0d0
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+
+#define CTL_MIXER_BORDER_OUT BIT(24)
+#define CTL_FLUSH_MASK_CTL BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US 2000
+
+static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->ctl_count; i++) {
+ if (ctl == m->ctl[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->ctl[i].base;
+ b->length = m->ctl[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CTL;
+ return &m->ctl[i];
+ }
+ }
+ return ERR_PTR(-EINVAL); /* ctl id not found in catalog */
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+ enum dpu_lm lm)
+{
+ int i;
+ int stages = -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ if (lm == mixer[i].id) {
+ stages = mixer[i].sblk->maxblendstages;
+ break;
+ }
+ }
+
+ return stages;
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ if (!ctx)
+ return 0x0;
+
+ return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp sspp)
+{
+ uint32_t flushbits = 0;
+
+ switch (sspp) {
+ case SSPP_VIG0:
+ flushbits = BIT(0);
+ break;
+ case SSPP_VIG1:
+ flushbits = BIT(1);
+ break;
+ case SSPP_VIG2:
+ flushbits = BIT(2);
+ break;
+ case SSPP_VIG3:
+ flushbits = BIT(18);
+ break;
+ case SSPP_RGB0:
+ flushbits = BIT(3);
+ break;
+ case SSPP_RGB1:
+ flushbits = BIT(4);
+ break;
+ case SSPP_RGB2:
+ flushbits = BIT(5);
+ break;
+ case SSPP_RGB3:
+ flushbits = BIT(19);
+ break;
+ case SSPP_DMA0:
+ flushbits = BIT(11);
+ break;
+ case SSPP_DMA1:
+ flushbits = BIT(12);
+ break;
+ case SSPP_DMA2:
+ flushbits = BIT(24);
+ break;
+ case SSPP_DMA3:
+ flushbits = BIT(25);
+ break;
+ case SSPP_CURSOR0:
+ flushbits = BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ flushbits = BIT(23);
+ break;
+ default:
+ break;
+ }
+
+ return flushbits;
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm)
+{
+ uint32_t flushbits = 0;
+
+ switch (lm) {
+ case LM_0:
+ flushbits = BIT(6);
+ break;
+ case LM_1:
+ flushbits = BIT(7);
+ break;
+ case LM_2:
+ flushbits = BIT(8);
+ break;
+ case LM_3:
+ flushbits = BIT(9);
+ break;
+ case LM_4:
+ flushbits = BIT(10);
+ break;
+ case LM_5:
+ flushbits = BIT(20);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ flushbits |= CTL_FLUSH_MASK_CTL;
+
+ return flushbits;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(31);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(30);
+ break;
+ case INTF_2:
+ *flushbits |= BIT(29);
+ break;
+ case INTF_3:
+ *flushbits |= BIT(28);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_cdm cdm)
+{
+ switch (cdm) {
+ case CDM_0:
+ *flushbits |= BIT(26);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ ktime_t timeout;
+ u32 status;
+
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ /*
+ * it takes around 30us for the mdp to finish resetting its ctl path;
+ * poll every 20-50us so that the reset should be complete by the
+ * first poll
+ */
+ do {
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x1;
+ if (status)
+ usleep_range(20, 50);
+ } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+ return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (!status)
+ return 0;
+
+ pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+ pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+
+ for (i = 0; i < ctx->mixer_count; i++) {
+ DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+ }
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+ u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+ int i, j;
+ int stages; /* must be signed: _mixer_stages() returns -EINVAL on failure */
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+ if (stages < 0)
+ return;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT,
+ &ctx->mixer_hw_caps->features))
+ pipes_per_stage = PIPES_PER_STAGE;
+ else
+ pipes_per_stage = 1;
+
+ mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+ if (!stage_cfg)
+ goto exit;
+
+ for (i = 0; i <= stages; i++) {
+ /* overflow to ext register if 'i + 1 > 7' */
+ mix = (i + 1) & 0x7;
+ ext = i >= 7;
+
+ for (j = 0 ; j < pipes_per_stage; j++) {
+ enum dpu_sspp_multirect_index rect_index =
+ stage_cfg->multirect_index[i][j];
+
+ switch (stage_cfg->stage[i][j]) {
+ case SSPP_VIG0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+ } else {
+ mixercfg |= mix << 0;
+ mixercfg_ext |= ext << 0;
+ }
+ break;
+ case SSPP_VIG1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+ } else {
+ mixercfg |= mix << 3;
+ mixercfg_ext |= ext << 2;
+ }
+ break;
+ case SSPP_VIG2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 6;
+ mixercfg_ext |= ext << 4;
+ }
+ break;
+ case SSPP_VIG3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 26;
+ mixercfg_ext |= ext << 6;
+ }
+ break;
+ case SSPP_RGB0:
+ mixercfg |= mix << 9;
+ mixercfg_ext |= ext << 8;
+ break;
+ case SSPP_RGB1:
+ mixercfg |= mix << 12;
+ mixercfg_ext |= ext << 10;
+ break;
+ case SSPP_RGB2:
+ mixercfg |= mix << 15;
+ mixercfg_ext |= ext << 12;
+ break;
+ case SSPP_RGB3:
+ mixercfg |= mix << 29;
+ mixercfg_ext |= ext << 14;
+ break;
+ case SSPP_DMA0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 18;
+ mixercfg_ext |= ext << 16;
+ }
+ break;
+ case SSPP_DMA1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 21;
+ mixercfg_ext |= ext << 18;
+ }
+ break;
+ case SSPP_DMA2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 0;
+ }
+ break;
+ case SSPP_DMA3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 4;
+ }
+ break;
+ case SSPP_CURSOR0:
+ mixercfg_ext |= ((i + 1) & 0xF) << 20;
+ break;
+ case SSPP_CURSOR1:
+ mixercfg_ext |= ((i + 1) & 0xF) << 26;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+exit:
+ DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_cfg = 0;
+
+ intf_cfg |= (cfg->intf & 0xF) << 4;
+
+ if (cfg->mode_3d) {
+ intf_cfg |= BIT(19);
+ intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+ }
+
+ switch (cfg->intf_mode_sel) {
+ case DPU_CTL_MODE_SEL_VID:
+ intf_cfg &= ~BIT(17);
+ intf_cfg &= ~(0x3 << 15);
+ break;
+ case DPU_CTL_MODE_SEL_CMD:
+ intf_cfg |= BIT(17);
+ intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+ break;
+ default:
+ pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+ return;
+ }
+
+ DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+ unsigned long cap)
+{
+ ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+ ops->trigger_start = dpu_hw_ctl_trigger_start;
+ ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->reset = dpu_hw_ctl_reset_control;
+ ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+ ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
+ ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+ ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_ctl *c;
+ struct dpu_ctl_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _ctl_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_ctl %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->caps = cfg;
+ _setup_ctl_ops(&c->ops, c->caps->features);
+ c->idx = idx;
+ c->mixer_count = m->mixer_count;
+ c->mixer_hw_caps = m->mixer;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 000000000000..c66a71f8b839
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_blk.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID: Video mode interface
+ * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+ DPU_CTL_MODE_SEL_VID = 0,
+ DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+ enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+ enum dpu_sspp_multirect_index multirect_index
+ [DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
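+
+/*
+ * Illustrative example only (this particular staging is hypothetical;
+ * real configurations are derived from the atomic plane state): stage
+ * VIG0 at blend stage 0 and the second rect of multirect DMA0 at
+ * blend stage 1.
+ */
+static inline void example_fill_stage_cfg(struct dpu_hw_stage_cfg *cfg)
+{
+ cfg->stage[0][0] = SSPP_VIG0;
+ cfg->stage[1][0] = SSPP_DMA0;
+ cfg->multirect_index[1][0] = DPU_SSPP_RECT_1;
+}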
+
+/**
+ * struct dpu_hw_intf_cfg : Describes how the DPU writes data to the output interface
+ * @intf : Interface id
+ * @mode_3d: 3d mux configuration
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ */
+struct dpu_hw_intf_cfg {
+ enum dpu_intf intf;
+ enum dpu_3d_blend_mode mode_3d;
+ enum dpu_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+};
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the ctl path hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+ /**
+ * kickoff hw operation for sw controlled interfaces
+ * (DSI cmd mode and WB are the sw controlled interfaces)
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * signal that a kickoff prepare is in progress, for sw
+ * controlled interfaces (DSI cmd mode and WB are the sw
+ * controlled interfaces)
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Query the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * Write the value of the pending_flush_mask to hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Read the value of the flush register
+ * @ctx : ctl path ctx pointer
+ * Return: value of the ctl flush register.
+ */
+ u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Setup ctl_path interface config
+ * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
+ int (*reset)(struct dpu_hw_ctl *c);
+
+ /*
+ * wait_reset_status - checks ctl reset status
+ * @ctx : ctl path ctx pointer
+ *
+ * This function checks the ctl reset status bit.
+ * If the reset bit is set, it keeps polling the status till the hw
+ * reset is complete.
+ * Returns: 0 on success or -error if reset incomplete within interval
+ */
+ int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+ uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_intf blk);
+
+ int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_cdm blk);
+
+ /**
+ * Set all blend stages to disabled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Configure layer mixer to pipe configuration
+ * @ctx : ctl path ctx pointer
+ * @lm : layer mixer enumeration
+ * @cfg : blend stage configuration
+ */
+ void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct dpu_ctl_cfg *caps;
+ int mixer_count;
+ const struct dpu_lm_cfg *mixer_hw_caps;
+ u32 pending_flush_mask;
+
+ /* ops */
+ struct dpu_hw_ctl_ops ops;
+};
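+
+/*
+ * Illustrative flush sequence only (the chosen blocks are hypothetical):
+ * accumulate flush bits for every block touched by a commit, write them
+ * to CTL_FLUSH, then kick off CTL_START. Assumes @ctl came from
+ * dpu_hw_ctl_init().
+ */
+static inline void example_ctl_commit(struct dpu_hw_ctl *ctl)
+{
+ u32 flush = 0;
+
+ flush |= ctl->ops.get_bitmask_sspp(ctl, SSPP_VIG0);
+ flush |= ctl->ops.get_bitmask_mixer(ctl, LM_0);
+ ctl->ops.get_bitmask_intf(ctl, &flush, INTF_1);
+
+ ctl->ops.update_pending_flush(ctl, flush);
+ ctl->ops.trigger_flush(ctl); /* writes CTL_FLUSH */
+ ctl->ops.trigger_start(ctl); /* writes CTL_START */
+}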
+
+/**
+ * to_dpu_hw_ctl - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * Should be called before accessing any ctl path registers.
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys ctl driver context
+ * should be called to free the context
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /*_DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 000000000000..c0b7f0049365
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,1183 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF 0x0
+#define MDP_INTF_0_OFF 0x6A000
+#define MDP_INTF_1_OFF 0x6A800
+#define MDP_INTF_2_OFF 0x6B000
+#define MDP_INTF_3_OFF 0x6B800
+#define MDP_INTF_4_OFF 0x6C000
+#define MDP_AD4_0_OFF 0x7C000
+#define MDP_AD4_1_OFF 0x7D000
+#define MDP_AD4_INTR_EN_OFF 0x41c
+#define MDP_AD4_INTR_CLEAR_OFF 0x424
+#define MDP_AD4_INTR_STATUS_OFF 0x420
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define DPU_INTR_WB_0_DONE BIT(0)
+#define DPU_INTR_WB_1_DONE BIT(1)
+#define DPU_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
+#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
+#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
+#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
+#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_DONE BIT(8)
+#define DPU_INTR_PING_PONG_1_DONE BIT(9)
+#define DPU_INTR_PING_PONG_2_DONE BIT(10)
+#define DPU_INTR_PING_PONG_3_DONE BIT(11)
+#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
+#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
+#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
+#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_0_VSYNC BIT(25)
+#define DPU_INTR_INTF_1_VSYNC BIT(27)
+#define DPU_INTR_INTF_2_VSYNC BIT(29)
+#define DPU_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Ctl start interrupt status bit definitions
+ */
+#define DPU_INTR_CTL_0_START BIT(9)
+#define DPU_INTR_CTL_1_START BIT(10)
+#define DPU_INTR_CTL_2_START BIT(11)
+#define DPU_INTR_CTL_3_START BIT(12)
+#define DPU_INTR_CTL_4_START BIT(13)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
+#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
+#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
+#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
+#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
+#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
+#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
+#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
+#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define DPU_INTR_PROG_LINE BIT(8)
+
+/**
+ * AD4 interrupt status bit definitions
+ */
+#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
+#define DPU_INTR_DARKENH_UPDATED BIT(3)
+#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
+#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
+
+/**
+ * struct dpu_intr_reg - array of DPU register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct dpu_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/**
+ * struct dpu_irq_type - maps each irq with i/f
+ * @intr_type: type of interrupt listed in dpu_intr_type
+ * @instance_idx: instance index of the associated HW block in DPU
+ * @irq_mask: corresponding bit in the interrupt status reg
+ * @reg_idx: which reg set to use
+ */
+struct dpu_irq_type {
+ u32 intr_type;
+ u32 instance_idx;
+ u32 irq_mask;
+ u32 reg_idx;
+};
+
+/**
+ * List of DPU interrupt registers
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+ {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ {
+ MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+ }
+};
+
+/**
+ * IRQ mapping table - used to look up an irq_idx in this table that has
+ * a matching interface type and instance index.
+ */
+static const struct dpu_irq_type dpu_irq_map[] = {
+ /* BEGIN MAP_RANGE: 0-31, INTR */
+ /* irq_idx: 0-3 */
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
+ /* irq_idx: 4-7 */
+ { DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
+ /* irq_idx: 8-11 */
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_DONE, 0},
+ /* irq_idx: 12-15 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_RD_PTR, 0},
+ /* irq_idx: 16-19 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_WR_PTR, 0},
+ /* irq_idx: 20-23 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+ /* irq_idx: 24-27 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
+ /* irq_idx: 28-31 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
+
+	/* BEGIN MAP_RANGE: 32-63, INTR2 */
+ /* irq_idx: 32-35 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 36-39 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_WR_PTR, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 40 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_RD_PTR, 1},
+ /* irq_idx: 41-45 */
+ { DPU_IRQ_TYPE_CTL_START, CTL_0,
+ DPU_INTR_CTL_0_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_1,
+ DPU_INTR_CTL_1_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_2,
+ DPU_INTR_CTL_2_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_3,
+ DPU_INTR_CTL_3_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_4,
+ DPU_INTR_CTL_4_START, 1},
+ /* irq_idx: 46-47 */
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
+ /* irq_idx: 48-51 */
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+ /* irq_idx: 52-55 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 56-59 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
+ /* irq_idx: 60-63 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+ /* BEGIN MAP_RANGE: 64-95 HIST */
+ /* irq_idx: 64-67 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+ DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 68-71 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+ DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 72-75 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+ DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+ DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 76-79 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+ DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 80-83 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+ DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 84-87 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+ DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+ DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 88-91 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 92-95 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+ /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+ /* irq_idx: 96-99 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+ DPU_INTR_VIDEO_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
+ /* irq_idx: 100-103 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
+ /* irq_idx: 104-107 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 108-111 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 112-115 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 116-119 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 120-123 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 124-127 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+ /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+ /* irq_idx: 128-131 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+ DPU_INTR_VIDEO_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
+ /* irq_idx: 132-135 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
+ /* irq_idx: 136-139 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 140-143 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 144-147 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 148-151 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 152-155 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 156-159 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+ /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+ /* irq_idx: 160-163 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+ DPU_INTR_VIDEO_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
+ /* irq_idx: 164-167 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
+ /* irq_idx: 168-171 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 172-175 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 176-179 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 180-183 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 184-187 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 188-191 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+ /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+ /* irq_idx: 192-195 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+ DPU_INTR_VIDEO_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
+ /* irq_idx: 196-199 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
+ /* irq_idx: 200-203 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 204-207 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 208-211 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 212-215 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 216-219 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 220-223 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+ /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+ /* irq_idx: 224-227 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+ DPU_INTR_VIDEO_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
+ /* irq_idx: 228-231 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
+ /* irq_idx: 232-235 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 236-239 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 240-243 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 244-247 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 248-251 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 252-255 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+ /* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
+ /* irq_idx: 256-259 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 260-263 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 264-267 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 268-271 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 272-275 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 276-279 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 280-283 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 284-287 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+
+ /* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
+ /* irq_idx: 288-291 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 292-295 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 296-299 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 300-303 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 304-307 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 308-311 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 312-315 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 316-319 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+};
+
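+/*
+ * Note: the map above is laid out so that each 32-entry block maps to
+ * one register set in dpu_intr_set, i.e. reg_idx == irq_idx / 32, and
+ * for the populated entries the status bit appears to track the slot
+ * within the block. A hypothetical decode helper (illustrative only,
+ * not used by this driver) would be:
+ *
+ *	static inline int irq_idx_to_reg_idx(int irq_idx)
+ *	{
+ *		return irq_idx / 32;
+ *	}
+ *
+ *	static inline u32 irq_idx_to_mask(int irq_idx)
+ *	{
+ *		return BIT(irq_idx % 32);
+ *	}
+ */
+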
+static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
+ u32 instance_idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
+ if (intr_type == dpu_irq_map[i].intr_type &&
+ instance_idx == dpu_irq_map[i].instance_idx)
+ return i;
+ }
+
+	pr_debug("IRQ lookup failed: intr_type=%d, instance_idx=%d\n",
+ intr_type, instance_idx);
+ return -EINVAL;
+}
+
+static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
+ uint32_t mask)
+{
+ if (!intr)
+ return;
+
+ DPU_REG_WRITE(&intr->hw, reg_off, mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *, int),
+ void *arg)
+{
+ int reg_idx;
+ int irq_idx;
+ int start_idx;
+ int end_idx;
+ u32 irq_status;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ /*
+	 * The dispatcher saves the IRQ status before calling here, so
+	 * walk each saved IRQ status word and find the matching irq
+	 * lookup index.
+ */
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+ irq_status = intr->save_irq_status[reg_idx];
+
+ /*
+		 * Each interrupt register covers a fixed range of 32
+		 * indexes in dpu_irq_map.
+ */
+ start_idx = reg_idx * 32;
+ end_idx = start_idx + 32;
+
+ if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
+ end_idx > ARRAY_SIZE(dpu_irq_map))
+ continue;
+
+ /*
+		 * Search the irq map for entries matching the interrupt
+		 * status; start_idx and end_idx bound the search range
+		 * in dpu_irq_map.
+ */
+ for (irq_idx = start_idx;
+ (irq_idx < end_idx) && irq_status;
+ irq_idx++)
+ if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
+ (dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+ /*
+				 * On an irq mask match, invoke the given
+				 * cbfunc, which is responsible for clearing
+				 * the interrupt status. If no cbfunc is
+				 * provided, clear the interrupt here
+				 * instead.
+ */
+ if (cbfunc)
+ cbfunc(arg, irq_idx);
+ else
+ intr->ops.clear_intr_status_nolock(
+ intr, irq_idx);
+
+ /*
+				 * When the callback finishes, clear the
+				 * matching mask from irq_status. Once
+				 * irq_status is fully cleared, the search
+				 * can stop early.
+ */
+ irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
+ }
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
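+/*
+ * Since dispatch_irq() invokes the callback while holding irq_lock, a
+ * callback must stick to the *_nolock ops. A minimal sketch of a
+ * caller-side callback (illustrative only; the argument type here is
+ * an assumption, and the real consumer is the DPU core irq layer):
+ *
+ *	static void example_irq_cb(void *arg, int irq_idx)
+ *	{
+ *		struct dpu_hw_intr *intr = arg;
+ *
+ *		intr->ops.clear_intr_status_nolock(intr, irq_idx);
+ *	}
+ */
+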
+static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & irq->irq_mask) {
+ dbgstr = "DPU IRQ already set:";
+ } else {
+ dbgstr = "DPU IRQ enabled:";
+
+ cache_irq_mask |= irq->irq_mask;
+		/* Clear any pending interrupt */
+		DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+		/* Enable interrupts with the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & irq->irq_mask) == 0) {
+ dbgstr = "DPU IRQ is already cleared:";
+ } else {
+ dbgstr = "DPU IRQ mask disable:";
+
+ cache_irq_mask &= ~irq->irq_mask;
+ /* Disable interrupts based on the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+		/* Clear any pending interrupt */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return 0;
+}
+
+static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
+ uint32_t *mask)
+{
+ if (!intr || !mask)
+ return -EINVAL;
+
+ *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+ | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
+ return 0;
+}
+
+static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
+{
+ int i;
+ u32 enable_mask;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ /* Read interrupt status */
+ intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[i].status_off);
+
+ /* Read enable mask */
+ enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
+
+ /* and clear the interrupt */
+ if (intr->save_irq_status[i])
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
+ intr->save_irq_status[i]);
+
+ /* Finally update IRQ status based on enable mask */
+ intr->save_irq_status[i] &= enable_mask;
+ }
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ int reg_idx;
+
+ if (!intr)
+ return;
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ dpu_irq_map[irq_idx].irq_mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx, bool clear)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+ if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return 0;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ intr_status = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[reg_idx].status_off) &
+ dpu_irq_map[irq_idx].irq_mask;
+ if (intr_status && clear)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ intr_status);
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return intr_status;
+}
+
+static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
+{
+ ops->set_mask = dpu_hw_intr_set_mask;
+ ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
+ ops->enable_irq = dpu_hw_intr_enable_irq;
+ ops->disable_irq = dpu_hw_intr_disable_irq;
+ ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
+ ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
+ ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
+ ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
+ ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
+ ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
+ ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
+ ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+}
+
+static void __intr_offset(struct dpu_mdss_cfg *m,
+ void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+ hw->base_off = addr;
+ hw->blk_off = m->mdp[0].base;
+ hw->hwversion = m->hwversion;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intr *intr;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ __intr_offset(m, addr, &intr->hw);
+ __setup_intr_ops(&intr->ops);
+
+ intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+
+ intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->cache_irq_mask == NULL) {
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->save_irq_status == NULL) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&intr->irq_lock);
+
+ return intr;
+}
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+ if (intr) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr->save_irq_status);
+ kfree(intr);
+ }
+}
+
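+/*
+ * Putting the ops together, the intended call sequence is roughly the
+ * sketch below (error handling trimmed; addr and m are assumed to come
+ * from the DPU KMS setup path, and example_irq_cb is the sample
+ * callback sketched earlier):
+ *
+ *	struct dpu_hw_intr *intr = dpu_hw_intr_init(addr, m);
+ *	int irq_idx;
+ *
+ *	intr->ops.disable_all_irqs(intr);
+ *	intr->ops.clear_all_irqs(intr);
+ *
+ *	irq_idx = intr->ops.irq_idx_lookup(DPU_IRQ_TYPE_INTF_VSYNC, INTF_1);
+ *	if (irq_idx >= 0)
+ *		intr->ops.enable_irq(intr, irq_idx);
+ *
+ * and then, from the interrupt handler:
+ *
+ *	intr->ops.get_interrupt_statuses(intr);
+ *	intr->ops.dispatch_irqs(intr, example_irq_cb, intr);
+ */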
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 000000000000..61e4cba36562
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP BIT(0)
+#define IRQ_SOURCE_DSI0 BIT(4)
+#define IRQ_SOURCE_DSI1 BIT(5)
+#define IRQ_SOURCE_HDMI BIT(8)
+#define IRQ_SOURCE_EDP BIT(12)
+#define IRQ_SOURCE_MHL BIT(16)
+
+/**
+ * dpu_intr_type - HW Interrupt Type
+ * @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
+ * @DPU_IRQ_TYPE_WB_WFD_COMP: WB WFD done
+ * @DPU_IRQ_TYPE_PING_PONG_COMP: PingPong done
+ * @DPU_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
+ * @DPU_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
+ * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
+ * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
+ * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
+ * @DPU_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
+ * @DPU_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
+ * @DPU_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
+ * @DPU_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
+ * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
+ * @DPU_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
+ * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
+ * @DPU_IRQ_TYPE_WD_TIMER: Watchdog timer
+ * @DPU_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
+ * @DPU_IRQ_TYPE_AD4_BL_DONE: AD4 backlight
+ * @DPU_IRQ_TYPE_CTL_START: Control start
+ * @DPU_IRQ_TYPE_RESERVED: Reserved for expansion
+ */
+enum dpu_intr_type {
+ DPU_IRQ_TYPE_WB_ROT_COMP,
+ DPU_IRQ_TYPE_WB_WFD_COMP,
+ DPU_IRQ_TYPE_PING_PONG_COMP,
+ DPU_IRQ_TYPE_PING_PONG_RD_PTR,
+ DPU_IRQ_TYPE_PING_PONG_WR_PTR,
+ DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
+ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+ DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
+ DPU_IRQ_TYPE_INTF_UNDER_RUN,
+ DPU_IRQ_TYPE_INTF_VSYNC,
+ DPU_IRQ_TYPE_CWB_OVERFLOW,
+ DPU_IRQ_TYPE_HIST_VIG_DONE,
+ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
+ DPU_IRQ_TYPE_HIST_DSPP_DONE,
+ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+ DPU_IRQ_TYPE_WD_TIMER,
+ DPU_IRQ_TYPE_SFI_VIDEO_IN,
+ DPU_IRQ_TYPE_SFI_VIDEO_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_0_IN,
+ DPU_IRQ_TYPE_SFI_CMD_0_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_1_IN,
+ DPU_IRQ_TYPE_SFI_CMD_1_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_2_IN,
+ DPU_IRQ_TYPE_SFI_CMD_2_OUT,
+ DPU_IRQ_TYPE_PROG_LINE,
+ DPU_IRQ_TYPE_AD4_BL_DONE,
+ DPU_IRQ_TYPE_CTL_START,
+ DPU_IRQ_TYPE_RESERVED,
+};
+
+struct dpu_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct dpu_hw_intr_ops {
+ /**
+ * set_mask - Programs the given interrupt register with the
+ * given interrupt mask. Register value will get overwritten.
+ * @intr: HW interrupt handle
+ * @reg_off: MDSS HW register offset
+ * @irqmask: IRQ mask value
+ */
+ void (*set_mask)(
+ struct dpu_hw_intr *intr,
+		uint32_t reg_off,
+ uint32_t irqmask);
+
+ /**
+ * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
+ * Used for all irq related ops
+ * @intr_type: Interrupt type defined in dpu_intr_type
+ * @instance_idx: HW interrupt block instance
+ * @return: irq_idx or -EINVAL for lookup fail
+ */
+ int (*irq_idx_lookup)(
+ enum dpu_intr_type intr_type,
+ u32 instance_idx);
+
+ /**
+ * enable_irq - Enable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*enable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * disable_irq - Disable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+ * any asserted IRQs). Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*clear_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * disable_all_irqs - Disables all the interrupts. Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * dispatch_irqs - IRQ dispatcher will call the given callback
+ * function when a matching interrupt status bit is
+ * found in the irq mapping table.
+ * @intr: HW interrupt handle
+ * @cbfunc: Callback function pointer
+ * @arg: Argument to pass back during callback
+ */
+ void (*dispatch_irqs)(
+ struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *arg, int irq_idx),
+ void *arg);
+
+ /**
+	 * get_interrupt_statuses - Reads and stores the values of all
+	 *			interrupt status registers that are
+	 *			currently asserted.
+ * @intr: HW interrupt handle
+ */
+ void (*get_interrupt_statuses)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * clear_interrupt_status - Clears HW interrupt status based on given
+ * lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ */
+ void (*clear_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_intr_status_nolock() - clears the HW interrupts without lock
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ */
+ void (*clear_intr_status_nolock)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+	 * get_interrupt_status - Gets the HW interrupt status and clears it
+	 *			if set, based on the given lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @clear: True to clear irq after read
+ */
+ u32 (*get_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx,
+ bool clear);
+
+ /**
+	 * get_valid_interrupts - Gets a mask of all valid interrupt sources
+	 *                        within DPU. These are status bits within the
+	 *                        interrupt registers that identify the source
+	 *                        of an IRQ, for example MDP, DSI or HDMI.
+ * @intr: HW interrupt handle
+ * @mask: Returning the interrupt source MASK
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_valid_interrupts)(
+ struct dpu_hw_intr *intr,
+ uint32_t *mask);
+};
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @ops: function pointer mapping for IRQ handling
+ * @cache_irq_mask: array of cached IRQ enable mask values, allocated at init
+ * @save_irq_status: array of saved IRQ status values, allocated at init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock: spinlock for accessing IRQ resources
+ */
+struct dpu_hw_intr {
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_hw_intr_ops ops;
+ u32 *cache_irq_mask;
+ u32 *save_irq_status;
+ u32 irq_idx_tbl_size;
+ spinlock_t irq_lock;
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleans up the interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 000000000000..d280df5613c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+#define INTF_PROG_ROT_START 0x174
+
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
+static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->intf_count; i++) {
+ if ((intf == m->intf[i].id) &&
+ (m->intf[i].type != INTF_NONE)) {
+ b->base_off = addr;
+ b->blk_off = m->intf[i].base;
+ b->length = m->intf[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_INTF;
+ return &m->intf[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 active_h_start, active_h_end;
+ u32 active_v_start, active_v_end;
+ u32 active_hctl, display_hctl, hsync_ctl;
+ u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+ u32 panel_format;
+ u32 intf_cfg;
+
+ /* read interface_cfg */
+ intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+ hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+ p->h_front_porch;
+ vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+ p->v_front_porch;
+
+ display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+ hsync_period) + p->hsync_skew;
+ display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+ p->hsync_skew - 1;
+
+ if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+ }
+
+ hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+ hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+ if (p->width != p->xres) {
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ } else {
+ active_h_start = 0;
+ active_h_end = 0;
+ }
+
+ if (p->height != p->yres) {
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+ } else {
+ active_v_start = 0;
+ active_v_end = 0;
+ }
+
+ if (active_h_end) {
+ active_hctl = (active_h_end << 16) | active_h_start;
+ intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
+ } else {
+ active_hctl = 0;
+ }
+
+ if (active_v_end)
+ intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ den_polarity = 0;
+ if (ctx->cap->type == INTF_HDMI) {
+ hsync_polarity = p->yres >= 720 ? 0 : 1;
+ vsync_polarity = p->yres >= 720 ? 0 : 1;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
+ polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
+ (vsync_polarity << 1) | /* VSYNC Polarity */
+ (hsync_polarity << 0); /* HSYNC Polarity */
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ panel_format = (fmt->bits[C0_G_Y] |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (0x21 << 8));
+ else
+ /* Interface treats all the pixel data in RGB888 format */
+ panel_format = (COLOR_8BIT |
+ (COLOR_8BIT << 2) |
+ (COLOR_8BIT << 4) |
+ (0x21 << 8));
+
+ DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+ DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+ DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+ p->vsync_pulse_width * hsync_period);
+ DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+ DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+ DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+ DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+ DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+ DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+ DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+ DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
+
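+/*
+ * Worked example of the timing math above for a hypothetical
+ * 1920x1080 DSI video mode (porch/pulse values invented for
+ * illustration, hsync_skew = 0, no DP/EDP start/end adjustment):
+ *
+ *	hsync_period    = 44 + 148 + 1920 + 88  = 2200 pixels
+ *	vsync_period    = 5 + 36 + 1080 + 4     = 1125 lines
+ *	display_v_start = (5 + 36) * 2200       = 90200 pixel clocks
+ *	display_v_end   = (1125 - 4) * 2200 - 1 = 2466199
+ *	hsync_start_x   = 148 + 44              = 192
+ *	hsync_end_x     = 2200 - 88 - 1         = 2111
+ */
+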
+static void dpu_hw_intf_enable_timing_engine(
+ struct dpu_hw_intf *intf,
+ u8 enable)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ /* Note: Display interface select is handled in top block hw layer */
+ DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+ struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ int fetch_enable;
+
+ /*
+	 * Fetch should always be outside the active lines. If the fetch
+	 * is programmed within the active region, hardware behavior is
+	 * undefined.
+ */
+
+ fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+ if (fetch->enable) {
+ fetch_enable |= BIT(31);
+ DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+ fetch->fetch_start);
+ } else {
+ fetch_enable &= ~BIT(31);
+ }
+
+ DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_get_status(
+ struct dpu_hw_intf *intf,
+ struct intf_status *s)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+ if (s->is_en) {
+ s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+ s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+ } else {
+ s->line_count = 0;
+ s->frame_count = 0;
+ }
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
+}
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!intf)
+ return 0;
+
+ c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+ ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
+ ops->get_status = dpu_hw_intf_get_status;
+ ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+ ops->setup_misr = dpu_hw_intf_setup_misr;
+ ops->collect_misr = dpu_hw_intf_collect_misr;
+ ops->get_line_count = dpu_hw_intf_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intf *c;
+ struct dpu_intf_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _intf_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_intf %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+	/* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ c->mdss = m;
+ _setup_intf_ops(&c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+ if (intf)
+ dpu_hw_blk_destroy(&intf->base);
+ kfree(intf);
+}
+
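+/*
+ * Sketch of how an encoder might drive this block (the intf index,
+ * timing parameters and format pointer are placeholders; the real
+ * callers are the physical encoder implementations):
+ *
+ *	struct dpu_hw_intf *intf = dpu_hw_intf_init(INTF_1, addr, m);
+ *	struct intf_status status;
+ *
+ *	if (!IS_ERR(intf)) {
+ *		intf->ops.setup_timing_gen(intf, &timing, fmt);
+ *		intf->ops.enable_timing(intf, 1);
+ *		intf->ops.get_status(intf, &status);
+ *	}
+ */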
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 000000000000..a79d735da68d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the INTF hw driver functions.
+ * These functions are assumed to be called after clocks are enabled.
+ * @setup_timing_gen: programs the timing engine
+ * @setup_prg_fetch: enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_status: returns if timing engine is enabled or not
+ * @setup_misr: enables/disables MISR in HW register
+ * @collect_misr: reads and stores MISR data from HW register
+ * @get_line_count: reads current vertical line counter
+ */
+struct dpu_hw_intf_ops {
+ void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt);
+
+ void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct dpu_hw_intf *intf,
+ u8 enable);
+
+ void (*get_status)(struct dpu_hw_intf *intf,
+ struct intf_status *status);
+
+ void (*setup_misr)(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count);
+
+ u32 (*collect_misr)(struct dpu_hw_intf *intf);
+
+ u32 (*get_line_count)(struct dpu_hw_intf *intf);
+};
+
+struct dpu_hw_intf {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* intf */
+ enum dpu_intf idx;
+ const struct dpu_intf_cfg *cap;
+ const struct dpu_mdss_cfg *mdss;
+
+ /* ops */
+ struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * to_dpu_hw_intf - convert base hw object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf: Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
+#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 000000000000..4ab72b0f07a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x010
+
+/* These registers are offsets from mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_CONST_ALPHA 0x04
+#define LM_FG_COLOR_FILL_COLOR_0 0x08
+#define LM_FG_COLOR_FILL_COLOR_1 0x0C
+#define LM_FG_COLOR_FILL_SIZE 0x10
+#define LM_FG_COLOR_FILL_XY 0x14
+
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+
+static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->mixer_count; i++) {
+ if (mixer == m->mixer[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mixer[i].base;
+ b->length = m->mixer[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_LM;
+ return &m->mixer[i];
+ }
+ }
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @c: mixer ctx contains the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+ const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+ int rc;
+
+ if (stage == DPU_STAGE_BASE)
+ rc = -EINVAL;
+ else if (stage <= sblk->maxblendstages)
+ rc = sblk->blendstage_base[stage - DPU_STAGE_0];
+ else
+ rc = -EINVAL;
+
+ return rc;
+}
+
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *mixer)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 outsize;
+ u32 op_mode;
+
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ outsize = mixer->out_height << 16 | mixer->out_width;
+ DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+ /* SPLIT_LEFT_RIGHT */
+ if (mixer->right_mixer)
+ op_mode |= BIT(31);
+ else
+ op_mode &= ~BIT(31);
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+ (color->color_0 & 0xFFF) |
+ ((color->color_1 & 0xFFF) << 0x10));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+ (color->color_2 & 0xFFF) |
+ ((color->color_3 & 0xFFF) << 0x10));
+ }
+}
+
+static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
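+/*
+ * Example of the packed constant-alpha layout used above: an opaque
+ * foreground (fg_alpha = 0xff) over a transparent background
+ * (bg_alpha = 0x00) yields const_alpha = 0x00ff0000, i.e. the fg
+ * alpha lands in bits [23:16] and the bg alpha in bits [7:0].
+ */
+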
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode;
+
+ /* read the existing op_mode configuration */
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
+ void *cfg)
+{
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
+static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+ struct dpu_hw_lm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_mixer_out = dpu_hw_lm_setup_out;
+ if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+ else
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+ ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+ ops->setup_border_color = dpu_hw_lm_setup_border_color;
+ ops->setup_gc = dpu_hw_lm_gc;
+ ops->setup_misr = dpu_hw_lm_setup_misr;
+ ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mixer *c;
+ struct dpu_lm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _lm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_mixer_ops(m, &c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+ if (lm)
+ dpu_hw_blk_destroy(&lm->base);
+ kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 000000000000..e29e5dab31bf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+ u32 out_width;
+ u32 out_height;
+ bool right_mixer;
+ int flags;
+};
+
+struct dpu_hw_color3_cfg {
+ u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ *
+ * struct dpu_hw_lm_ops : Interface to the mixer HW driver functions
+ * These functions are assumed to be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+ uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en);
+ /**
+ * setup_gc : enable/disable gamma correction feature
+ */
+ void (*setup_gc)(struct dpu_hw_mixer *mixer,
+ void *cfg);
+
+ /* setup_misr: enables/disables MISR in HW register */
+ void (*setup_misr)(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count);
+
+ /* collect_misr: reads and stores MISR data from HW register */
+ u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
+};
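+
+/*
+ * A minimal usage sketch (illustrative only; the compositor code is the
+ * authoritative caller): after dpu_hw_lm_init() the output rectangle is
+ * programmed once per mode set, then the per-stage blend state on each
+ * commit:
+ *
+ *	lm->ops.setup_mixer_out(lm, &mixer_cfg);
+ *	lm->ops.setup_blend_config(lm, stage, fg_alpha, bg_alpha, blend_op);
+ *	lm->ops.setup_alpha_out(lm, mixer_op_mode);
+ */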
+
+struct dpu_hw_mixer {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* lm */
+ enum dpu_lm idx;
+ const struct dpu_lm_cfg *cap;
+ const struct dpu_mdp_cfg *mdp;
+ const struct dpu_ctl_cfg *ctl;
+
+ /* ops */
+ struct dpu_hw_lm_ops ops;
+
+ /* store mixer info specific to display */
+ struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * should be called once before accessing every mixer.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm: Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 000000000000..35e6bf930924
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,465 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME "dpu"
+
+#define DPU_NONE 0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES 3
+#endif
+
+enum dpu_format_flags {
+ DPU_FORMAT_FLAG_YUV_BIT,
+ DPU_FORMAT_FLAG_DX_BIT,
+ DPU_FORMAT_FLAG_COMPRESSED_BIT,
+ DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X) \
+ (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X) \
+ (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA (1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA (1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN (1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA (1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA (1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
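+
+/*
+ * Illustrative blend_op compositions (the CRTC blend setup makes the
+ * authoritative choice): constant-alpha blending would combine
+ * DPU_BLEND_FG_ALPHA_FG_CONST | DPU_BLEND_BG_ALPHA_BG_CONST, while
+ * per-pixel coverage blending for a format with an alpha channel would
+ * combine DPU_BLEND_FG_ALPHA_FG_PIXEL | DPU_BLEND_BG_ALPHA_FG_PIXEL |
+ * DPU_BLEND_BG_INV_ALPHA.
+ */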
+
+#define DPU_VSYNC0_SOURCE_GPIO 0
+#define DPU_VSYNC1_SOURCE_GPIO 1
+#define DPU_VSYNC2_SOURCE_GPIO 2
+#define DPU_VSYNC_SOURCE_INTF_0 3
+#define DPU_VSYNC_SOURCE_INTF_1 4
+#define DPU_VSYNC_SOURCE_INTF_2 5
+#define DPU_VSYNC_SOURCE_INTF_3 6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
+
+enum dpu_hw_blk_type {
+ DPU_HW_BLK_TOP = 0,
+ DPU_HW_BLK_SSPP,
+ DPU_HW_BLK_LM,
+ DPU_HW_BLK_CTL,
+ DPU_HW_BLK_CDM,
+ DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_INTF,
+ DPU_HW_BLK_WB,
+ DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum dpu_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum dpu_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum dpu_stage {
+ DPU_STAGE_BASE = 0,
+ DPU_STAGE_0,
+ DPU_STAGE_1,
+ DPU_STAGE_2,
+ DPU_STAGE_3,
+ DPU_STAGE_4,
+ DPU_STAGE_5,
+ DPU_STAGE_6,
+ DPU_STAGE_7,
+ DPU_STAGE_8,
+ DPU_STAGE_9,
+ DPU_STAGE_10,
+ DPU_STAGE_MAX
+};
+enum dpu_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum dpu_ds {
+ DS_TOP,
+ DS_0,
+ DS_1,
+ DS_MAX
+};
+
+enum dpu_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_MAX
+};
+
+enum dpu_cdm {
+ CDM_0 = 1,
+ CDM_1,
+ CDM_MAX
+};
+
+enum dpu_pingpong {
+ PINGPONG_0 = 1,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum dpu_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_MAX
+};
+
+enum dpu_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ INTF_EDP = 0x9,
+ INTF_DP = 0xa,
+ INTF_TYPE_MAX,
+
+ /* virtual interfaces */
+ INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum dpu_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum dpu_ad {
+ AD_0 = 0x1,
+ AD_1,
+ AD_MAX
+};
+
+enum dpu_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum dpu_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+ VBIF_0,
+ VBIF_1,
+ VBIF_MAX,
+ VBIF_RT = VBIF_0,
+ VBIF_NRT = VBIF_1
+};
+
+enum dpu_iommu_domain {
+ DPU_IOMMU_DOMAIN_UNSECURE,
+ DPU_IOMMU_DOMAIN_SECURE,
+ DPU_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines the color component pixel packing
+ * @DPU_PLANE_INTERLEAVED : Color components in single plane
+ * @DPU_PLANE_PLANAR : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+ DPU_PLANE_INTERLEAVED,
+ DPU_PLANE_PLANAR,
+ DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB : No chroma subsampling
+ * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420 : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+ DPU_CHROMA_RGB,
+ DPU_CHROMA_H2V1,
+ DPU_CHROMA_H1V2,
+ DPU_CHROMA_420
+};
+
+/**
+ * enum dpu_fetch_type - defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR : fetch is line by line
+ * @DPU_FETCH_TILE : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC : fetch and decompress data
+ */
+enum dpu_fetch_type {
+ DPU_FETCH_LINEAR,
+ DPU_FETCH_TILE,
+ DPU_FETCH_UBWC
+};
+
+/**
+ * Enum values are chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+ COLOR_4BIT = 0,
+ COLOR_5BIT = 1, /* No 5-bit Alpha */
+ COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+ COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : Vertical row interleaving
+ * @BLEND_3D_COL_INT : Column interleaving
+ * @BLEND_3D_MAX :
+ */
+enum dpu_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+/** struct dpu_format - defines the format configuration which
+ * allows DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+ struct msm_format base;
+ enum dpu_plane_type fetch_planes;
+ u8 element[DPU_MAX_PLANES];
+ u8 bits[DPU_MAX_PLANES];
+ enum dpu_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb;
+ u8 unpack_tight;
+ u8 unpack_count;
+ u8 bpp;
+ u8 alpha_enable;
+ u8 num_planes;
+ enum dpu_fetch_type fetch_mode;
+ DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+ u16 tile_width;
+ u16 tile_height;
+};
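+
+/*
+ * Hypothetical table entry for a linear interleaved ARGB8888 layout,
+ * showing how the fields above combine (the real tables live in
+ * dpu_formats.c):
+ *
+ *	.fetch_planes = DPU_PLANE_INTERLEAVED,
+ *	.element = { C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb },
+ *	.bits = { COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT },
+ *	.chroma_sample = DPU_CHROMA_RGB,
+ *	.unpack_count = 4, .bpp = 4, .alpha_enable = 1,
+ *	.num_planes = 1, .fetch_mode = DPU_FETCH_LINEAR,
+ */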
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+ const struct dpu_format *format;
+ uint32_t num_planes;
+ uint32_t width;
+ uint32_t height;
+ uint32_t total_size;
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ uint32_t plane_size[DPU_MAX_PLANES];
+ uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+ /* matrix coefficients in S15.16 format */
+ uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
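+
+/*
+ * In S15.16 fixed point 1.0 is 0x00010000, so an identity matrix (an
+ * illustrative value, not one shipped by this header) would be:
+ *
+ *	.csc_mv = { 0x10000, 0x0, 0x0,
+ *		    0x0, 0x10000, 0x0,
+ *		    0x0, 0x0, 0x10000 },
+ */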
+
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE (1 << 0)
+#define DPU_DBG_MASK_CDM (1 << 1)
+#define DPU_DBG_MASK_INTF (1 << 2)
+#define DPU_DBG_MASK_LM (1 << 3)
+#define DPU_DBG_MASK_CTL (1 << 4)
+#define DPU_DBG_MASK_PINGPONG (1 << 5)
+#define DPU_DBG_MASK_SSPP (1 << 6)
+#define DPU_DBG_MASK_WB (1 << 7)
+#define DPU_DBG_MASK_TOP (1 << 8)
+#define DPU_DBG_MASK_VBIF (1 << 9)
+#define DPU_DBG_MASK_ROT (1 << 10)
+
+#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 000000000000..cc3a623903f4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,250 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+
+static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->pingpong_count; i++) {
+ if (pp == m->pingpong[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->pingpong[i].base;
+ b->length = m->pingpong[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_PINGPONG;
+ return &m->pingpong[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int cfg;
+
+ if (!pp || !te)
+ return -EINVAL;
+ c = &pp->hw;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+ DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+ DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+ DPU_REG_WRITE(c, PP_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
+ DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ return 0;
+}
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+ u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+ int rc;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+ val, (val & 0xffff) >= 1, 10, timeout_us);
+
+ return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!pp)
+ return -EINVAL;
+ c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+ return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+ bool enable_external_te)
+{
+	struct dpu_hw_blk_reg_map *c;
+ u32 cfg;
+ int orig;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+ orig = (bool)(cfg & BIT(20));
+ if (enable_external_te)
+ cfg |= BIT(20);
+ else
+ cfg &= ~BIT(20);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+ return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+
+ if (!pp || !info)
+ return -EINVAL;
+ c = &pp->hw;
+
+ val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+ info->rd_ptr_init_val = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+ info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+ info->rd_ptr_line_count = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_LINE_COUNT);
+ info->wr_ptr_line_count = val & 0xffff;
+
+ return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+	struct dpu_hw_blk_reg_map *c;
+ u32 height, init;
+ u32 line = 0xFFFF;
+
+ if (!pp)
+ return 0;
+ c = &pp->hw;
+
+ init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+ height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+ if (height < init)
+ goto line_count_exit;
+
+ line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
+ if (line < init)
+ line += (0xFFFF - init);
+ else
+ line -= init;
+
+line_count_exit:
+ return line;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
+ const struct dpu_pingpong_cfg *hw_cap)
+{
+ ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
+ ops->enable_tearcheck = dpu_hw_pp_enable_te;
+ ops->connect_external_te = dpu_hw_pp_connect_external_te;
+ ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
+ ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+ ops->get_line_count = dpu_hw_pp_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_pingpong *c;
+ struct dpu_pingpong_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _pingpong_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_pingpong_ops(&c->ops, c->caps);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+ if (pp)
+ dpu_hw_blk_destroy(&pp->base);
+ kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 000000000000..3caccd7d6a3e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+ /*
+	 * This is the ratio of the MDP VSYNC clock frequency (Hz)
+	 * to the refresh rate, divided by the number of lines
+ */
+ u32 vsync_count;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u8 hw_vsync_mode;
+};
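+
+/*
+ * Worked example for vsync_count with illustrative numbers: given a
+ * 19.2 MHz vsync counter clock, a 60 fps panel and 2000 total lines,
+ * vsync_count = 19200000 / (60 * 2000) = 160 counter ticks per line.
+ */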
+
+struct dpu_hw_pp_vsync_info {
+ u32 rd_ptr_init_val; /* value of rd pointer at vsync edge */
+ u32 rd_ptr_frame_count; /* num frames sent since enabling interface */
+ u32 rd_ptr_line_count; /* current line on panel (rd ptr) */
+ u32 wr_ptr_line_count; /* current line within pp fifo (wr ptr) */
+};
+
+/**
+ *
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong HW driver functions
+ * These functions are assumed to be called after clocks are enabled
+ * @setup_tearcheck : program tear check values
+ * @enable_tearcheck : enables tear check
+ * @connect_external_te : enable/disable listening to the external TE input
+ * @get_vsync_info : retrieves timing info of the panel
+ * @poll_timeout_wr_ptr : poll until write pointer transmission starts
+ * @get_line_count: obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+ /**
+	 * enables vsync generation, sets up the init value of the
+	 * read pointer and programs the tear check configuration
+ */
+ int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *cfg);
+
+ /**
+ * enables tear check block
+ */
+ int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+ bool enable);
+
+ /**
+ * read, modify, write to either set or clear listening to external TE
+ * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+ */
+ int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+ bool enable_external_te);
+
+ /**
+ * provides the programmed and current
+ * line_count
+ */
+ int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info);
+
+ /**
+ * poll until write pointer transmission starts
+ * @Return: 0 on success, -ETIMEDOUT on timeout
+ */
+ int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+ /**
+ * Obtain current vertical line counter
+ */
+ u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+};
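+
+/*
+ * Sketch of a command-mode bring-up sequence using these ops (the
+ * ordering is implied by the callbacks above, not mandated here):
+ *
+ *	pp->ops.setup_tearcheck(pp, &tc_cfg);
+ *	pp->ops.enable_tearcheck(pp, true);
+ *	...
+ *	pp->ops.poll_timeout_wr_ptr(pp, timeout_us);
+ */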
+
+struct dpu_hw_pingpong {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum dpu_pingpong idx;
+ const struct dpu_pingpong_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ * pingpong idx.
+ * @idx: Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 000000000000..c25b52a6b219
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1 0x16C
+#define SSPP_SRC_XY_REC1 0x168
+#define SSPP_OUT_SIZE_REC1 0x160
+#define SSPP_OUT_XY_REC1 0x164
+#define SSPP_SRC_FORMAT_REC1 0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
+#define SSPP_SRC_OP_MODE_REC1 0x17C
+#define SSPP_MULTIRECT_OPMODE 0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
+#define SSPP_EXCL_REC_SIZE_REC1 0x184
+#define SSPP_EXCL_REC_XY_REC1 0x188
+
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_EXCL_REC_CTL 0x40
+#define SSPP_UBWC_STATIC_CTRL 0x44
+#define SSPP_FETCH_CONFIG 0x048
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_QOS_CTRL 0x6C
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_CREQ_LUT_0 0x74
+#define SSPP_CREQ_LUT_1 0x78
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_TRAFFIC_SHAPER 0x130
+#define SSPP_CDP_CNTL 0x134
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
+#define SSPP_TRAFFIC_SHAPER_REC1 0x158
+#define SSPP_EXCL_REC_SIZE 0x1B4
+#define SSPP_EXCL_REC_XY 0x1B8
+#define SSPP_VIG_OP_MODE 0x0
+#define SSPP_VIG_CSC_10_OP_MODE 0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN BIT(17)
+#define VIG_OP_MEM_PROT_CONT BIT(15)
+#define VIG_OP_MEM_PROT_VAL BIT(14)
+#define VIG_OP_MEM_PROT_SAT BIT(13)
+#define VIG_OP_MEM_PROT_HUE BIT(12)
+#define VIG_OP_HIST BIT(8)
+#define VIG_OP_SKY_COL BIT(7)
+#define VIG_OP_FOIL BIT(6)
+#define VIG_OP_SKIN_COL BIT(5)
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN BIT(0)
+#define CSC_10BIT_OFFSET 4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK 19200000
+
+static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+ int s_id,
+ u32 *idx)
+{
+ int rc = 0;
+	const struct dpu_sspp_sub_blks *sblk;
+
+	if (!ctx)
+		return -EINVAL;
+
+	sblk = ctx->cap->sblk;
+
+ switch (s_id) {
+ case DPU_SSPP_SRC:
+ *idx = sblk->src_blk.base;
+ break;
+ case DPU_SSPP_SCALER_QSEED2:
+ case DPU_SSPP_SCALER_QSEED3:
+ case DPU_SSPP_SCALER_RGB:
+ *idx = sblk->scaler_blk.base;
+ break;
+ case DPU_SSPP_CSC:
+ case DPU_SSPP_CSC_10BIT:
+ *idx = sblk->csc_blk.base;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode)
+{
+ u32 mode_mask;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (index == DPU_SSPP_RECT_SOLO) {
+		/*
+		 * If the rect index is RECT_SOLO, no virtual plane can be
+		 * sharing the same SSPP id, so disable multirect
+		 */
+ mode_mask = 0;
+ } else {
+ mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+ mode_mask |= index;
+ if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+ mode_mask |= BIT(2);
+ else
+ mode_mask &= ~BIT(2);
+ }
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+ _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+ !test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/*
+ * Setup source pixel format and flip
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 chroma_samp, unpack, src_format;
+ u32 opmode = 0;
+ u32 fast_clear = 0;
+ u32 op_mode_off, unpack_pat_off, format_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+ op_mode_off = SSPP_SRC_OP_MODE;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+ format_off = SSPP_SRC_FORMAT;
+ } else {
+ op_mode_off = SSPP_SRC_OP_MODE_REC1;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+ format_off = SSPP_SRC_FORMAT_REC1;
+ }
+
+ c = &ctx->hw;
+ opmode = DPU_REG_READ(c, op_mode_off + idx);
+ opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+ MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+ if (flags & DPU_SSPP_FLIP_LR)
+ opmode |= MDSS_MDP_OP_FLIP_LR;
+ if (flags & DPU_SSPP_FLIP_UD)
+ opmode |= MDSS_MDP_OP_FLIP_UD;
+
+ chroma_samp = fmt->chroma_sample;
+ if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+ if (chroma_samp == DPU_CHROMA_H2V1)
+ chroma_samp = DPU_CHROMA_H1V2;
+ else if (chroma_samp == DPU_CHROMA_H1V2)
+ chroma_samp = DPU_CHROMA_H2V1;
+ }
+
+ src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+ if (flags & DPU_SSPP_ROT_90)
+ src_format |= BIT(11); /* ROT90 */
+
+ if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+ src_format |= BIT(8); /* SRCC3_EN */
+
+ if (flags & DPU_SSPP_SOLID_FILL)
+ src_format |= BIT(22);
+
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) | (fmt->element[0] << 0);
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
+
+ if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+ if (DPU_FORMAT_IS_UBWC(fmt))
+ opmode |= MDSS_MDP_OP_BWC_EN;
+ src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
+ DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+ DPU_FETCH_CONFIG_RESET_VALUE |
+ ctx->mdp->highest_bank_bit << 18);
+ if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ fast_clear | (ctx->mdp->ubwc_swizzle) |
+ (ctx->mdp->highest_bank_bit << 4));
+ }
+ }
+
+ opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+ /* if this is YUV pixel format, enable CSC */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ src_format |= BIT(15);
+
+ if (DPU_FORMAT_IS_DX(fmt))
+ src_format |= BIT(14);
+
+ /* update scaler opmode, if appropriate */
+ if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+ else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+ _sspp_setup_csc10_opmode(ctx,
+ VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+
+ DPU_REG_WRITE(c, format_off + idx, src_format);
+ DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+ DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+ /* clear previous UBWC error */
+ DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u8 color;
+ u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+ const u32 bytemask = 0xff;
+ const u32 shortmask = 0xffff;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+ return;
+
+ c = &ctx->hw;
+
+	/* program SW pixel extension override for all color components */
+ for (color = 0; color < DPU_MAX_PLANES; color++) {
+ /* color 2 has the same set of registers as color 1 */
+ if (color == 2)
+ continue;
+
+ lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+ ((pe_ext->right_rpt[color] & bytemask) << 16)|
+ ((pe_ext->left_ftch[color] & bytemask) << 8)|
+ (pe_ext->left_rpt[color] & bytemask);
+
+ tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+ ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+ ((pe_ext->top_ftch[color] & bytemask) << 8)|
+ (pe_ext->top_rpt[color] & bytemask);
+
+ tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+ pe_ext->num_ext_pxls_top[color] +
+ pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+ ((pe_ext->roi_w[color] +
+ pe_ext->num_ext_pxls_left[color] +
+ pe_ext->num_ext_pxls_right[color]) & shortmask);
+ }
+
+ /* color 0 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+ tot_req_pixels[0]);
+
+ /* color 1 and color 2 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+ tot_req_pixels[1]);
+
+ /* color 3 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+ tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *sspp,
+ struct dpu_hw_pixel_ext *pe,
+ void *scaler_cfg)
+{
+ u32 idx;
+ struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+ (void)pe;
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
+ || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+ return;
+
+ dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+ ctx->cap->sblk->scaler_blk.version,
+ sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+ u32 idx;
+
+ if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+ return 0;
+
+ return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/*
+ * dpu_hw_sspp_setup_rects - program source and destination rectangles
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_index)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+ u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+ src_size_off = SSPP_SRC_SIZE;
+ src_xy_off = SSPP_SRC_XY;
+ out_size_off = SSPP_OUT_SIZE;
+ out_xy_off = SSPP_OUT_XY;
+ } else {
+ src_size_off = SSPP_SRC_SIZE_REC1;
+ src_xy_off = SSPP_SRC_XY_REC1;
+ out_size_off = SSPP_OUT_SIZE_REC1;
+ out_xy_off = SSPP_OUT_XY_REC1;
+ }
+
+ /* src and dest rect programming */
+ src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+ src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+ drm_rect_width(&cfg->src_rect);
+ dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+ dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+ drm_rect_width(&cfg->dst_rect);
+
+ if (rect_index == DPU_SSPP_RECT_SOLO) {
+ ystride0 = (cfg->layout.plane_pitch[0]) |
+ (cfg->layout.plane_pitch[1] << 16);
+ ystride1 = (cfg->layout.plane_pitch[2]) |
+ (cfg->layout.plane_pitch[3] << 16);
+ } else {
+ ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+ ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+ if (rect_index == DPU_SSPP_RECT_0) {
+ ystride0 = (ystride0 & 0xFFFF0000) |
+ (cfg->layout.plane_pitch[0] & 0x0000FFFF);
+ ystride1 = (ystride1 & 0xFFFF0000)|
+ (cfg->layout.plane_pitch[2] & 0x0000FFFF);
+ } else {
+ ystride0 = (ystride0 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[0] << 16) &
+ 0xFFFF0000);
+ ystride1 = (ystride1 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[2] << 16) &
+ 0xFFFF0000);
+ }
+ }
+
+ /* rectangle register programming */
+ DPU_REG_WRITE(c, src_size_off + idx, src_size);
+ DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+ DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+ DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ int i;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO) {
+ for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+ cfg->layout.plane_addr[i]);
+ } else if (rect_mode == DPU_SSPP_RECT_0) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ }
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+ struct dpu_csc_cfg *data)
+{
+ u32 idx;
+ bool csc10 = false;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+ return;
+
+ if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+ idx += CSC_10BIT_OFFSET;
+ csc10 = true;
+ }
+
+ dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color,
+	enum dpu_sspp_multirect_index rect_index)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+ else
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+ color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+ cfg->creq_lut >> 32);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ }
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+ u32 qos_ctrl = 0;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->vblank_en) {
+ qos_ctrl |= ((cfg->creq_vblank &
+ SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+ qos_ctrl |= ((cfg->danger_vblank &
+ SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+ qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg)
+{
+ u32 idx;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->tile_amortize_enable)
+ cdp_cntl |= BIT(2);
+ if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_SSPP_SRC, &features)) {
+ c->ops.setup_format = dpu_hw_sspp_setup_format;
+ c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+ c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+ c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+ c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+ }
+
+ if (test_bit(DPU_SSPP_QOS, &features)) {
+ c->ops.setup_danger_safe_lut =
+ dpu_hw_sspp_setup_danger_safe_lut;
+ c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+ c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+ }
+
+ if (test_bit(DPU_SSPP_CSC, &features) ||
+ test_bit(DPU_SSPP_CSC_10BIT, &features))
+ c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+ if (dpu_hw_sspp_multirect_enabled(c->cap))
+ c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+ c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+ c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+ }
+
+ if (test_bit(DPU_SSPP_CDP, &features))
+ c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if ((sspp < SSPP_MAX) && catalog && addr && b) {
+ for (i = 0; i < catalog->sspp_count; i++) {
+ if (sspp == catalog->sspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = catalog->sspp[i].base;
+ b->length = catalog->sspp[i].len;
+ b->hwversion = catalog->hwversion;
+ b->log_mask = DPU_DBG_MASK_SSPP;
+ return &catalog->sspp[i];
+ }
+ }
+ }
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe)
+{
+ struct dpu_hw_pipe *hw_pipe;
+ struct dpu_sspp_cfg *cfg;
+ int rc;
+
+ if (!addr || !catalog)
+ return ERR_PTR(-EINVAL);
+
+ hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ if (!hw_pipe)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(hw_pipe);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ hw_pipe->catalog = catalog;
+ hw_pipe->mdp = &catalog->mdp[0];
+ hw_pipe->idx = idx;
+ hw_pipe->cap = cfg;
+ _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+ rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return hw_pipe;
+
+blk_init_error:
+ kzfree(hw_pipe);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 000000000000..4d81e5f5ce1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,424 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR BIT(0)
+#define DPU_SSPP_FLIP_UD BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90 BIT(2)
+#define DPU_SSPP_ROT_90 BIT(3)
+#define DPU_SSPP_SOLID_FILL BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
+ (1UL << DPU_SSPP_SCALER_QSEED2) | \
+ (1UL << DPU_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+ DPU_SSPP_COMP_0,
+ DPU_SSPP_COMP_1_2,
+ DPU_SSPP_COMP_2,
+ DPU_SSPP_COMP_3,
+
+ DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Since such configs have no benefit over
+ * SOLO mode, and to keep plane management simple,
+ * we don't support single rect multirect configs.
+ */
+enum dpu_sspp_multirect_index {
+ DPU_SSPP_RECT_SOLO = 0,
+ DPU_SSPP_RECT_0,
+ DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+ DPU_SSPP_MULTIRECT_NONE = 0,
+ DPU_SSPP_MULTIRECT_PARALLEL,
+ DPU_SSPP_MULTIRECT_TIME_MX,
+};
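+
+/*
+ * Hypothetical multirect pairing: two planes sharing one SSPP would each
+ * claim a rectangle in parallel fetch mode via the setup_multirect op
+ * declared below:
+ *
+ *	pipe->ops.setup_multirect(pipe, DPU_SSPP_RECT_0,
+ *				  DPU_SSPP_MULTIRECT_PARALLEL);
+ *	pipe->ops.setup_multirect(pipe, DPU_SSPP_RECT_1,
+ *				  DPU_SSPP_MULTIRECT_PARALLEL);
+ */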
+
+enum {
+ DPU_FRAME_LINEAR,
+ DPU_FRAME_TILE_A4X,
+ DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+ DPU_SCALE_FILTER_NEAREST = 0,
+ DPU_SCALE_FILTER_BIL,
+ DPU_SCALE_FILTER_PCMN,
+ DPU_SCALE_FILTER_CA,
+ DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+ DPU_SCALE_ALPHA_PIXEL_REP,
+ DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+ DPU_SCALE_2D_4X4,
+ DPU_SCALE_2D_CIR,
+ DPU_SCALE_1D_SEP,
+ DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[DPU_MAX_PLANES];
+ int phase_step_x[DPU_MAX_PLANES];
+ int init_phase_y[DPU_MAX_PLANES];
+ int phase_step_y[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels of extension in the left, right, top and bottom
+	 * directions for all color components. The value for each color
+	 * component should be the sum of fetch and repeat pixels.
+ */
+ int num_ext_pxls_left[DPU_MAX_PLANES];
+ int num_ext_pxls_right[DPU_MAX_PLANES];
+ int num_ext_pxls_top[DPU_MAX_PLANES];
+ int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels that need to be overfetched from the source image
+	 * in the left, right, top and bottom directions for scaling.
+ */
+ int left_ftch[DPU_MAX_PLANES];
+ int right_ftch[DPU_MAX_PLANES];
+ int top_ftch[DPU_MAX_PLANES];
+ int btm_ftch[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels that need to be repeated in the left, right, top
+	 * and bottom directions for scaling.
+ */
+ int left_rpt[DPU_MAX_PLANES];
+ int right_rpt[DPU_MAX_PLANES];
+ int top_rpt[DPU_MAX_PLANES];
+ int btm_rpt[DPU_MAX_PLANES];
+
+ uint32_t roi_w[DPU_MAX_PLANES];
+ uint32_t roi_h[DPU_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+ enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
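+
+/*
+ * Worked example with illustrative numbers: for a 1920-pixel-wide ROI on
+ * color component 0 with 2 pixels of left and 2 pixels of right
+ * extension, roi_w[0] = 1920, num_ext_pxls_left[0] = 2 and
+ * num_ext_pxls_right[0] = 2, so setup_pe programs a total requested
+ * width of 1920 + 2 + 2 = 1924 pixels.
+ */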
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout: format layout information for programming buffer to hardware
+ * @src_rect: source ROI; the caller accounts for operations such as
+ *            decimation and flip when programming this field
+ * @dst_rect: destination ROI
+ * @index: index of the rectangle of SSPP
+ * @mode: parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+ struct dpu_hw_fmt_layout layout;
+ struct drm_rect src_rect;
+ struct drm_rect dst_rect;
+ enum dpu_sspp_multirect_index index;
+ enum dpu_sspp_multirect_mode mode;
+};
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating danger level based on fill level
+ * @safe_lut: LUT for generating safe level based on fill level
+ * @creq_lut: LUT for generating creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ bool vblank_en;
+ bool danger_safe_en;
+};
+
+/**
+ * enum CDP preload ahead address size
+ */
+enum {
+ DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_pipe_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+ u64 size;
+ u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP HW driver functions
+ * The caller must use the init function to get a pipe context for each pipe
+ * These functions are assumed to be called after clocks are enabled
+ */
+struct dpu_hw_sspp_ops {
+ /**
+	 * setup_format - setup pixel format, cropping rectangle and flip
+	 * @ctx: Pointer to pipe context
+	 * @fmt: Pointer to format description
+ * @flags: Extra flags for format config
+ * @index: rectangle index in multirect
+ */
+ void (*setup_format)(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_rects - setup pipe ROI rectangles
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_rects)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_pe - setup pipe pixel extension
+ * @ctx: Pointer to pipe context
+ * @pe_ext: Pointer to pixel ext settings
+ */
+ void (*setup_pe)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext);
+
+ /**
+ * setup_sourceaddress - setup pipe source addresses
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+	 * setup_csc - setup color space conversion
+ * @ctx: Pointer to pipe context
+ * @data: Pointer to config structure
+ */
+ void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+
+ /**
+ * setup_solidfill - enable/disable colorfill
+ * @ctx: Pointer to pipe context
+	 * @color: Fill color value
+ * @index: rectangle index in multirect
+ */
+ void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_multirect - setup multirect configuration
+ * @ctx: Pointer to pipe context
+ * @index: rectangle index in multirect
+ * @mode: parallel fetch / time multiplex multirect mode
+ */
+ void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode);
+
+ /**
+ * setup_sharpening - setup sharpening
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to config structure
+ */
+ void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_sharp_cfg *cfg);
+
+ /**
+ * setup_danger_safe_lut - setup danger safe LUTs
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_creq_lut - setup CREQ LUT
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_qos_ctrl - setup QoS control
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_histogram - setup histograms
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to histogram configuration
+ */
+ void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+ void *cfg);
+
+ /**
+ * setup_scaler - setup scaler
+ * @ctx: Pointer to pipe context
+ * @pipe_cfg: Pointer to pipe configuration
+ * @pe_cfg: Pointer to pixel extension configuration
+ * @scaler_cfg: Pointer to scaler configuration
+ */
+ void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct dpu_hw_pixel_ext *pe_cfg,
+ void *scaler_cfg);
+
+ /**
+ * get_scaler_ver - get scaler h/w version
+ * @ctx: Pointer to pipe context
+ */
+ u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+ /**
+ * setup_cdp - setup client driven prefetch
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to cdp configuration
+ */
+ void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_mdp_cfg *mdp;
+
+ /* Pipe */
+ enum dpu_sspp idx;
+ const struct dpu_sspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_sspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_pipe - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pipe, base);
+}
+
+/**
+ * dpu_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once before accessing each pipe.
+ * @idx: Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog: Pointer to mdss catalog data
+ * @is_virtual_pipe: whether this pipe is a virtual pipe
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe);
+
+/**
+ * dpu_hw_sspp_destroy(): Destroys SSPP driver context
+ * Should be called during HW pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
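+
+/*
+ * Illustrative lifetime sketch (mmio, catalog, fmt and flags are
+ * placeholders for caller state, not names defined by this patch):
+ *
+ *	struct dpu_hw_pipe *pipe;
+ *
+ *	pipe = dpu_hw_sspp_init(SSPP_VIG0, mmio, catalog, false);
+ *	if (IS_ERR(pipe))
+ *		return PTR_ERR(pipe);
+ *	if (pipe->ops.setup_format)
+ *		pipe->ops.setup_format(pipe, fmt, flags, DPU_SSPP_RECT_SOLO);
+ *	...
+ *	dpu_hw_sspp_destroy(pipe);
+ */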
+
+#endif /*_DPU_HW_SSPP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 000000000000..db2798e862fc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,398 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE 0x28
+#define UBWC_STATIC 0x144
+
+#define FLD_SPLIT_DISPLAY_CMD BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
+
+#define TE_LINE_INTERVAL 0x3F4
+
+#define TRAFFIC_SHAPER_EN BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + ((num) * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + ((num) * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+#define MDP_WD_TIMER_0_CTL 0x380
+#define MDP_WD_TIMER_0_CTL2 0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
+
+#define MDP_TICK_COUNT 16
+#define XO_CLK_RATE 19200
+#define MS_TICKS_IN_SEC 1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+ ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * (fps))))
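+
+/*
+ * For illustration: at 60 fps the load value is
+ * (1000 * 19200) / (16 * 60) = 20000, i.e. the 19.2 MHz XO divided into
+ * 16-tick units counts down one full frame period per timer expiry.
+ */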
+
+#define DCE_SEL 0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 upper_pipe = 0;
+ u32 lower_pipe = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->en) {
+ if (cfg->mode == INTF_MODE_CMD) {
+ lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+ /* interface controlling sw trigger */
+ if (cfg->intf == INTF_2)
+ lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+ else
+ lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = lower_pipe;
+ } else {
+ if (cfg->intf == INTF_2) {
+ lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+ } else {
+ lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+ }
+ }
+ }
+
+ DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
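+
+/*
+ * Illustrative configuration (a sketch, not a caller in this patch): a
+ * dual-pipe command mode panel whose software trigger is driven from
+ * INTF_1 would use
+ *
+ *	struct split_pipe_cfg cfg = {
+ *		.en = true,
+ *		.mode = INTF_MODE_CMD,
+ *		.intf = INTF_1,
+ *		.split_flush_en = true,
+ *	};
+ *	mdp->ops.setup_split_pipe(mdp, &cfg);
+ *
+ * which writes FLD_SPLIT_DISPLAY_CMD | FLD_INTF_2_SW_TRG_MUX to both the
+ * upper and lower pipe control registers.
+ */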
+
+static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 out_ctl = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->intf_en)
+ out_ctl |= BIT(19);
+
+ DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off, bit_off;
+ u32 reg_val, new_val;
+ bool clk_forced_on;
+
+ if (!mdp)
+ return false;
+
+ c = &mdp->hw;
+
+ if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+ return false;
+
+ reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+ bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+ reg_val = DPU_REG_READ(c, reg_off);
+
+ if (enable)
+ new_val = reg_val | BIT(bit_off);
+ else
+ new_val = reg_val & ~BIT(bit_off);
+
+ DPU_REG_WRITE(c, reg_off, new_val);
+
+ clk_forced_on = !(reg_val & BIT(bit_off));
+
+ return clk_forced_on;
+}
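+
+/*
+ * Typical usage sketch: force the clock on around register access and
+ * restore the previous state using the return value, e.g.
+ *
+ *	bool forced_on;
+ *
+ *	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, DPU_CLK_CTRL_VIG0,
+ *						   true);
+ *	... program the pipe registers ...
+ *	if (forced_on)
+ *		mdp->ops.setup_clk_force_ctrl(mdp, DPU_CLK_CTRL_VIG0, false);
+ */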
+
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, DANGER_STATUS);
+ status->mdp = (value >> 0) & 0x3;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+ static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+ if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+ return;
+
+ c = &mdp->hw;
+ reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
+ for (i = 0; i < cfg->pp_count; i++) {
+ int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+ if (pp_idx >= ARRAY_SIZE(pp_offset))
+ continue;
+
+ reg &= ~(0xf << pp_offset[pp_idx]);
+ reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+ }
+ DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+ if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+ cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+ switch (cfg->vsync_source) {
+ case DPU_VSYNC_SOURCE_WD_TIMER_4:
+ wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_4_CTL;
+ wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_3:
+ wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_3_CTL;
+ wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_2:
+ wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_2_CTL;
+ wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_1:
+ wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_1_CTL;
+ wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_0:
+ default:
+ wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_0_CTL;
+ wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+ break;
+ }
+
+ DPU_REG_WRITE(c, wd_load_value,
+ CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+ DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+ reg = DPU_REG_READ(c, wd_ctl2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ DPU_REG_WRITE(c, wd_ctl2, reg);
+
+ /* make sure that timers are enabled/disabled for vsync state */
+ wmb();
+ }
+}
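+
+/*
+ * Illustrative configuration: driving PINGPONG_0 from watchdog timer 0
+ * at 60 fps would look like
+ *
+ *	struct dpu_vsync_source_cfg cfg = {
+ *		.pp_count = 1,
+ *		.frame_rate = 60,
+ *		.ppnumber = { PINGPONG_0 },
+ *		.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0,
+ *	};
+ *	mdp->ops.setup_vsync_source(mdp, &cfg);
+ */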
+
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, SAFE_STATUS);
+ status->mdp = (value >> 0) & 0x1;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_blk_reg_map c;
+
+ if (!mdp || !m)
+ return;
+
+ if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
+ return;
+
+ /* force blk offset to zero to access beginning of register region */
+ c = mdp->hw;
+ c.blk_off = 0x0;
+ DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!mdp)
+ return;
+
+ c = &mdp->hw;
+
+ DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+ ops->setup_cdm_output = dpu_hw_setup_cdm_output;
+ ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+ ops->get_danger_status = dpu_hw_get_danger_status;
+ ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+ ops->get_safe_status = dpu_hw_get_safe_status;
+ ops->reset_ubwc = dpu_hw_reset_ubwc;
+ ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->mdp_count; i++) {
+ if (mdp == m->mdp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mdp[i].base;
+ b->length = m->mdp[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_TOP;
+ return &m->mdp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mdp *mdp;
+ const struct dpu_mdp_cfg *cfg;
+ int rc;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &mdp->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(mdp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ mdp->idx = idx;
+ mdp->caps = cfg;
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+ rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+
+ return mdp;
+
+blk_init_error:
+ kfree(mdp);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+ if (mdp)
+ dpu_hw_blk_destroy(&mdp->base);
+ kfree(mdp);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 000000000000..899925aaa6d7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+ bool en;
+ bool rd_client;
+ u32 client_id;
+ u32 bpc_denom;
+ u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ * flushed
+ */
+struct split_pipe_cfg {
+ bool en;
+ enum dpu_intf_mode mode;
+ enum dpu_intf intf;
+ bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @intf_en : enable/disable interface output
+ */
+struct cdm_output_cfg {
+ bool intf_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+ u8 mdp;
+ u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure vsync source and, if required,
+ * the watchdog timers
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+ u32 pp_count;
+ u32 frame_rate;
+ u32 ppnumber[PINGPONG_MAX];
+ u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP HW driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct dpu_hw_mdp_ops {
+ /** setup_split_pipe() : Registers are not double buffered, this
+ * function should be called before timing control enable
+ * @mdp : mdp top context driver
+ * @cfg : upper and lower part of pipe configuration
+ */
+ void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg);
+
+ /**
+ * setup_cdm_output() : Setup selection control of the cdm data path
+ * @mdp : mdp top context driver
+ * @cfg : cdm output configuration
+ */
+ void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg);
+
+ /**
+ * setup_traffic_shaper() : Setup traffic shaper control
+ * @mdp : mdp top context driver
+ * @cfg : traffic shaper configuration
+ */
+ void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+ struct traffic_shaper_cfg *cfg);
+
+ /**
+ * setup_clk_force_ctrl - set clock force control
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled
+ * @enable: force on enable
+ * @return: if the clock is forced-on by this function
+ */
+ bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+ /**
+ * get_danger_status - get danger status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * setup_vsync_source - setup vsync source configuration details
+ * @mdp: mdp top context driver
+ * @cfg: vsync source selection configuration
+ */
+ void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg);
+
+ /**
+ * get_safe_status - get safe status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * reset_ubwc - reset top level UBWC configuration
+ * @mdp: mdp top context driver
+ * @m: pointer to mdss catalog data
+ */
+ void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
+
+ /**
+ * intf_audio_select - select the external interface for audio
+ * @mdp: mdp top context driver
+ */
+ void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* top */
+ enum dpu_mdp idx;
+ const struct dpu_mdp_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * to_dpu_hw_mdp - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mdp, base);
+}
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx: MDP TOP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
new file mode 100644
index 000000000000..4cabae480a7b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -0,0 +1,368 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+/* use a file-static variable for debugfs access */
+static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
+
+/* DPU_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION 0x00
+#define QSEED3_OP_MODE 0x04
+#define QSEED3_RGB2Y_COEFF 0x08
+#define QSEED3_PHASE_INIT 0x0C
+#define QSEED3_PHASE_STEP_Y_H 0x10
+#define QSEED3_PHASE_STEP_Y_V 0x14
+#define QSEED3_PHASE_STEP_UV_H 0x18
+#define QSEED3_PHASE_STEP_UV_V 0x1C
+#define QSEED3_PRELOAD 0x20
+#define QSEED3_DE_SHARPEN 0x24
+#define QSEED3_DE_SHARPEN_CTL 0x28
+#define QSEED3_DE_SHAPE_CTL 0x2C
+#define QSEED3_DE_THRESHOLD 0x30
+#define QSEED3_DE_ADJUST_DATA_0 0x34
+#define QSEED3_DE_ADJUST_DATA_1 0x38
+#define QSEED3_DE_ADJUST_DATA_2 0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
+#define QSEED3_SRC_SIZE_UV 0x44
+#define QSEED3_DST_SIZE 0x48
+#define QSEED3_COEF_LUT_CTRL 0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT 0
+#define QSEED3_COEF_LUT_DIR_BIT 1
+#define QSEED3_COEF_LUT_Y_CIR_BIT 2
+#define QSEED3_COEF_LUT_UV_CIR_BIT 3
+#define QSEED3_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3_BUFFER_CTRL 0x50
+#define QSEED3_CLK_CTRL0 0x54
+#define QSEED3_CLK_CTRL1 0x58
+#define QSEED3_CLK_STATUS 0x5C
+#define QSEED3_MISR_CTRL 0x70
+#define QSEED3_MISR_SIGNATURE_0 0x74
+#define QSEED3_MISR_SIGNATURE_1 0x78
+#define QSEED3_PHASE_INIT_Y_H 0x90
+#define QSEED3_PHASE_INIT_Y_V 0x94
+#define QSEED3_PHASE_INIT_UV_H 0x98
+#define QSEED3_PHASE_INIT_UV_V 0x9C
+#define QSEED3_COEF_LUT 0x100
+#define QSEED3_FILTERS 5
+#define QSEED3_LUT_REGIONS 4
+#define QSEED3_CIRCULAR_LUTS 9
+#define QSEED3_SEPARABLE_LUTS 10
+#define QSEED3_LUT_SIZE 60
+#define QSEED3_ENABLE 2
+#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
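+
+/*
+ * For illustration, the resulting table sizes are: directional LUT
+ * 200 words (800 bytes), circular LUTs 60 * 9 * 4 = 2160 bytes and
+ * separable LUTs 60 * 10 * 4 = 2400 bytes.
+ */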
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name)
+{
+ /* don't need to mutex protect this */
+ if (c->log_mask & dpu_hw_util_log_mask)
+ DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+ name, c->blk_off + reg_off, val);
+ writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
+{
+ return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
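+
+/*
+ * The effective MMIO address is always base_off + blk_off + reg_off;
+ * e.g. with blk_off = 0x1000, DPU_REG_WRITE(c, 0x14, val) lands at
+ * MDP base + 0x1014.
+ */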
+
+u32 *dpu_hw_util_get_log_mask_ptr(void)
+{
+ return &dpu_hw_util_log_mask;
+}
+
+static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int i, j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset, lut_len;
+ u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+ {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+ {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+ {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+ {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+ {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+ };
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+ (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->dir_lut;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->cir_lut +
+ scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[2] = scaler3_cfg->cir_lut +
+ scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[3] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[4] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+ lut_addr = QSEED3_COEF_LUT + offset
+ + off_tbl[filter][i][1];
+ lut_len = off_tbl[filter][i][0] << 2;
+ for (j = 0; j < lut_len; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+}
+
+static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+ u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+ u32 adjust_a, adjust_b, adjust_c;
+
+ if (!de_cfg->enable)
+ return;
+
+ sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+ ((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+ sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+ ((de_cfg->prec_shift & 0x7) << 13) |
+ ((de_cfg->clip & 0x7) << 16);
+
+ shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+ ((de_cfg->thr_dieout & 0x3FF) << 16);
+
+ de_thr = (de_cfg->thr_low & 0x3FF) |
+ ((de_cfg->thr_high & 0x3FF) << 16);
+
+ adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+ ((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+ adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+ ((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+ adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+ ((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+}
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format)
+{
+ u32 op_mode = 0;
+ u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
+ if (!scaler3_cfg->enable)
+ goto end;
+
+ op_mode |= BIT(0);
+ op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+ if (format && DPU_FORMAT_IS_YUV(format)) {
+ op_mode |= BIT(12);
+ op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+ }
+
+ op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+ op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+ preload =
+ ((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+ ((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+ ((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+ ((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+ src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+ src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+ dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+ ((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+ if (scaler3_cfg->de.enable) {
+ _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+ op_mode |= BIT(8);
+ }
+
+ if (scaler3_cfg->lut_flag)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
+ scaler_offset);
+
+ if (scaler_version == 0x1002) {
+ phase_init =
+ ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+ ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+ ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+ ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+ } else {
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+ scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+ scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+ scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+ scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+ }
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+ scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+ scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+ scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+ scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+ DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
+ if (format && !DPU_FORMAT_IS_DX(format))
+ op_mode |= BIT(14);
+
+ if (format && format->alpha_enable) {
+ op_mode |= BIT(10);
+ if (scaler_version == 0x1002)
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+ else
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+ }
+
+ DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset)
+{
+ return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10)
+{
+ static const u32 matrix_shift = 7;
+ u32 clamp_shift = csc10 ? 16 : 8;
+ u32 val;
+
+ /* matrix coeff - convert S15.16 to S4.9 */
+ val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off, val);
+ val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
+ val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
+ val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
+ val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+ DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+ /* Pre clamp */
+ val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
+ val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
+ val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+ /* Post clamp */
+ val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
+ val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
+ val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+ /* Pre-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+ /* Post-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
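+
+/*
+ * For illustration of the S15.16 -> S4.9 conversion above: a unity
+ * coefficient of 1.0 is 0x00010000 in S15.16; shifting right by 7 leaves
+ * 0x200, which is 1.0 with 9 fractional bits and fits the 13-bit field.
+ */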
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
new file mode 100644
index 000000000000..1240f505ca53
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -0,0 +1,348 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+
+#define REG_MASK(n) ((BIT(n)) - 1)
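+/* e.g. REG_MASK(3) == 0x7, useful when building register field masks */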
+struct dpu_format_extended;
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off: mdp register mapped offset
+ * @blk_off: pipe offset relative to mdss offset
+ * @length: length of register block offset
+ * @xin_id: xin id
+ * @hwversion: mdss hw version number
+ * @log_mask: log mask for this block
+ */
+struct dpu_hw_blk_reg_map {
+ void __iomem *base_off;
+ u32 blk_off;
+ u32 length;
+ u32 xin_id;
+ u32 hwversion;
+ u32 log_mask;
+};
+
+/**
+ * struct dpu_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable: detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @clip: clip shift
+ * @limit: limit value
+ * @thr_quiet: quiet threshold
+ * @thr_dieout: dieout threshold
+ * @thr_low: low threshold
+ * @thr_high: high threshold
+ * @prec_shift: precision shift
+ * @adjust_a: A-coefficients for mapping curve
+ * @adjust_b: B-coefficients for mapping curve
+ * @adjust_c: C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+ u32 enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @init_phase_x: horizontal initial phase
+ * @phase_step_x: horizontal phase step
+ * @init_phase_y: vertical initial phase
+ * @phase_step_y: vertical phase step
+ * @preload_x: horizontal preload value
+ * @preload_y: vertical preload value
+ * @src_width: source width
+ * @src_height: source height
+ * @dst_width: destination width
+ * @dst_height: destination height
+ * @y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @uv_filter_cfg: uv plane filter configuration
+ * @alpha_filter_cfg: alpha filter configuration
+ * @blend_cfg: blend coefficients configuration
+ * @lut_flag: scaler LUT update flags
+ * 0x1 swap LUT bank
+ * 0x2 update 2D filter LUT
+ * 0x4 update y circular filter LUT
+ * 0x8 update uv circular filter LUT
+ * 0x10 update y separable filter LUT
+ * 0x20 update uv separable filter LUT
+ * @dir_lut_idx: 2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx: uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx: uv separable filter LUT index
+ * @dir_lut: pointer to 2D LUT
+ * @dir_len: size of 2D LUT in bytes
+ * @cir_lut: pointer to circular filter LUT
+ * @cir_len: size of circular filter LUT in bytes
+ * @sep_lut: pointer to separable filter LUT
+ * @sep_len: size of separable filter LUT in bytes
+ * @de: detail enhancer configuration
+ */
+struct dpu_hw_scaler3_cfg {
+ u32 enable;
+ u32 dir_en;
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ u32 preload_x[DPU_MAX_PLANES];
+ u32 preload_y[DPU_MAX_PLANES];
+ u32 src_width[DPU_MAX_PLANES];
+ u32 src_height[DPU_MAX_PLANES];
+
+ u32 dst_width;
+ u32 dst_height;
+
+ u32 y_rgb_filter_cfg;
+ u32 uv_filter_cfg;
+ u32 alpha_filter_cfg;
+ u32 blend_cfg;
+
+ u32 lut_flag;
+ u32 dir_lut_idx;
+
+ u32 y_rgb_cir_lut_idx;
+ u32 uv_cir_lut_idx;
+ u32 y_rgb_sep_lut_idx;
+ u32 uv_sep_lut_idx;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_hw_scaler3_de_cfg de;
+};
+
+struct dpu_hw_scaler3_lut_cfg {
+ bool is_configured;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+};
+
+/**
+ * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch: Number of extra pixels to overfetch from left
+ * @right_ftch: Number of extra pixels to overfetch from right
+ * @top_ftch: Number of extra lines to overfetch from top
+ * @btm_ftch: Number of extra lines to overfetch from bottom
+ * @left_rpt: Number of extra pixels to repeat from left
+ * @right_rpt: Number of extra pixels to repeat from right
+ * @top_rpt: Number of extra lines to repeat from top
+ * @btm_rpt: Number of extra lines to repeat from bottom
+ */
+struct dpu_drm_pix_ext_v1 {
+ /*
+ * Number of pixels ext in left, right, top and bottom direction
+ * for all color components.
+ */
+ int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
+ int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be overfetched in left, right, top
+ * and bottom directions from source image for scaling.
+ */
+ int32_t left_ftch[DPU_MAX_PLANES];
+ int32_t right_ftch[DPU_MAX_PLANES];
+ int32_t top_ftch[DPU_MAX_PLANES];
+ int32_t btm_ftch[DPU_MAX_PLANES];
+ /*
+ * Number of pixels needs to be repeated in left, right, top and
+ * bottom directions for scaling.
+ */
+ int32_t left_rpt[DPU_MAX_PLANES];
+ int32_t right_rpt[DPU_MAX_PLANES];
+ int32_t top_rpt[DPU_MAX_PLANES];
+ int32_t btm_rpt[DPU_MAX_PLANES];
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable: Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip: Clip coefficient
+ * @limit: Detail enhancer limit factor
+ * @thr_quiet: Quiet zone threshold
+ * @thr_dieout: Die-out zone threshold
+ * @thr_low: Linear zone left threshold
+ * @thr_high: Linear zone right threshold
+ * @prec_shift: Detail enhancer precision
+ * @adjust_a: Mapping curves A coefficients
+ * @adjust_b: Mapping curves B coefficients
+ * @adjust_c: Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+ uint32_t enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable: Scaler enable
+ * @dir_en: Direction detection block enable
+ * @pe: Pixel extension settings
+ * @horz_decimate: Horizontal decimation factor
+ * @vert_decimate: Vertical decimation factor
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @preload_x: Horizontal preload value
+ * @preload_y: Vertical preload value
+ * @src_width: Source width
+ * @src_height: Source height
+ * @dst_width: Destination width
+ * @dst_height: Destination height
+ * @y_rgb_filter_cfg: Y/RGB plane filter configuration
+ * @uv_filter_cfg: UV plane filter configuration
+ * @alpha_filter_cfg: Alpha filter configuration
+ * @blend_cfg: Selection of blend coefficients
+ * @lut_flag: LUT configuration flags
+ * @dir_lut_idx: 2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx: UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx: UV separable LUT index
+ * @de: Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+ /*
+ * General definitions
+ */
+ uint32_t enable;
+ uint32_t dir_en;
+
+ /*
+ * Pix ext settings
+ */
+ struct dpu_drm_pix_ext_v1 pe;
+
+ /*
+ * Decimation settings
+ */
+ uint32_t horz_decimate;
+ uint32_t vert_decimate;
+
+ /*
+ * Phase settings
+ */
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ uint32_t preload_x[DPU_MAX_PLANES];
+ uint32_t preload_y[DPU_MAX_PLANES];
+ uint32_t src_width[DPU_MAX_PLANES];
+ uint32_t src_height[DPU_MAX_PLANES];
+
+ uint32_t dst_width;
+ uint32_t dst_height;
+
+ uint32_t y_rgb_filter_cfg;
+ uint32_t uv_filter_cfg;
+ uint32_t alpha_filter_cfg;
+ uint32_t blend_cfg;
+
+ uint32_t lut_flag;
+ uint32_t dir_lut_idx;
+
+ /* for Y(RGB) and UV planes*/
+ uint32_t y_rgb_cir_lut_idx;
+ uint32_t uv_cir_lut_idx;
+ uint32_t y_rgb_sep_lut_idx;
+ uint32_t uv_sep_lut_idx;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_drm_de_v1 de;
+};
+
+u32 *dpu_hw_util_get_log_mask_ptr(void);
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name);
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
+
+#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
+#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
+
+#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+void *dpu_hw_util_get_dir(void);
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format);
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset);
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10);
+
+#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 000000000000..d43905525f92
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_dbg.h"
+
+#define VBIF_VERSION 0x0000
+#define VBIF_CLK_FORCE_CTRL0 0x0008
+#define VBIF_CLK_FORCE_CTRL1 0x000C
+#define VBIF_QOS_REMAP_00 0x0020
+#define VBIF_QOS_REMAP_01 0x0024
+#define VBIF_QOS_REMAP_10 0x0028
+#define VBIF_QOS_REMAP_11 0x002C
+#define VBIF_WRITE_GATHER_EN 0x00AC
+#define VBIF_IN_RD_LIM_CONF0 0x00B0
+#define VBIF_IN_RD_LIM_CONF1 0x00B4
+#define VBIF_IN_RD_LIM_CONF2 0x00B8
+#define VBIF_IN_WR_LIM_CONF0 0x00C0
+#define VBIF_IN_WR_LIM_CONF1 0x00C4
+#define VBIF_IN_WR_LIM_CONF2 0x00C8
+#define VBIF_OUT_RD_LIM_CONF0 0x00D0
+#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
+#define VBIF_XIN_PND_ERR 0x0190
+#define VBIF_XIN_SRC_ERR 0x0194
+#define VBIF_XIN_CLR_ERR 0x019C
+#define VBIF_XIN_HALT_CTRL0 0x0200
+#define VBIF_XIN_HALT_CTRL1 0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 pnd, src;
+
+ if (!vbif)
+ return;
+ c = &vbif->hw;
+ pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+ src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+ if (pnd_errors)
+ *pnd_errors = pnd;
+ if (src_errors)
+ *src_errors = src;
+
+ DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per bit field, 8 fields per 32-bit register so
+ * 16 bit fields maximum across two registers
+ */
+ if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+ return;
+
+ c = &vbif->hw;
+
+ if (xin_id >= 8) {
+ xin_id -= 8;
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+ } else {
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+ }
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (value & 0x7) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
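+
+/*
+ * For illustration: xin_id 9 selects VBIF_OUT_AXI_AMEMTYPE_CONF1 with
+ * bit_off (9 - 8) * 4 = 4, so its 3-bit memory type sits in bits [6:4]
+ * of that register.
+ */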
+
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0xFF << bit_off);
+ reg_val |= (limit) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+ u32 limit;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ limit = (reg_val >> bit_off) & 0xFF;
+
+ return limit;
+}
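+
+/*
+ * For illustration: the limit registers pack four 8-bit fields, so
+ * xin_id 6 lives in *_LIM_CONF1 (reg_off + 4) at bit_off (6 % 4) * 8 = 16,
+ * i.e. bits [23:16].
+ */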
+
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+ if (enable)
+ reg_val |= BIT(xin_id);
+ else
+ reg_val &= ~BIT(xin_id);
+
+ DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+ return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+ if (!vbif)
+ return;
+
+ c = &vbif->hw;
+
+ reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+ reg_shift = (xin_id & 0x7) * 4;
+
+ reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+ mask = 0x7 << reg_shift;
+
+ reg_val &= ~mask;
+ reg_val |= (remap_level << reg_shift) & mask;
+
+ reg_val_lvl &= ~mask;
+ reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
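+
+/*
+ * For illustration: with xin_id 10 and level 2, reg_high is
+ * ((10 & 0x8) >> 3) * 4 + 2 * 8 = 20 and reg_shift is (10 & 0x7) * 4 = 8,
+ * so the remap value is written to bits [10:8] of the registers at
+ * offset VBIF_XINL_QOS_*_REMAP_000 + 0x14.
+ */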
+
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val;
+
+ if (!vbif || xin_id >= MAX_XIN_COUNT)
+ return;
+
+ c = &vbif->hw;
+
+ reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+ reg_val |= BIT(xin_id);
+ DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+ unsigned long cap)
+{
+ ops->set_limit_conf = dpu_hw_set_limit_conf;
+ ops->get_limit_conf = dpu_hw_get_limit_conf;
+ ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+ ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+ if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+ ops->set_qos_remap = dpu_hw_set_qos_remap;
+ ops->set_mem_type = dpu_hw_set_mem_type;
+ ops->clear_errors = dpu_hw_clear_errors;
+ ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->vbif_count; i++) {
+ if (vbif == m->vbif[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->vbif[i].base;
+ b->length = m->vbif[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_VBIF;
+ return &m->vbif[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_vbif *c;
+ const struct dpu_vbif_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_vbif_ops(&c->ops, c->cap->features);
+
+ /* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+ return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+ kfree(vbif);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 000000000000..471ff673c045
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+ /**
+ * set_limit_conf - set transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @limit: outstanding transaction limit
+ */
+ void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit);
+
+ /**
+ * get_limit_conf - get transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @return: outstanding transaction limit
+ */
+ u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd);
+
+ /**
+ * set_halt_ctrl - set halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @enable: halt control enable
+ */
+ void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable);
+
+ /**
+ * get_halt_ctrl - get halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @return: halt control enable
+ */
+ bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id);
+
+ /**
+ * set_qos_remap - set QoS priority remap
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @level: priority level
+ * @remap_level: remapped level
+ */
+ void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level);
+
+ /**
+ * set_mem_type - set memory type
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @value: memory type value
+ */
+ void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value);
+
+ /**
+ * clear_errors - clear any vbif errors
+ * This function clears any detected pending/source errors
+ * on the VBIF interface, and optionally returns the detected
+ * error mask(s).
+ * @vbif: vbif context driver
+ * @pnd_errors: pointer to pending error reporting variable
+ * @src_errors: pointer to source error reporting variable
+ */
+ void (*clear_errors)(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors);
+
+ /**
+ * set_write_gather_en - set write_gather enable
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ */
+ void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+ /* base */
+ struct dpu_hw_blk_reg_map hw;
+
+ /* vbif */
+ enum dpu_vbif idx;
+ const struct dpu_vbif_cfg *cap;
+
+ /* ops */
+ struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed VBIF idx
+ * @idx: VBIF index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 000000000000..5b2bc9b65b15
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/*
+ * MDP TOP block register and bit field definitions
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define INTF_INTR_EN 0x1C0
+#define INTF_INTR_STATUS 0x1C4
+#define INTF_INTR_CLEAR 0x1C8
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define HW_EVENTS_CTL 0x37C
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define HDMI_DP_CORE_SELECT 0x408
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define DCE_SEL 0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
new file mode 100644
index 000000000000..790d39f816dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -0,0 +1,203 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "dpu_io_util.h"
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+ int i;
+
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+}
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+ rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
+ if (rc) {
+ DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ for (i--; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+
+ return rc;
+}
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_arry[i].clk) {
+ if (clk_arry[i].type != DSS_CLK_AHB) {
+ DEV_DBG("%pS->%s: '%s' rate %ld\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name,
+ clk_arry[i].rate);
+ rc = clk_set_rate(clk_arry[i].clk,
+ clk_arry[i].rate);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ break;
+ }
+ }
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+ int i, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ DEV_DBG("%pS->%s: enable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ if (clk_arry[i].clk) {
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ }
+
+ if (rc) {
+ /* unwind: disable the clocks enabled so far */
+ msm_dss_enable_clk(clk_arry, i, false);
+ break;
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: disable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+
+ if (clk_arry[i].clk)
+ clk_disable_unprepare(clk_arry[i].clk);
+ else
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ }
+ }
+
+ return rc;
+}
+
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ int i, rc = 0;
+ const char *clock_name;
+ int num_clk = 0;
+
+ if (!pdev || !mp)
+ return -EINVAL;
+
+ mp->num_clk = 0;
+ num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+ if (num_clk <= 0) {
+ pr_debug("clocks are not defined\n");
+ return 0;
+ }
+
+ mp->clk_config = devm_kzalloc(&pdev->dev,
+ sizeof(struct dss_clk) * num_clk,
+ GFP_KERNEL);
+ if (!mp->clk_config)
+ return -ENOMEM;
+
+ for (i = 0; i < num_clk; i++) {
+ rc = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i,
+ &clock_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+ i);
+ break;
+ }
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ mp->clk_config[i].type = DSS_CLK_AHB;
+ }
+
+ rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+ goto err;
+ }
+
+ rc = of_clk_set_defaults(pdev->dev.of_node, false);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+ goto err;
+ }
+
+ for (i = 0; i < num_clk; i++) {
+ unsigned long rate = clk_get_rate(mp->clk_config[i].clk);
+
+ if (!rate)
+ continue;
+ mp->clk_config[i].rate = rate;
+ mp->clk_config[i].type = DSS_CLK_PCLK;
+ }
+
+ mp->num_clk = num_clk;
+ return 0;
+
+err:
+ msm_dss_put_clk(mp->clk_config, num_clk);
+ return rc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
new file mode 100644
index 000000000000..bc07381d7429
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IO_UTIL_H__
+#define __DPU_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
+#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
+
+struct dss_gpio {
+ unsigned int gpio;
+ unsigned int value;
+ char gpio_name[32];
+};
+
+enum dss_clk_type {
+ DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+ DSS_CLK_PCLK,
+};
+
+struct dss_clk {
+ struct clk *clk; /* clk handle */
+ char clk_name[32];
+ enum dss_clk_type type;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+struct dss_module_power {
+ unsigned int num_gpio;
+ struct dss_gpio *gpio_config;
+ unsigned int num_clk;
+ struct dss_clk *clk_config;
+};
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp);
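+
+/*
+ * Typical call sequence (a minimal sketch: error handling is trimmed and
+ * the "priv" container is hypothetical; dpu_bind()/dpu_unbind() and the
+ * runtime PM hooks in dpu_kms.c follow the same pattern):
+ *
+ *   struct dss_module_power *mp = &priv->mp;
+ *
+ *   rc = msm_dss_parse_clock(pdev, mp);
+ *   rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ *   ...
+ *   msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ *   msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ */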
+#endif /* __DPU_IO_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
new file mode 100644
index 000000000000..d5e6ce0140cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_irq.h"
+#include "dpu_core_irq.h"
+
+irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ return dpu_core_irq(dpu_kms);
+}
+
+void dpu_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev) {
+ pr_err("invalid device handles\n");
+ return;
+ }
+
+ dpu_core_irq_preinstall(dpu_kms);
+}
+
+int dpu_irq_postinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+ int rc;
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+ rc = dpu_core_irq_postinstall(dpu_kms);
+
+ return rc;
+}
+
+void dpu_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+ dpu_core_irq_uninstall(dpu_kms);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
new file mode 100644
index 000000000000..3e147f7176e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IRQ_H__
+#define __DPU_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * dpu_irq_controller - define MDSS level interrupt controller context
+ * @enabled_mask: enable status of MDSS level interrupt
+ * @domain: interrupt domain of this controller
+ */
+struct dpu_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
+/**
+ * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq - MDSS level IRQ handler
+ * @kms: pointer to kms context
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_irq(struct msm_kms *kms);
+
+#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 000000000000..7dd6bd2d6d37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1345 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+
+#include "dpu_kms.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_vbif.h"
+#include "dpu_encoder.h"
+#include "dpu_plane.h"
+#include "dpu_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+static const char * const iommu_ports[] = {
+ "mdp_0",
+};
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+static unsigned long dpu_iomap_size(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ DRM_ERROR("failed to get memory resource: %s\n", name);
+ return 0;
+ }
+
+ return resource_size(res);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+ bool danger_status)
+{
+ struct dpu_kms *kms = (struct dpu_kms *)s->private;
+ struct msm_drm_private *priv;
+ struct dpu_danger_safe_status status;
+ int i;
+
+ if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ DPU_ERROR("invalid arg(s)\n");
+ return 0;
+ }
+
+ priv = kms->dev->dev_private;
+ memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+ pm_runtime_get_sync(&kms->pdev->dev);
+ if (danger_status) {
+ seq_puts(s, "\nDanger signal status:\n");
+ if (kms->hw_mdp->ops.get_danger_status)
+ kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ &status);
+ } else {
+ seq_puts(s, "\nSafe signal status:\n");
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+ &status);
+ }
+ pm_runtime_put_sync(&kms->pdev->dev);
+
+ seq_printf(s, "MDP : 0x%x\n", status.mdp);
+
+ for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+ seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
+ status.sspp[i]);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
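+/*
+ * Stamp out read-only seq_file fops for a given *_show() function; each use
+ * below defines a <prefix>_open() wrapper and a matching <prefix>_fops table.
+ */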
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, true);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, false);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+
+static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_danger);
+ dpu_kms->debugfs_danger = NULL;
+}
+
+static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->debugfs_danger = debugfs_create_dir("danger",
+ parent);
+ if (!dpu_kms->debugfs_danger) {
+ DPU_ERROR("failed to create danger debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_danger_stats_fops);
+ debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_safe_stats_fops);
+
+ return 0;
+}
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+ struct dpu_debugfs_regset32 *regset;
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ void __iomem *base;
+ uint32_t i, addr;
+
+ if (!s || !s->private)
+ return 0;
+
+ regset = s->private;
+
+ dpu_kms = regset->dpu_kms;
+ if (!dpu_kms || !dpu_kms->mmio)
+ return 0;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return 0;
+
+ priv = dev->dev_private;
+ if (!priv)
+ return 0;
+
+ base = dpu_kms->mmio + regset->offset;
+
+ /* insert padding spaces, if needed */
+ if (regset->offset & 0xF) {
+ seq_printf(s, "[%x]", regset->offset & ~0xF);
+ for (i = 0; i < (regset->offset & 0xF); i += 4)
+ seq_puts(s, " ");
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* main register output */
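+ /* format: "[offset] v0 v1 v2 v3", i.e. 16 bytes of registers per row */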
+ for (i = 0; i < regset->blk_len; i += 4) {
+ addr = regset->offset + i;
+ if ((addr & 0xF) == 0x0)
+ seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+ seq_printf(s, " %08x", readl_relaxed(base + i));
+ }
+ seq_puts(s, "\n");
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+ .open = dpu_debugfs_open_regset32,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+ if (regset) {
+ regset->offset = offset;
+ regset->blk_len = length;
+ regset->dpu_kms = dpu_kms;
+ }
+}
+
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset)
+{
+ if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+ return NULL;
+
+ /* make sure offset is a multiple of 4 */
+ regset->offset = round_down(regset->offset, 4);
+
+ return debugfs_create_file(name, mode, parent,
+ regset, &dpu_fops_regset32);
+}
+
+static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+{
+ void *p;
+ int rc;
+
+ p = dpu_hw_util_get_log_mask_ptr();
+
+ if (!dpu_kms || !p)
+ return -EINVAL;
+
+ dpu_kms->debugfs_root = debugfs_create_dir("debug",
+ dpu_kms->dev->primary->debugfs_root);
+ if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
+ DRM_ERROR("debugfs create_dir failed %ld\n",
+ PTR_ERR(dpu_kms->debugfs_root));
+ /* don't let a NULL dentry be returned as success */
+ return dpu_kms->debugfs_root ?
+ PTR_ERR(dpu_kms->debugfs_root) : -EINVAL;
+ }
+
+ rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
+ if (rc) {
+ DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
+ return rc;
+ }
+
+ /* allow root to be NULL */
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+
+ (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+
+ rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+ /* don't need to NULL check debugfs_root */
+ if (dpu_kms) {
+ dpu_debugfs_vbif_destroy(dpu_kms);
+ dpu_debugfs_danger_destroy(dpu_kms);
+ dpu_debugfs_core_irq_destroy(dpu_kms);
+ debugfs_remove_recursive(dpu_kms->debugfs_root);
+ }
+}
+#else
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_device *dev;
+ struct drm_encoder *encoder;
+
+ if (!kms)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+
+ if (!dev || !dev->dev_private)
+ return;
+ priv = dev->dev_private;
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc != NULL)
+ dpu_encoder_prepare_commit(encoder);
+}
+
+/*
+ * Override the encoder enable since we need to setup the inline rotator and do
+ * some crtc magic before enabling any bridge that might be present.
+ */
+void dpu_kms_encoder_enable(struct drm_encoder *encoder)
+{
+ const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
+ struct drm_crtc *crtc = encoder->crtc;
+
+ /* Forward this enable call to the commit hook */
+ if (funcs && funcs->commit)
+ funcs->commit(encoder);
+
+ if (crtc && crtc->state->active) {
+ trace_dpu_kms_enc_enable(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+}
+
+static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ /* If modeset is required, kickoff is run in encoder_enable */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ continue;
+
+ if (crtc->state->active) {
+ trace_dpu_kms_commit(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+ }
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ if (!kms || !old_state)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
+ return;
+ priv = dpu_kms->dev->dev_private;
+
+ DPU_ATRACE_BEGIN("kms_complete_commit");
+
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ dpu_crtc_complete_commit(crtc, old_crtc_state);
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ int ret;
+
+ if (!kms || !crtc || !crtc->state) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ dev = crtc->dev;
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+ return;
+ }
+
+ if (!crtc->state->active) {
+ DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+ return;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ /*
+ * Wait for post-flush if necessary to delay before
+ * plane_cleanup. For example, wait for vsync in case of video
+ * mode panels. This may be a no-op for command mode panels.
+ */
+ trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+ ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+ if (ret && ret != -EWOULDBLOCK) {
+ DPU_ERROR("wait for commit done returned %d\n", ret);
+ break;
+ }
+ }
+}
+
+static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ int i, rc;
+
+ /*TODO: Support two independent DSI connectors */
+ encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR_OR_NULL(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return;
+ }
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (!priv->dsi[i]) {
+ DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
+ return;
+ }
+
+ rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ i, rc);
+ continue;
+ }
+ }
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ * for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @dpu_kms: Pointer to dpu kms structure
+ */
+static void _dpu_kms_setup_displays(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+
+ /*
+ * Extend this function to initialize other types of displays.
+ */
+}
+
+static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+ priv->num_crtcs = 0;
+
+ for (i = 0; i < priv->num_planes; i++)
+ priv->planes[i]->funcs->destroy(priv->planes[i]);
+ priv->num_planes = 0;
+
+ for (i = 0; i < priv->num_connectors; i++)
+ priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+ priv->num_connectors = 0;
+
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+ priv->num_encoders = 0;
+}
+
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_crtc *crtc;
+
+ struct msm_drm_private *priv;
+ struct dpu_mdss_cfg *catalog;
+
+ int primary_planes_idx = 0, i, ret;
+ int max_crtc_count;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+ catalog = dpu_kms->catalog;
+
+ /*
+ * Create encoder and query display drivers to create
+ * bridges and connectors
+ */
+ _dpu_kms_setup_displays(dev, priv, dpu_kms);
+
+ max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+ /* Create the planes */
+ for (i = 0; i < catalog->sspp_count; i++) {
+ bool primary = true;
+
+ if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
+ || primary_planes_idx >= max_crtc_count)
+ primary = false;
+
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
+ (1UL << max_crtc_count) - 1, 0);
+ if (IS_ERR(plane)) {
+ DPU_ERROR("dpu_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ if (primary)
+ primary_planes[primary_planes_idx++] = plane;
+ }
+
+ max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+ /* Create one CRTC per encoder */
+ for (i = 0; i < max_crtc_count; i++) {
+ crtc = dpu_crtc_init(dev, primary_planes[i]);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /* All CRTCs are compatible with all encoders */
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+fail:
+ _dpu_kms_drm_obj_destroy(dpu_kms);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_device *dev;
+ int rc;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+
+ rc = _dpu_debugfs_init(dpu_kms);
+ if (rc)
+ DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
+
+ return rc;
+}
+#endif
+
+static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ int i;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return;
+
+ if (dpu_kms->hw_intr)
+ dpu_hw_intr_destroy(dpu_kms->hw_intr);
+ dpu_kms->hw_intr = NULL;
+
+ if (dpu_kms->power_event)
+ dpu_power_handle_unregister_event(
+ &dpu_kms->phandle, dpu_kms->power_event);
+
+ /* safe to call these more than once during shutdown */
+ _dpu_debugfs_destroy(dpu_kms);
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ if (dpu_kms->catalog) {
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ }
+ }
+
+ if (dpu_kms->rm_init)
+ dpu_rm_destroy(&dpu_kms->rm);
+ dpu_kms->rm_init = false;
+
+ if (dpu_kms->catalog)
+ dpu_hw_catalog_deinit(dpu_kms->catalog);
+ dpu_kms->catalog = NULL;
+
+ if (dpu_kms->core_client)
+ dpu_power_client_destroy(&dpu_kms->phandle,
+ dpu_kms->core_client);
+ dpu_kms->core_client = NULL;
+
+ if (dpu_kms->vbif[VBIF_NRT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+
+ if (dpu_kms->vbif[VBIF_RT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+
+ if (dpu_kms->mmio)
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+ dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+
+ dpu_dbg_destroy();
+ _dpu_kms_hw_destroy(dpu_kms);
+}
+
+static int dpu_kms_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct dpu_kms *dpu_kms;
+ int ret = 0, num_crtcs = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ /* disable hot-plug polling */
+ drm_kms_helper_poll_disable(ddev);
+
+ /* acquire modeset lock(s) */
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ DPU_ATRACE_BEGIN("kms_pm_suspend");
+
+ ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+ if (ret)
+ goto unlock;
+
+ /* save current state for resume */
+ if (dpu_kms->suspend_state)
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+ if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
+ DRM_ERROR("failed to back up suspend state\n");
+ dpu_kms->suspend_state = NULL;
+ goto unlock;
+ }
+
+ /* create atomic state to disable all CRTCs */
+ state = drm_atomic_state_alloc(ddev);
+ if (IS_ERR_OR_NULL(state)) {
+ DRM_ERROR("failed to allocate crtc disable state\n");
+ goto unlock;
+ }
+
+ state->acquire_ctx = &ctx;
+
+ /* check for nothing to do */
+ if (num_crtcs == 0) {
+ DRM_DEBUG("all crtcs are already in the off state\n");
+ drm_atomic_state_put(state);
+ goto suspended;
+ }
+
+ /* commit the "disable all" state */
+ ret = drm_atomic_commit(state);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable crtcs, %d\n", ret);
+ drm_atomic_state_put(state);
+ goto unlock;
+ }
+
+suspended:
+ dpu_kms->suspend_block = true;
+
+unlock:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ DPU_ATRACE_END("kms_pm_suspend");
+ return 0;
+}
+
+static int dpu_kms_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct dpu_kms *dpu_kms;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ DPU_ATRACE_BEGIN("kms_pm_resume");
+
+ drm_mode_config_reset(ddev);
+
+ drm_modeset_lock_all(ddev);
+
+ dpu_kms->suspend_block = false;
+
+ if (dpu_kms->suspend_state) {
+ dpu_kms->suspend_state->acquire_ctx =
+ ddev->mode_config.acquire_ctx;
+ ret = drm_atomic_commit(dpu_kms->suspend_state);
+ if (ret < 0) {
+ DRM_ERROR("failed to restore state, %d\n", ret);
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ }
+ dpu_kms->suspend_state = NULL;
+ }
+ drm_modeset_unlock_all(ddev);
+
+ /* enable hot-plug polling */
+ drm_kms_helper_poll_enable(ddev);
+
+ DPU_ATRACE_END("kms_pm_resume");
+ return 0;
+}
+
+static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ bool cmd_mode)
+{
+ struct msm_display_info info;
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ int i, rc = 0;
+
+ memset(&info, 0, sizeof(info));
+
+ info.intf_type = encoder->encoder_type;
+ info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ /* TODO: No support for DSI swap */
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (priv->dsi[i]) {
+ info.h_tile_instance[info.num_of_h_tiles] = i;
+ info.num_of_h_tiles++;
+ }
+ }
+
+ rc = dpu_encoder_setup(encoder->dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = dpu_kms_hw_init,
+ .irq_preinstall = dpu_irq_preinstall,
+ .irq_postinstall = dpu_irq_postinstall,
+ .irq_uninstall = dpu_irq_uninstall,
+ .irq = dpu_irq,
+ .prepare_commit = dpu_kms_prepare_commit,
+ .commit = dpu_kms_commit,
+ .complete_commit = dpu_kms_complete_commit,
+ .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
+ .enable_vblank = dpu_kms_enable_vblank,
+ .disable_vblank = dpu_kms_disable_vblank,
+ .check_modified_format = dpu_format_check_modified_format,
+ .get_format = dpu_get_msm_format,
+ .round_pixclk = dpu_kms_round_pixclk,
+ .pm_suspend = dpu_kms_pm_suspend,
+ .pm_resume = dpu_kms_pm_resume,
+ .destroy = dpu_kms_destroy,
+ .set_encoder_mode = _dpu_kms_set_encoder_mode,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = dpu_kms_debugfs_init,
+#endif
+};
+
+/* the caller needs to turn on the clocks before calling this function */
+static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
+{
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+}
+
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_mmu *mmu;
+
+ mmu = dpu_kms->base.aspace->mmu;
+
+ mmu->funcs->detach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(dpu_kms->base.aspace);
+
+ return 0;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+ struct iommu_domain *domain;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return 0;
+
+ aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
+ domain, "dpu1");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ dpu_kms->base.aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ DPU_ERROR("failed to attach iommu %d\n", ret);
+ msm_gem_address_space_put(aspace);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ return ret;
+}
+
+static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+ char *clock_name)
+{
+ struct dss_module_power *mp = &dpu_kms->mp;
+ int i;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name))
+ return &mp->clk_config[i];
+ }
+
+ return NULL;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+ struct dss_clk *clk;
+
+ clk = _dpu_kms_get_clk(dpu_kms, clock_name);
+ if (!clk)
+ return 0;
+
+ return clk_get_rate(clk->clk);
+}
+
+static void dpu_kms_handle_power_event(u32 event_type, void *usr)
+{
+ struct dpu_kms *dpu_kms = usr;
+
+ if (!dpu_kms)
+ return;
+
+ if (event_type == DPU_POWER_EVENT_POST_ENABLE)
+ dpu_vbif_init_memtypes(dpu_kms);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ int i, rc = -EINVAL;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ goto end;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+ if (!dev) {
+ DPU_ERROR("invalid device\n");
+ goto end;
+ }
+
+ rc = dpu_dbg_init(&dpu_kms->pdev->dev);
+ if (rc) {
+ DRM_ERROR("failed to init dpu dbg: %d\n", rc);
+ goto end;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("invalid private data\n");
+ goto dbg_destroy;
+ }
+
+ dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ rc = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", rc);
+ dpu_kms->mmio = NULL;
+ goto error;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", rc);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ goto error;
+ }
+ dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ } else {
+ dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
+ "vbif_nrt");
+ }
+
+ dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+ if (IS_ERR(dpu_kms->reg_dma)) {
+ dpu_kms->reg_dma = NULL;
+ DPU_DEBUG("REG_DMA is not defined");
+ } else {
+ dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
+ }
+
+ dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
+ "core");
+ if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
+ rc = PTR_ERR(dpu_kms->core_client);
+ if (!dpu_kms->core_client)
+ rc = -EINVAL;
+ DPU_ERROR("dpu power client create failed: %d\n", rc);
+ dpu_kms->core_client = NULL;
+ goto error;
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ _dpu_kms_core_hw_rev_init(dpu_kms);
+
+ pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+ dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+ if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+ rc = PTR_ERR(dpu_kms->catalog);
+ if (!dpu_kms->catalog)
+ rc = -EINVAL;
+ DPU_ERROR("catalog init failed: %d\n", rc);
+ dpu_kms->catalog = NULL;
+ goto power_error;
+ }
+
+ dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
+
+ /*
+ * Now we need to read the HW catalog and initialize resources such as
+ * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+ */
+ rc = _dpu_kms_mmu_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+ dpu_kms->dev);
+ if (rc) {
+ DPU_ERROR("rm init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ dpu_kms->rm_init = true;
+
+ dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+ rc = PTR_ERR(dpu_kms->hw_mdp);
+ if (!dpu_kms->hw_mdp)
+ rc = -EINVAL;
+ DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+ dpu_kms->hw_mdp = NULL;
+ goto power_error;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+ rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+ if (!dpu_kms->hw_vbif[vbif_idx])
+ rc = -EINVAL;
+ DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ goto power_error;
+ }
+ }
+
+ rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+ &dpu_kms->phandle,
+ _dpu_kms_get_clk(dpu_kms, "core"));
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ goto perf_err;
+ }
+
+ dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ rc = PTR_ERR(dpu_kms->hw_intr);
+ if (!dpu_kms->hw_intr)
+ rc = -EINVAL;
+ DPU_ERROR("hw_intr init failed: %d\n", rc);
+ dpu_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
+ /*
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
+ */
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * max crtc width is equal to the max mixer width * 2 and max height
+ * is 4K
+ */
+ dev->mode_config.max_width =
+ dpu_kms->catalog->caps->max_mixer_width * 2;
+ dev->mode_config.max_height = 4096;
+
+ /*
+ * Support format modifiers for compression etc.
+ */
+ dev->mode_config.allow_fb_modifiers = true;
+
+ /*
+ * Handle (re)initializations during power enable
+ */
+ dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+ dpu_kms->power_event = dpu_power_handle_register_event(
+ &dpu_kms->phandle,
+ DPU_POWER_EVENT_POST_ENABLE,
+ dpu_kms_handle_power_event, dpu_kms, "kms");
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+
+drm_obj_init_err:
+hw_intr_init_err:
+ dpu_core_perf_destroy(&dpu_kms->perf);
+perf_err:
+power_error:
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+ _dpu_kms_hw_destroy(dpu_kms);
+dbg_destroy:
+ dpu_dbg_destroy();
+end:
+ return rc;
+}
+
+struct msm_kms *dpu_kms_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int irq;
+
+ if (!dev || !dev->dev_private) {
+ DPU_ERROR("drm device node invalid\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ priv = dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+ if (!irq) {
+ /* irq_of_parse_and_map() returns zero on failure */
+ DPU_ERROR("failed to get irq\n");
+ return ERR_PTR(-EINVAL);
+ }
+ dpu_kms->base.irq = irq;
+
+ return &dpu_kms->base;
+}
+
+static int dpu_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *ddev = dev_get_drvdata(master);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct dpu_kms *dpu_kms;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+ if (!dpu_kms)
+ return -ENOMEM;
+
+ mp = &dpu_kms->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ return ret;
+ }
+
+ dpu_power_resource_init(pdev, &dpu_kms->phandle);
+
+ platform_set_drvdata(pdev, dpu_kms);
+
+ msm_kms_init(&dpu_kms->base, &kms_funcs);
+ dpu_kms->dev = ddev;
+ dpu_kms->pdev = pdev;
+
+ pm_runtime_enable(&pdev->dev);
+ dpu_kms->rpm_enabled = true;
+
+ priv->kms = &dpu_kms->base;
+ return ret;
+}
+
+static void dpu_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+ mp->num_clk = 0;
+
+ if (dpu_kms->rpm_enabled)
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct component_ops dpu_ops = {
+ .bind = dpu_bind,
+ .unbind = dpu_unbind,
+};
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dpu_ops);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dpu_ops);
+ return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+ int rc = -EINVAL;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, false);
+ if (rc)
+ DPU_ERROR("resource disable failed: %d\n", rc);
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (rc)
+ DPU_ERROR("clock disable failed rc:%d\n", rc);
+
+exit:
+ return rc;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+ int rc = -EINVAL;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (rc) {
+ DPU_ERROR("clock enable failed rc:%d\n", rc);
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, true);
+ if (rc)
+ DPU_ERROR("resource enable failed: %d\n", rc);
+
+exit:
+ return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,sdm845-dpu", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+ .probe = dpu_dev_probe,
+ .remove = dpu_dev_remove,
+ .driver = {
+ .name = "msm_dpu",
+ .of_match_table = dpu_dt_match,
+ .pm = &dpu_pm_ops,
+ },
+};
+
+void __init msm_dpu_register(void)
+{
+ platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+ platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 000000000000..66d466628e2b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_dbg.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_power_handle.h"
+#include "dpu_irq.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_KMS)) \
+ DRM_DEBUG(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ DRM_ERROR(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error] " fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
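+ * For example, with two timestamps taken on either side of s64 wrap-around,
+ * the sign of ktime_sub(A, B) still orders them correctly, whereas a direct
+ * ktime_compare(A, B) would not.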
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+ ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+
+#define DPU_NAME_SIZE 12
+
+/* timeout in frames waiting for frame done */
+#define DPU_FRAME_DONE_TIMEOUT 60
+
+/*
+ * struct dpu_irq_callback - IRQ callback handlers
+ * @list: list to callback
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct dpu_irq_callback {
+ struct list_head list;
+ void (*func)(void *arg, int irq_idx);
+ void *arg;
+};
+
+/**
+ * struct dpu_irq: IRQ structure containing callback registration info
+ * @total_irqs: total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl: array of IRQ callback lists, one per irq_idx
+ * @enable_counts: array of IRQ enable reference counts
+ * @irq_counts: array of IRQ counts, used for irq statistics
+ * @cb_lock: callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct dpu_irq {
+ u32 total_irqs;
+ struct list_head *irq_cb_tbl;
+ atomic_t *enable_counts;
+ atomic_t *irq_counts;
+ spinlock_t cb_lock;
+ struct dentry *debugfs_file;
+};
+
+struct dpu_kms {
+ struct msm_kms base;
+ struct drm_device *dev;
+ int core_rev;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_power_handle phandle;
+ struct dpu_power_client *core_client;
+ struct dpu_power_event *power_event;
+
+ /* directory entry for debugfs */
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_danger;
+ struct dentry *debugfs_vbif;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+ unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
+
+ struct regulator *vdd;
+ struct regulator *mmagic;
+ struct regulator *venus;
+
+ struct dpu_hw_intr *hw_intr;
+ struct dpu_irq irq_obj;
+
+ struct dpu_core_perf perf;
+
+ /* saved atomic state during system suspend */
+ struct drm_atomic_state *suspend_state;
+ bool suspend_block;
+
+ struct dpu_rm rm;
+ bool rm_init;
+
+ struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+
+ bool has_danger_ctrl;
+
+ struct platform_device *pdev;
+ bool rpm_enabled;
+ struct dss_module_power mp;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+ ((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
+
+/**
+ * dpu_kms_is_suspend_state - whether or not the system is pm suspended
+ * @dev: Pointer to drm device
+ * Return: Suspend status
+ */
+static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
+{
+ if (!ddev_to_msm_kms(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
+}
+
+/**
+ * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
+ * suspend status
+ * @dev: Pointer to drm device
+ * Return: True if commits should be rejected due to pm suspend
+ */
+static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
+{
+ if (!dpu_kms_is_suspend_state(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
+}
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for dpu_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
+ */
+struct dpu_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct dpu_kms *dpu_kms;
+};
+
+/**
+ * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize dpu_debugfs_regset32 structures for use
+ * with dpu_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * dpu_debugfs_setup_regset32 to initialize it.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ * or debugfs_remove_recursive() (on a parent directory) to remove the
+ * file
+ */
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset);
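+
+/*
+ * Example pairing of the two calls above (a sketch only: the file name,
+ * offset and length are made up, and the regset must outlive the file):
+ *
+ *   static struct dpu_debugfs_regset32 regset;
+ *
+ *   dpu_debugfs_setup_regset32(&regset, 0x1000, 0x100, dpu_kms);
+ *   dpu_debugfs_create_regset32("ctl_regs", 0400, parent, &regset);
+ */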
+
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE 4096
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void dpu_kms_encoder_enable(struct drm_encoder *encoder);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate, or zero if the clock is not found
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
new file mode 100644
index 000000000000..9e533b86682c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -0,0 +1,245 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include "dpu_kms.h"
+
+#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+
+#define HW_INTR_STATUS 0x0010
+
+struct dpu_mdss {
+ struct msm_mdss base;
+ void __iomem *mmio;
+ unsigned long mmio_len;
+ u32 hwversion;
+ struct dss_module_power mp;
+ struct dpu_irq_controller irq_controller;
+};
+
+static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+{
+ struct dpu_mdss *dpu_mdss = arg;
+ u32 interrupts;
+
+ interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
+
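+ /* demux: dispatch each pending status bit, highest bit first */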
+ while (interrupts) {
+ irq_hw_number_t hwirq = fls(interrupts) - 1;
+ unsigned int mapping;
+ int rc;
+
+ mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
+ hwirq);
+ if (mapping == 0) {
+ DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
+ return IRQ_NONE;
+ }
+
+ rc = generic_handle_irq(mapping);
+ if (rc < 0) {
+ DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
+ hwirq, mapping, rc);
+ return IRQ_NONE;
+ }
+
+ interrupts &= ~BIT(hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void dpu_mdss_irq_mask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* order the enabled_mask update against the atomic bit op */
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void dpu_mdss_irq_unmask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* order the enabled_mask update against the atomic bit op */
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip dpu_mdss_irq_chip = {
+ .name = "dpu_mdss",
+ .irq_mask = dpu_mdss_irq_mask,
+ .irq_unmask = dpu_mdss_irq_unmask,
+};
+
+static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct dpu_mdss *dpu_mdss = domain->host_data;
+ int ret;
+
+ irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
+ ret = irq_set_chip_data(irq, dpu_mdss);
+
+ return ret;
+}
+
+static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
+ .map = dpu_mdss_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ dev = dpu_mdss->base.dev->dev;
+
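+ /* one linear domain slot for each bit of the 32-bit HW_INTR_STATUS */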
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &dpu_mdss_irqdomain_ops, dpu_mdss);
+ if (!domain) {
+ DPU_ERROR("failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ dpu_mdss->irq_controller.enabled_mask = 0;
+ dpu_mdss->irq_controller.domain = domain;
+
+ return 0;
+}
+
+static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+{
+ if (dpu_mdss->irq_controller.domain) {
+ irq_domain_remove(dpu_mdss->irq_controller.domain);
+ dpu_mdss->irq_controller.domain = NULL;
+ }
+ return 0;
+}
+
+static int dpu_mdss_enable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (ret)
+ DPU_ERROR("clock enable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static int dpu_mdss_disable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (ret)
+ DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static void dpu_mdss_destroy(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+
+ pm_runtime_disable(dev->dev);
+ priv->mdss = NULL;
+}
+
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = dpu_mdss_enable,
+ .disable = dpu_mdss_disable,
+ .destroy = dpu_mdss_destroy,
+};
+
+int dpu_mdss_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct resource *res;
+ struct dpu_mdss *dpu_mdss;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+ if (!dpu_mdss)
+ return -ENOMEM;
+
+ dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
+ if (IS_ERR(dpu_mdss->mmio))
+ return PTR_ERR(dpu_mdss->mmio);
+
+ DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
+ if (!res) {
+ DRM_ERROR("failed to get memory resource for mdss\n");
+ return -ENOMEM;
+ }
+ dpu_mdss->mmio_len = resource_size(res);
+
+ mp = &dpu_mdss->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ goto clk_parse_err;
+ }
+
+ dpu_mdss->base.dev = dev;
+ dpu_mdss->base.funcs = &mdss_funcs;
+
+ ret = _dpu_mdss_irq_domain_add(dpu_mdss);
+ if (ret)
+ goto irq_domain_error;
+
+ ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
+ if (ret) {
+ DPU_ERROR("failed to init irq: %d\n", ret);
+ goto irq_error;
+ }
+
+ pm_runtime_enable(dev->dev);
+
+ pm_runtime_get_sync(dev->dev);
+ dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
+ pm_runtime_put_sync(dev->dev);
+
+ priv->mdss = &dpu_mdss->base;
+
+ return ret;
+
+irq_error:
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+irq_domain_error:
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_parse_err:
+ devm_kfree(&pdev->dev, mp->clk_config);
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 000000000000..b640e39ebaca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1963 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
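+/*
+ * For example, decimating a 1080-line source by a factor of 2 (deci = 1)
+ * gives DECIMATED_DIMENSION(1080, 1) = (1080 + 1) >> 1 = 540 lines, and
+ * the rounding term handles odd sizes: DECIMATED_DIMENSION(1081, 1) = 541.
+ */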
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+#define DPU_NAME_SIZE 12
+
+#define DPU_PLANE_COLOR_FILL_FLAG BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+ R0,
+ R1,
+ R_MAX
+};
+
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+#define DEFAULT_REFRESH_RATE 60
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables amortization within the pipe.
+ * This configuration is mutually exclusive with VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+ DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+ DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+ DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/**
+ * struct dpu_plane - local dpu plane structure
+ * @csc_ptr: Points to dpu_csc_cfg structure to use for the current frame
+ * @mplane_list: List of multirect planes of the same pipe
+ * @catalog: Points to dpu catalog structure
+ */
+struct dpu_plane {
+ struct drm_plane base;
+
+ struct mutex lock;
+
+ enum dpu_sspp pipe;
+ uint32_t features; /* capabilities from catalog */
+ uint32_t nformats;
+ uint32_t formats[64];
+
+ struct dpu_hw_pipe *pipe_hw;
+ struct dpu_hw_pipe_cfg pipe_cfg;
+ struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+ uint32_t color_fill;
+ bool is_error;
+ bool is_rt_pipe;
+ bool is_virtual;
+ struct list_head mplane_list;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_csc_cfg *csc_ptr;
+
+ const struct dpu_sspp_sub_blks *pipe_sblk;
+ char pipe_name[DPU_NAME_SIZE];
+
+ /* debugfs related stuff */
+ struct dentry *debugfs_root;
+ struct dpu_debugfs_regset32 debugfs_src;
+ struct dpu_debugfs_regset32 debugfs_scaler;
+ struct dpu_debugfs_regset32 debugfs_csc;
+ bool debugfs_default_scale;
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv;
+
+ if (!plane || !plane->dev)
+ return NULL;
+ priv = plane->dev->dev_private;
+ if (!priv)
+ return NULL;
+ return to_dpu_kms(priv->kms);
+}
+
+static bool dpu_plane_enabled(struct drm_plane_state *state)
+{
+ return state && state->fb && state->crtc;
+}
+
+static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
+{
+ return state && state->crtc;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane: Pointer to drm plane
+ * @fmt: Pointer to source buffer format
+ * @src_width: width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+ const struct dpu_format *fmt, u32 src_width)
+{
+ struct dpu_plane *pdpu, *tmp;
+ struct dpu_plane_state *pstate;
+ u32 fixed_buff_size;
+ u32 total_fl;
+
+ if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+ DPU_ERROR("invalid arguments\n");
+ return 0;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+ fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+
+ list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+ if (!dpu_plane_enabled(tmp->base.state))
+ continue;
+ DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
+ pdpu->base.base.id, tmp->base.base.id,
+ src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ src_width = max_t(u32, src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ }
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ if (fmt->chroma_sample == DPU_CHROMA_420) {
+ /* NV12 */
+ total_fl = (fixed_buff_size / 2) /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ /* non NV12 */
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ } else {
+ if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ total_fl = (fixed_buff_size) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ plane->base.id, pdpu->pipe - SSPP_VIG0,
+ (char *)&fmt->base.pixel_format,
+ src_width, total_fl);
+
+ return total_fl;
+}
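+
+/*
+ * Worked example of the fill-level math above, assuming an illustrative
+ * pixel_ram_size of 50KB (the real value comes from the catalog): a linear
+ * 32bpp layer (fmt->bpp = 4) fetched at src_width = 1920 in solo mode gives
+ * total_fl = (51200 * 2) / ((1920 + 32) * 4) = 13 (integer division).
+ */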
+
+/**
+ * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl)
+{
+ int i;
+
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
+
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
+
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
+
+ return 0;
+}
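+
+/*
+ * For instance, with hypothetical table entries {.fl = 10, .lut = 0xA},
+ * {.fl = 11, .lut = 0xB}, {.fl = 0, .lut = 0xC}: a fill level of 10 maps
+ * to 0xA, 11 maps to 0xB, and anything larger falls through to 0xC, the
+ * zero-fl catch-all entry.
+ */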
+
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u64 qos_lut;
+ u32 total_fl = 0, lut_usage;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments plane %d fb %d\n",
+ plane != 0, fb != 0);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ lut_usage = DPU_QOS_LUT_USAGE_NRT;
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+ total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect));
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+ lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+ else
+ lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+ }
+
+ qos_lut = _dpu_plane_get_qos_lut(
+ &pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
+ pdpu->pipe_qos_cfg.creq_lut = qos_lut;
+
+ trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ pdpu->is_rt_pipe, total_fl, qos_lut);
+
+ pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u32 danger_lut, safe_lut;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ } else {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ }
+ }
+
+ pdpu->pipe_qos_cfg.danger_lut = danger_lut;
+ pdpu->pipe_qos_cfg.safe_lut = safe_lut;
+
+ trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ (fmt) ? fmt->fetch_mode : 0,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ fmt ? fmt->fetch_mode : -1,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane: Pointer to drm plane
+ * @enable: true to enable QoS control
+ * @flags: QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+ bool enable, u32 flags)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
+ return;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+ pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
+ pdpu->pipe_qos_cfg.danger_vblank =
+ pdpu->pipe_sblk->danger_vblank;
+ pdpu->pipe_qos_cfg.vblank_en = enable;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+ /* this feature overrules previous VBLANK_CTRL */
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ }
+
+ if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+ pdpu->pipe_qos_cfg.danger_safe_en = enable;
+
+ if (!pdpu->is_rt_pipe) {
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.danger_safe_en = false;
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ pdpu->pipe_qos_cfg.danger_safe_en,
+ pdpu->pipe_qos_cfg.vblank_en,
+ pdpu->pipe_qos_cfg.creq_vblank,
+ pdpu->pipe_qos_cfg.danger_vblank,
+ pdpu->is_rt_pipe);
+
+ pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->is_rt_pipe)
+ goto end;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+end:
+ return 0;
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane: Pointer to drm plane
+ * @crtc: Pointer to drm crtc
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+ struct drm_crtc *crtc)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_ot_params ot_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev || !crtc) {
+ DPU_ERROR("invalid arguments plane %d crtc %d\n",
+ plane != 0, crtc != 0);
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+ ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+ ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ ot_params.is_wfd = !pdpu->is_rt_pipe;
+ ot_params.frame_rate = crtc->mode.vrefresh;
+ ot_params.vbif_idx = VBIF_RT;
+ ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ ot_params.rd = true;
+
+ dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS remap for the given plane
+ * @plane: Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_qos_params qos_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = VBIF_RT;
+ qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+ qos_params.is_rt = pdpu->is_rt_pipe;
+
+ DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+ plane->base.id, qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt,
+ qos_params.clk_ctrl);
+
+ dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+/**
+ * _dpu_plane_get_aspace - get the address space for the given plane
+ * @pdpu: Pointer to DPU plane object
+ * @pstate: Pointer to DPU plane state
+ * @aspace: Pointer to address space pointer, filled in on success
+ */
+static int _dpu_plane_get_aspace(
+ struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ struct msm_gem_address_space **aspace)
+{
+ struct dpu_kms *kms;
+
+ if (!pdpu || !pstate || !aspace) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_plane_get_kms(&pdpu->base);
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ *aspace = kms->base.aspace;
+
+ return 0;
+}
+
+static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+ struct dpu_plane_state *pstate,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ struct msm_gem_address_space *aspace = NULL;
+ int ret;
+
+ if (!plane || !pstate || !pipe_cfg || !fb) {
+ DPU_ERROR(
+ "invalid arg(s), plane %d state %d cfg %d fb %d\n",
+ plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
+ return;
+ }
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
+ return;
+ }
+
+ ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+ if (ret == -EAGAIN)
+ DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+ else if (ret)
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+ trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+ &pipe_cfg->layout,
+ pstate->multirect_index);
+ pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+ pstate->multirect_index);
+ }
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+ struct dpu_hw_scaler3_cfg *scale_cfg,
+ const struct dpu_format *fmt,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ uint32_t i;
+
+ if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+ !chroma_subsmpl_v) {
+ DPU_ERROR(
+ "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+ !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
+ chroma_subsmpl_v);
+ return;
+ }
+
+ memset(scale_cfg, 0, sizeof(*scale_cfg));
+ memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
+
+
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ scale_cfg->src_width[i] = src_w;
+ scale_cfg->src_height[i] = src_h;
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ scale_cfg->src_width[i] /= chroma_subsmpl_h;
+ scale_cfg->src_height[i] /= chroma_subsmpl_v;
+ }
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ pstate->pixel_ext.num_ext_pxls_top[i] =
+ scale_cfg->src_height[i];
+ pstate->pixel_ext.num_ext_pxls_left[i] =
+ scale_cfg->src_width[i];
+ }
+ if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+ && (src_w == dst_w))
+ return;
+
+ scale_cfg->dst_width = dst_w;
+ scale_cfg->dst_height = dst_h;
+ scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+ scale_cfg->lut_flag = 0;
+ scale_cfg->blend_cfg = 1;
+ scale_cfg->enable = 1;
+}
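+
+/*
+ * The phase step above is a fixed-point source/destination ratio with
+ * PHASE_STEP_SHIFT (21) fractional bits. For example, downscaling
+ * src_w = 1920 to dst_w = 960 gives
+ * phase_step_x = (1 << 21) * 1920 / 960 = 0x400000, i.e. 2.0 source
+ * pixels advanced per destination pixel.
+ */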
+
+static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+{
+ static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+ };
+ static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+ };
+
+ if (!pdpu) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+ else
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+
+ DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+ pdpu->csc_ptr->csc_mv[0],
+ pdpu->csc_ptr->csc_mv[1],
+ pdpu->csc_ptr->csc_mv[2]);
+}
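+
+/*
+ * The matrix coefficients above are in S15.16 fixed point, so e.g.
+ * 0x00012A00 = 76288 / 65536 ~ 1.164, the BT.601 limited-range luma scale
+ * factor, and 0xFFFF9B80 is the negative Cb contribution to green (-0.392).
+ */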
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ const struct dpu_format *fmt, bool color_fill)
+{
+ struct dpu_hw_pixel_ext *pe;
+ uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+ if (!pdpu || !fmt || !pstate) {
+ DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+ pdpu != 0, fmt != 0, pstate != 0);
+ return;
+ }
+
+ pe = &pstate->pixel_ext;
+
+ /* don't chroma subsample if decimating */
+ chroma_subsmpl_h =
+ drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+ chroma_subsmpl_v =
+ drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+ /* update scaler. calculate default config for QSEED3 */
+ _dpu_plane_setup_scaler3(pdpu, pstate,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect),
+ drm_rect_height(&pdpu->pipe_cfg.src_rect),
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect),
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+ &pstate->scaler3_cfg, fmt,
+ chroma_subsmpl_h, chroma_subsmpl_v);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu: Pointer to DPU plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+ uint32_t color, uint32_t alpha)
+{
+ const struct dpu_format *fmt;
+ const struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+
+ if (!pdpu || !pdpu->base.state) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ }
+
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
+ return -EINVAL;
+ }
+
+ plane = &pdpu->base;
+ pstate = to_dpu_plane_state(plane->state);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /*
+ * select fill format to match user property expectation,
+ * h/w only supports RGB variants
+ */
+ fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+ /* update sspp */
+ if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+ pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+ (color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+ pstate->multirect_index);
+
+ /* override scaler/decimation if solid fill */
+ pdpu->pipe_cfg.src_rect.x1 = 0;
+ pdpu->pipe_cfg.src_rect.y1 = 0;
+ pdpu->pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+ pdpu->pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+
+ if (pdpu->pipe_hw->ops.setup_format)
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+ fmt, DPU_SSPP_SOLID_FILL,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_rects)
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_pe)
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+ }
+
+ return 0;
+}
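+
+/*
+ * Per the color layout documented above, a call such as
+ * _dpu_plane_color_fill(pdpu, 0x0000FF, 0xFF) would program a fully
+ * opaque red fill (red in bits [7..0], alpha in bits [31..24]).
+ */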
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!drm_state)
+ return;
+
+ pstate = to_dpu_plane_state(drm_state);
+
+ pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+ pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+ struct dpu_plane_state *pstate[R_MAX];
+ const struct drm_plane_state *drm_state[R_MAX];
+ struct drm_rect src[R_MAX], dst[R_MAX];
+ struct dpu_plane *dpu_plane[R_MAX];
+ const struct dpu_format *fmt[R_MAX];
+ int i, buffer_lines;
+ unsigned int max_tile_height = 1;
+ bool parallel_fetch_qualified = true;
+ bool has_tiled_rect = false;
+
+ for (i = 0; i < R_MAX; i++) {
+ const struct msm_format *msm_fmt;
+
+ drm_state[i] = i ? plane->r1 : plane->r0;
+ msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+ fmt[i] = to_dpu_format(msm_fmt);
+
+ if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+ has_tiled_rect = true;
+ if (fmt[i]->tile_height > max_tile_height)
+ max_tile_height = fmt[i]->tile_height;
+ }
+ }
+
+ for (i = 0; i < R_MAX; i++) {
+ int width_threshold;
+
+ pstate[i] = to_dpu_plane_state(drm_state[i]);
+ dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+ if (pstate[i] == NULL) {
+ DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+ drm_state[i]->plane->base.id);
+ return -EINVAL;
+ }
+
+ src[i].x1 = drm_state[i]->src_x >> 16;
+ src[i].y1 = drm_state[i]->src_y >> 16;
+ src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+ src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+ dst[i] = drm_plane_state_dest(drm_state[i]);
+
+ if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+ drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "scaling is not supported in multirect mode\n");
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(fmt[i])) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "Unsupported format for multirect mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * SSPP PD_MEM is split in half - one half for each RECT.
+ * Tiled formats need 5 lines of buffering while fetching
+ * whereas linear formats need only 2 lines.
+ * So we cannot support more than half of the supported SSPP
+ * width for tiled formats.
+ */
+ width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+ if (has_tiled_rect)
+ width_threshold /= 2;
+
+ if (parallel_fetch_qualified &&
+ drm_rect_width(&src[i]) > width_threshold)
+ parallel_fetch_qualified = false;
+
+ }
+
+ /* Validate RECT's and set the mode */
+
+ /* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+ if (parallel_fetch_qualified) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ goto done;
+ }
+
+ /* TIME_MX Mode */
+ buffer_lines = 2 * max_tile_height;
+
+ if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+ dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ } else {
+ DPU_ERROR(
+ "No multirect mode possible for the planes (%d - %d)\n",
+ drm_state[R0]->plane->base.id,
+ drm_state[R1]->plane->base.id);
+ return -EINVAL;
+ }
+
+done:
+ if (dpu_plane[R0]->is_virtual) {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
+ } else {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+ }
+
+ DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+ pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+ DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+ pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+ return 0;
+}
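+
+/*
+ * To illustrate the decision above: two linear, unscaled rects that each
+ * fit within maxlinewidth qualify for parallel fetch. If either rect is
+ * wider than the (halved, when a UBWC rect is present) threshold, the
+ * pair must instead be vertically separated by at least
+ * 2 * max_tile_height destination lines to use time-multiplexed fetch;
+ * otherwise the check fails with -EINVAL.
+ */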
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !flush_sspp) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ pstate = to_dpu_plane_state(plane->state);
+
+ *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+ struct dpu_hw_fmt_layout layout;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ struct dma_fence *fence;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
+ return ret;
+ }
+
+ /* cache aspace */
+ pstate->aspace = aspace;
+
+ /*
+ * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+ * we can use msm_atomic_prepare_fb() instead of doing the
+ * implicit fence and fb prepare by hand here.
+ */
+ obj = msm_framebuffer_bo(new_state->fb, 0);
+ msm_obj = to_msm_bo(obj);
+ fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ if (fence)
+ drm_atomic_set_fence_for_plane(new_state, fence);
+
+ if (pstate->aspace) {
+ ret = msm_framebuffer_prepare(new_state->fb,
+ pstate->aspace);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
+ }
+ }
+
+ /* validate framebuffer layout before commit */
+ ret = dpu_format_populate_layout(pstate->aspace,
+ new_state->fb, &layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *old_pstate;
+
+ if (!old_state || !old_state->fb)
+ return;
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+ struct drm_rect *fb_rect,
+ uint32_t min_src_size)
+{
+ /* Ensure fb size is supported */
+ if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+ drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+ return false;
+
+ /* Ensure src rect is above the minimum size */
+ if (drm_rect_width(src) < min_src_size ||
+ drm_rect_height(src) < min_src_size)
+ return false;
+
+ /* Ensure src is fully encapsulated in fb */
+ return drm_rect_intersect(fb_rect, src) &&
+ drm_rect_equals(fb_rect, src);
+}
+
+static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ int ret = 0;
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ const struct dpu_format *fmt;
+ struct drm_rect src, dst, fb_rect = { 0 };
+ uint32_t max_upscale = 1, max_downscale = 1;
+ uint32_t min_src_size, max_linewidth;
+ int hscale = 1, vscale = 1;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != 0, state != 0);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(state);
+
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ fb_rect.x2 = state->fb->width;
+ fb_rect.y2 = state->fb->height;
+
+ max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+
+ if (pdpu->features & DPU_SSPP_SCALER) {
+ max_downscale = pdpu->pipe_sblk->maxdwnscale;
+ max_upscale = pdpu->pipe_sblk->maxupscale;
+ }
+ if (drm_rect_width(&src) < drm_rect_width(&dst))
+ hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
+ else
+ hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
+ if (drm_rect_height(&src) < drm_rect_height(&dst))
+ vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
+ else
+ vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
+
+ DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
+ dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
+
+ if (!dpu_plane_enabled(state))
+ goto exit;
+
+ fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+
+ min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+ if (DPU_FORMAT_IS_YUV(fmt) &&
+ (!(pdpu->features & DPU_SSPP_SCALER) ||
+ !(pdpu->features & (BIT(DPU_SSPP_CSC)
+ | BIT(DPU_SSPP_CSC_10BIT))))) {
+ DPU_ERROR_PLANE(pdpu,
+ "plane doesn't have scaler/csc for yuv\n");
+ ret = -EINVAL;
+
+ /* check src bounds */
+ } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+ DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -E2BIG;
+
+ /* valid yuv image */
+ } else if (DPU_FORMAT_IS_YUV(fmt) &&
+ (src.x1 & 0x1 || src.y1 & 0x1 ||
+ drm_rect_width(&src) & 0x1 ||
+ drm_rect_height(&src) & 0x1)) {
+ DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -EINVAL;
+
+ /* min dst support */
+ } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+ DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&dst));
+ ret = -EINVAL;
+
+ /* check decimated source width */
+ } else if (drm_rect_width(&src) > max_linewidth) {
+ DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&src), max_linewidth);
+ ret = -E2BIG;
+
+ /* check scaler capability */
+ } else if (hscale < 0 || vscale < 0) {
+ DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
+ DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
+ ret = -E2BIG;
+ }
+
+exit:
+ return ret;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (!state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
+
+ return dpu_plane_sspp_atomic_check(plane, state);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+
+ /*
+ * These updates have to be done immediately before the plane flush
+ * timing, and may not be moved to the atomic_update/mode_set functions.
+ */
+ if (pdpu->is_error)
+ /* force white frame with 100% alpha pipe output on error */
+ _dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+ else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+ /* force 100% alpha */
+ _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+ else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
+ pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+
+ /* flag h/w flush complete */
+ if (plane->state)
+ pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error - enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = error;
+}
+
+static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ uint32_t nplanes, src_flags;
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_pstate;
+ const struct dpu_format *fmt;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_rect src, dst;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return -EINVAL;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return -EINVAL;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+
+ pstate = to_dpu_plane_state(state);
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ crtc = state->crtc;
+ fb = state->fb;
+ if (!crtc || !fb) {
+ DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
+ crtc != 0, fb != 0);
+ return -EINVAL;
+ }
+ fmt = to_dpu_format(msm_framebuffer_format(fb));
+ nplanes = fmt->num_planes;
+
+ memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+
+ _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+
+ pstate->pending = true;
+
+ pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
+ ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
+ crtc->base.id, DRM_RECT_ARG(&dst),
+ (char *)&fmt->base.pixel_format,
+ DPU_FORMAT_IS_UBWC(fmt));
+
+ pdpu->pipe_cfg.src_rect = src;
+ pdpu->pipe_cfg.dst_rect = dst;
+
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+
+ /* override for color fill */
+ if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+ /* skip remaining processing on color fill */
+ return 0;
+ }
+
+ if (pdpu->pipe_hw->ops.setup_rects) {
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+ }
+
+ if (pdpu->pipe_hw->ops.setup_pe &&
+ (pstate->multirect_index != DPU_SSPP_RECT_1))
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+ /*
+ * When programmed in multirect mode, the scaler block is
+ * bypassed. Still we need to update alpha and bitwidth
+ * ONLY for RECT0.
+ */
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+
+ if (pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(
+ pdpu->pipe_hw,
+ pstate->multirect_index,
+ pstate->multirect_mode);
+
+ if (pdpu->pipe_hw->ops.setup_format) {
+ src_flags = 0x0;
+
+ /* update format */
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_cdp) {
+ struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+ memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+
+ cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+ [DPU_PERF_CDP_USAGE_RT].rd_enable;
+ cdp_cfg->ubwc_meta_enable =
+ DPU_FORMAT_IS_UBWC(fmt);
+ cdp_cfg->tile_amortize_enable =
+ DPU_FORMAT_IS_UBWC(fmt) ||
+ DPU_FORMAT_IS_TILE(fmt);
+ cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+ pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+ }
+
+ /* update csc */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ _dpu_plane_setup_csc(pdpu);
+ else
+ pdpu->csc_ptr = NULL;
+ }
+
+ _dpu_plane_set_qos_lut(plane, fb);
+ _dpu_plane_set_danger_lut(plane, fb);
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ _dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+ _dpu_plane_set_ot_limit(plane, crtc);
+ }
+
+ _dpu_plane_set_qos_remap(plane);
+ return 0;
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+ pstate = to_dpu_plane_state(state);
+
+ trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+ pstate->multirect_mode);
+
+ pstate->pending = true;
+
+ if (is_dpu_plane_virtual(plane) &&
+ pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
+ DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = false;
+ state = plane->state;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (!dpu_plane_sspp_enabled(state)) {
+ _dpu_plane_atomic_disable(plane, old_state);
+ } else {
+ int ret;
+
+ ret = dpu_plane_sspp_atomic_update(plane, old_state);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ }
+}
+
+void dpu_plane_restore(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* last plane state is same as current state */
+ dpu_plane_atomic_update(plane, plane->state);
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (pdpu) {
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ mutex_destroy(&pdpu->lock);
+
+ drm_plane_helper_disable(plane, NULL);
+
+ /* this will destroy the states as well */
+ drm_plane_cleanup(plane);
+
+ if (pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+ kfree(pdpu);
+ }
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != 0, state != 0);
+ return;
+ }
+
+ pstate = to_dpu_plane_state(state);
+
+ /* remove ref count for frame buffers */
+ if (state->fb)
+ drm_framebuffer_put(state->fb);
+
+ kfree(pstate);
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return NULL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return NULL;
+ }
+
+ old_state = to_dpu_plane_state(plane->state);
+ pdpu = to_dpu_plane(plane);
+ pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return NULL;
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ pstate->pending = false;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+ return &pstate->base;
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* remove previous state, if present */
+ if (plane->state) {
+ dpu_plane_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return;
+ }
+
+ pstate->base.plane = plane;
+
+ plane->state = &pstate->base;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t _dpu_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int len = 0;
+ char buf[40] = {'\0'};
+
+ if (!cfg)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ dpu_plane_danger_signal_ctrl(plane, enable);
+ DPU_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int disable_panic;
+ char buf[10];
+
+ if (!cfg)
+ return -EFAULT;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &disable_panic))
+ return -EFAULT;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ DPU_DEBUG("Disabling danger:\n");
+ _dpu_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ DPU_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _dpu_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+ .open = simple_open,
+ .read = _dpu_plane_danger_read,
+ .write = _dpu_plane_danger_write,
+};
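+
+/*
+ * Reading this debugfs file reports whether the danger signal is currently
+ * disabled; writing a non-zero value disables it for all active pipes.
+ * An illustrative invocation (the exact path depends on the DRM debugfs
+ * root and pipe name):
+ *
+ *   echo 1 > /sys/kernel/debug/dri/0/<pipe_name>/disable_danger
+ */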
+
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_kms *kms;
+ struct msm_drm_private *priv;
+ const struct dpu_sspp_sub_blks *sblk = NULL;
+ const struct dpu_sspp_cfg *cfg = NULL;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (pdpu && pdpu->pipe_hw)
+ cfg = pdpu->pipe_hw->cap;
+ if (cfg)
+ sblk = cfg->sblk;
+
+ if (!sblk)
+ return 0;
+
+ /* create overall sub-directory for the pipe */
+ pdpu->debugfs_root =
+ debugfs_create_dir(pdpu->pipe_name,
+ plane->dev->primary->debugfs_root);
+
+ if (!pdpu->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_x32("features", 0600,
+ pdpu->debugfs_root, &pdpu->features);
+
+ /* add register dump support */
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("src_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_src);
+
+ if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("scaler_blk", 0400,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_scaler);
+ debugfs_create_bool("default_scaling",
+ 0600,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_default_scale);
+ }
+
+ if (cfg->features & BIT(DPU_SSPP_CSC) ||
+ cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("csc_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_csc);
+ }
+
+ debugfs_create_u32("xin_id",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ debugfs_create_file("disable_danger",
+ 0600,
+ pdpu->debugfs_root,
+ kms, &dpu_plane_danger_enable);
+
+ return 0;
+}
+
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+ pdpu = to_dpu_plane(plane);
+
+ debugfs_remove_recursive(pdpu->debugfs_root);
+}
+#else
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ return 0;
+}
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+}
+#endif
+
+static int dpu_plane_late_register(struct drm_plane *plane)
+{
+ return _dpu_plane_init_debugfs(plane);
+}
+
+static void dpu_plane_early_unregister(struct drm_plane *plane)
+{
+ _dpu_plane_destroy_debugfs(plane);
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dpu_plane_destroy,
+ .reset = dpu_plane_reset,
+ .atomic_duplicate_state = dpu_plane_duplicate_state,
+ .atomic_destroy_state = dpu_plane_destroy_state,
+ .late_register = dpu_plane_late_register,
+ .early_unregister = dpu_plane_early_unregister,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+bool is_dpu_plane_virtual(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->is_virtual : false;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id)
+{
+ struct drm_plane *plane = NULL, *master_plane = NULL;
+ const struct dpu_format_extended *format_list;
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ enum drm_plane_type type;
+ int zpos_max = DPU_ZPOS_MAX;
+ int ret = -EINVAL;
+
+ if (!dev) {
+ DPU_ERROR("[%u]device is NULL\n", pipe);
+ goto exit;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("[%u]private data is NULL\n", pipe);
+ goto exit;
+ }
+
+ if (!priv->kms) {
+ DPU_ERROR("[%u]invalid KMS reference\n", pipe);
+ goto exit;
+ }
+ kms = to_dpu_kms(priv->kms);
+
+ if (!kms->catalog) {
+ DPU_ERROR("[%u]invalid catalog reference\n", pipe);
+ goto exit;
+ }
+
+ /* create and zero local structure */
+ pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+ if (!pdpu) {
+ DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
+ pdpu->is_virtual = (master_plane_id != 0);
+ INIT_LIST_HEAD(&pdpu->mplane_list);
+ master_plane = drm_plane_find(dev, NULL, master_plane_id);
+ if (master_plane) {
+ struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
+
+ list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
+ }
+
+ /* initialize underlying h/w driver */
+ pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
+ master_plane_id != 0);
+ if (IS_ERR(pdpu->pipe_hw)) {
+ DPU_ERROR("[%u]SSPP init failed\n", pipe);
+ ret = PTR_ERR(pdpu->pipe_hw);
+ goto clean_plane;
+ } else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+ goto clean_sspp;
+ }
+
+ /* cache features mask for later */
+ pdpu->features = pdpu->pipe_hw->cap->features;
+ pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR("[%u]invalid sblk\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (!master_plane_id)
+ format_list = pdpu->pipe_sblk->format_list;
+ else
+ format_list = pdpu->pipe_sblk->virt_format_list;
+
+ pdpu->nformats = dpu_populate_formats(format_list,
+ pdpu->formats,
+ 0,
+ ARRAY_SIZE(pdpu->formats));
+
+ if (!pdpu->nformats) {
+ DPU_ERROR("[%u]no valid formats for plane\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (pdpu->features & BIT(DPU_SSPP_CURSOR))
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_plane)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ pdpu->formats, pdpu->nformats,
+ NULL, type, NULL);
+ if (ret)
+ goto clean_sspp;
+
+ pdpu->catalog = kms->catalog;
+
+ if (kms->catalog->mixer_count &&
+ kms->catalog->mixer[0].sblk->maxblendstages) {
+ zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
+ if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
+ zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
+ }
+
+ ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+ if (ret)
+ DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+ /* success! finalize initialization */
+ drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+ /* save user friendly pipe name for later */
+ snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
+
+ mutex_init(&pdpu->lock);
+
+ DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+ pipe, plane->base.id, master_plane_id);
+ return plane;
+
+clean_sspp:
+ if (pdpu && pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+ kfree(pdpu);
+exit:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 000000000000..f6fe6ddc7a3a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Define dpu extension of drm plane state object
+ * @base: base drm plane state object
+ * @aspace: pointer to address space for input/output buffers
+ * @input_fence: dereferenced input fence pointer
+ * @stage: assigned by crtc blender
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending: whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @cdp_cfg: CDP configuration
+ */
+struct dpu_plane_state {
+ struct drm_plane_state base;
+ struct msm_gem_address_space *aspace;
+ void *input_fence;
+ enum dpu_stage stage;
+ uint32_t multirect_index;
+ uint32_t multirect_mode;
+ bool pending;
+
+ /* scaler configuration */
+ struct dpu_hw_scaler3_cfg scaler3_cfg;
+ struct dpu_hw_pixel_ext pixel_ext;
+
+ struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane state configured on rect 0
+ * @r1: drm plane state configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+ const struct drm_plane_state *r0;
+ const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+ container_of(x, struct dpu_plane_state, base)
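+
+/*
+ * Typical usage, recovering the DPU state from the generic DRM pointer:
+ *
+ *   struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ */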
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane: Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * is_dpu_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * returns: true - if the plane is virtual
+ * false - if the plane is primary
+ */
+bool is_dpu_plane_virtual(struct drm_plane *plane);
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush mask
+ * @plane: Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp);
+
+/**
+ * dpu_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_restore(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_kickoff - final plane operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_kickoff(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error - enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ * a regular plane initialization. A non-zero primary plane
+ * id will be passed for a virtual pipe initialization.
+ *
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id);
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ * against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_wait_input_fence - wait for input fence object
+ * @plane: Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane: Pointer to DRM plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
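
The fill word packs the three 8-bit channels in the B[23:16]/G[15:8]/R[7:0] layout documented above. A hypothetical helper (not part of this patch) makes the packing explicit:

    /* Hypothetical helper: pack channels per the layout above. */
    static inline uint32_t example_pack_bgr(uint8_t r, uint8_t g, uint8_t b)
    {
        return ((uint32_t)b << 16) | ((uint32_t)g << 8) | r;
    }

    /* Solid opaque green, for example: */
    /* dpu_plane_color_fill(plane, example_pack_bgr(0, 0xff, 0), 0xff); */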
+
+/**
+ * dpu_plane_set_revalidate - sets revalidate flag which forces a full
+ * validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
new file mode 100644
index 000000000000..a75eebca2f37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include "dpu_power_handle.h"
+#include "dpu_trace.h"
+
+static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
+ [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
+};
+
+const char *dpu_power_handle_get_dbus_name(u32 bus_id)
+{
+ if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
+ return data_bus_name[bus_id];
+
+ return NULL;
+}
+
+static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
+ u32 event_type)
+{
+ struct dpu_power_event *event;
+
+ list_for_each_entry(event, &phandle->event_list, list) {
+ if (event->event_type & event_type)
+ event->cb_fnc(event_type, event->usr);
+ }
+}
+
+struct dpu_power_client *dpu_power_client_create(
+ struct dpu_power_handle *phandle, char *client_name)
+{
+ struct dpu_power_client *client;
+ static u32 id;
+
+ if (!client_name || !phandle) {
+ pr_err("client name is null or invalid power data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&phandle->phandle_lock);
+ strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+ client->usecase_ndx = VOTE_INDEX_DISABLE;
+ client->id = id;
+ client->active = true;
+ pr_debug("client %s created:%pK id :%d\n", client_name,
+ client, id);
+ id++;
+ list_add(&client->list, &phandle->power_client_clist);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return client;
+}
+
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client)
+{
+ if (!client || !phandle) {
+ pr_err("reg bus vote: invalid client handle\n");
+ } else if (!client->active) {
+ pr_err("dpu power deinit already done\n");
+ kfree(client);
+ } else {
+ pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+ client->name, client, client->id);
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(client);
+ }
+}
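
Create, vote, unvote and destroy form the expected client lifecycle. A sketch under the assumption of a valid handle (function and client name are illustrative):

    static int example_vote(struct dpu_power_handle *phandle)
    {
        struct dpu_power_client *client;
        int ret;

        client = dpu_power_client_create(phandle, "example");
        if (IS_ERR(client))
            return PTR_ERR(client);

        ret = dpu_power_resource_enable(phandle, client, true);   /* vote */
        if (!ret) {
            /* ... access hardware while powered ... */
            dpu_power_resource_enable(phandle, client, false);    /* unvote */
        }

        dpu_power_client_destroy(phandle, client);
        return ret;
    }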
+
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ phandle->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&phandle->power_client_clist);
+ INIT_LIST_HEAD(&phandle->event_list);
+
+ mutex_init(&phandle->phandle_lock);
+}
+
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ struct dpu_power_client *curr_client, *next_client;
+ struct dpu_power_event *curr_event, *next_event;
+
+ if (!phandle || !pdev) {
+ pr_err("invalid input param\n");
+ return;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ list_for_each_entry_safe(curr_client, next_client,
+ &phandle->power_client_clist, list) {
+ pr_err("client:%s-%d still registered with refcount:%d\n",
+ curr_client->name, curr_client->id,
+ curr_client->refcount);
+ curr_client->active = false;
+ list_del(&curr_client->list);
+ }
+
+ list_for_each_entry_safe(curr_event, next_event,
+ &phandle->event_list, list) {
+ pr_err("event:%d, client:%s still registered\n",
+ curr_event->event_type,
+ curr_event->client_name);
+ curr_event->active = false;
+ list_del(&curr_event->list);
+ }
+ mutex_unlock(&phandle->phandle_lock);
+}
+
+int dpu_power_resource_enable(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, bool enable)
+{
+ bool changed = false;
+ u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+ struct dpu_power_client *client;
+
+ if (!phandle || !pclient) {
+ pr_err("invalid input argument\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ if (enable)
+ pclient->refcount++;
+ else if (pclient->refcount)
+ pclient->refcount--;
+
+ if (pclient->refcount)
+ pclient->usecase_ndx = VOTE_INDEX_LOW;
+ else
+ pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+ list_for_each_entry(client, &phandle->power_client_clist, list) {
+ if (client->usecase_ndx < VOTE_INDEX_MAX &&
+ client->usecase_ndx > max_usecase_ndx)
+ max_usecase_ndx = client->usecase_ndx;
+ }
+
+ if (phandle->current_usecase_ndx != max_usecase_ndx) {
+ changed = true;
+ prev_usecase_ndx = phandle->current_usecase_ndx;
+ phandle->current_usecase_ndx = max_usecase_ndx;
+ }
+
+ pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+ __builtin_return_address(0), changed, max_usecase_ndx,
+ pclient->name, pclient->id, enable, pclient->refcount);
+
+ if (!changed)
+ goto end;
+
+ if (enable) {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_ENABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_ENABLE);
+
+ } else {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_DISABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_DISABLE);
+ }
+
+end:
+ mutex_unlock(&phandle->phandle_lock);
+ return 0;
+}
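
Note that the vote is aggregated across all registered clients: the handle tracks the highest usecase index any client currently holds, so one client disabling has no effect until every other vote has been dropped. Illustratively (client names hypothetical):

    dpu_power_resource_enable(phandle, client_a, true);  /* handle -> VOTE_INDEX_LOW */
    dpu_power_resource_enable(phandle, client_b, true);  /* still VOTE_INDEX_LOW */
    dpu_power_resource_enable(phandle, client_a, false); /* b still votes: unchanged */
    dpu_power_resource_enable(phandle, client_b, false); /* last vote gone: VOTE_INDEX_DISABLE */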
+
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name)
+{
+ struct dpu_power_event *event;
+
+ if (!phandle) {
+ pr_err("invalid power handle\n");
+ return ERR_PTR(-EINVAL);
+ } else if (!cb_fnc || !event_type) {
+ pr_err("no callback fnc or event type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ event->event_type = event_type;
+ event->cb_fnc = cb_fnc;
+ event->usr = usr;
+ strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+ event->active = true;
+
+ mutex_lock(&phandle->phandle_lock);
+ list_add(&event->list, &phandle->event_list);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return event;
+}
+
+void dpu_power_handle_unregister_event(
+ struct dpu_power_handle *phandle,
+ struct dpu_power_event *event)
+{
+ if (!phandle || !event) {
+ pr_err("invalid phandle or event\n");
+ } else if (!event->active) {
+ pr_err("power handle deinit already done\n");
+ kfree(event);
+ } else {
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&event->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(event);
+ }
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
new file mode 100644
index 000000000000..344f74464eca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DPU_POWER_HANDLE_H_
+#define _DPU_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
+#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
+
+#include "dpu_io_util.h"
+
+/* event will be triggered before power handler disable */
+#define DPU_POWER_EVENT_PRE_DISABLE 0x1
+
+/* event will be triggered after power handler disable */
+#define DPU_POWER_EVENT_POST_DISABLE 0x2
+
+/* event will be triggered before power handler enable */
+#define DPU_POWER_EVENT_PRE_ENABLE 0x4
+
+/* event will be triggered after power handler enable */
+#define DPU_POWER_EVENT_POST_ENABLE 0x8
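
Since the event types are bit flags, one registration can subscribe to several transitions at once, and the callback receives the specific bit that fired. A sketch, with a hypothetical callback:

    static void example_power_event(u32 event_type, void *usr)
    {
        if (event_type & DPU_POWER_EVENT_PRE_DISABLE) {
            /* quiesce hardware before the power vote drops */
        }
        if (event_type & DPU_POWER_EVENT_POST_DISABLE) {
            /* hardware state may now be lost */
        }
    }

    /* event = dpu_power_handle_register_event(phandle,
     *		DPU_POWER_EVENT_PRE_DISABLE | DPU_POWER_EVENT_POST_DISABLE,
     *		example_power_event, ctx, "example");
     */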
+
+/**
+ * mdss_bus_vote_type: register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+ VOTE_INDEX_DISABLE,
+ VOTE_INDEX_LOW,
+ VOTE_INDEX_MAX,
+};
+
+/**
+ * enum dpu_power_handle_data_bus_client - type of axi bus clients
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum dpu_power_handle_data_bus_client {
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum DPU_POWER_HANDLE_DBUS_ID {
+ DPU_POWER_HANDLE_DBUS_ID_MNOC,
+ DPU_POWER_HANDLE_DBUS_ID_LLCC,
+ DPU_POWER_HANDLE_DBUS_ID_EBI,
+ DPU_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
+ * struct dpu_power_client: stores the power client for dpu driver
+ * @name: name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount: current refcount if multiple modules are using same
+ * same client for enable/disable. Power module will
+ * aggregate the refcount and vote accordingly for this
+ * client.
+ * @id: assigned during create. helps for debugging.
+ * @list: list to attach power handle master list
+ * @ab: arbitrated bandwidth for each bus client
+ * @ib: instantaneous bandwidth for each bus client
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_client {
+ char name[MAX_CLIENT_NAME_LEN];
+ short usecase_ndx;
+ short refcount;
+ u32 id;
+ struct list_head list;
+ u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ bool active;
+};
+
+/**
+ * struct dpu_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback on event trigger
+ * @event_type: refer to DPU_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_event {
+ char client_name[MAX_CLIENT_NAME_LEN];
+ void (*cb_fnc)(u32 event_type, void *usr);
+ void *usr;
+ u32 event_type;
+ struct list_head list;
+ bool active;
+};
+
+/**
+ * struct dpu_power_handle: power handle main struct
+ * @power_client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @event_list: current power handle event list
+ */
+struct dpu_power_handle {
+ struct list_head power_client_clist;
+ struct mutex phandle_lock;
+ struct device *dev;
+ u32 current_usecase_ndx;
+ struct list_head event_list;
+};
+
+/**
+ * dpu_power_resource_init() - initializes the dpu power handle
+ * @pdev: platform device to search the power resources
+ * @pdata: power handle to store the power resources
+ */
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_resource_deinit() - release the dpu power handle
+ * @pdev: platform device for power resources
+ * @pdata: power handle containing the resources
+ *
+ * Return: none
+ */
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_client_create() - create the client on power handle
+ * @pdata: power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: pointer to client on success, or error pointer on failure.
+ */
+struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
+ char *client_name);
+
+/**
+ * dpu_power_client_destroy() - destroy the client on power handle
+ * @phandle: power handle containing the resources
+ * @client: client handle returned during create
+ *
+ * Return: none
+ */
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client);
+
+/**
+ * dpu_power_resource_enable() - enable/disable the power resources
+ * @pdata: power handle containing the resources
+ * @pclient: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int dpu_power_resource_enable(struct dpu_power_handle *pdata,
+ struct dpu_power_client *pclient, bool enable);
+
+/**
+ * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle: power handle containing the resources
+ * @client: client information to bandwidth control
+ * @enable: true to enable bandwidth for the data bus
+ *
+ * Return: none
+ */
+void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, int enable);
+
+/**
+ * dpu_power_handle_register_event - register a callback function for an event.
+ * Clients can register for multiple events with a single register.
+ * Any block with access to phandle can register for the event
+ * notification.
+ * @phandle: power handle containing the resources
+ * @event_type: event type to register; refer DPU_POWER_HANDLE_EVENT_*
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback on event trigger
+ *
+ * Return: event pointer on success, or error pointer otherwise
+ */
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name);
+/**
+ * dpu_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle: power handle containing the resources
+ * @event: event pointer returned after power handle register
+ */
+void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
+ struct dpu_power_event *event);
+
+/**
+ * dpu_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id: data bus identifier
+ * Return: Pointer to name string if success; NULL otherwise
+ */
+const char *dpu_power_handle_get_dbus_name(u32 bus_id);
+
+#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 000000000000..13c0a36d4ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+ (t).num_comp_enc == (r).num_enc && \
+ (t).num_intf == (r).num_intf)
+
+struct dpu_rm_topology_def {
+ enum dpu_rm_topology_name top_name;
+ int num_lm;
+ int num_comp_enc;
+ int num_intf;
+ int num_ctl;
+ int needs_split_display;
+};
+
+static const struct dpu_rm_topology_def g_top_table[] = {
+ { DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
+ { DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
+ { DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
+ { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
+};
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @top_ctrl: topology control preference from kernel client
+ * @top: selected topology for the display
+ * @hw_res: Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+ uint64_t top_ctrl;
+ const struct dpu_rm_topology_def *topology;
+ struct dpu_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct dpu_rm_rsvp - Use Case Reservation tagging structure
+ * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ * By using as a tag, rather than lists of pointers to HW blocks used
+ * we can avoid some list management since we don't know how many blocks
+ * of each type a given use case may require.
+ * @list: List head for list of all reservations
+ * @seq: Global RSVP sequence number for debugging, especially for
+ * differentiating different allocations for same encoder.
+ * @enc_id: Reservations are tracked by Encoder DRM object ID.
+ * CRTCs may be connected to multiple Encoders.
+ * An encoder or connector id identifies the display path.
+ * @topology: DRM<->HW topology use case
+ */
+struct dpu_rm_rsvp {
+ struct list_head list;
+ uint32_t seq;
+ uint32_t enc_id;
+ enum dpu_rm_topology_name topology;
+};
+
+/**
+ * struct dpu_rm_hw_blk - hardware block tracking list member
+ * @list: List head for list of all hardware blocks tracking items
+ * @rsvp: Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt: Temporary pointer used during reservation to the incoming
+ * request. Will be swapped into rsvp if proposal is accepted
+ * @type: Type of hardware block this structure tracks
+ * @id: Hardware ID number, within its own space, i.e. LM_X
+ * @hw: Pointer to the hardware register access object for this block
+ */
+struct dpu_rm_hw_blk {
+ struct list_head list;
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_rsvp *rsvp_nxt;
+ enum dpu_hw_blk_type type;
+ uint32_t id;
+ struct dpu_hw_blk *hw;
+};
+
+/**
+ * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
+ */
+enum dpu_rm_dbg_rsvp_stage {
+ DPU_RM_STAGE_BEGIN,
+ DPU_RM_STAGE_AFTER_CLEAR,
+ DPU_RM_STAGE_AFTER_RSVPNEXT,
+ DPU_RM_STAGE_FINAL
+};
+
+static void _dpu_rm_print_rsvps(
+ struct dpu_rm *rm,
+ enum dpu_rm_dbg_rsvp_stage stage)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ DPU_DEBUG("%d\n", stage);
+
+ list_for_each_entry(rsvp, &rm->rsvps, list) {
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+ rsvp->enc_id, rsvp->topology);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (!blk->rsvp && !blk->rsvp_nxt)
+ continue;
+
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
+ (blk->rsvp) ? blk->rsvp->seq : 0,
+ (blk->rsvp) ? blk->rsvp->enc_id : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+ blk->type, blk->id);
+ }
+ }
+}
+
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
+{
+ return rm->hw_mdp;
+}
+
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology)
+{
+ int i;
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+ return g_top_table[i].top_name;
+
+ return DPU_RM_TOPOLOGY_NONE;
+}
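
RM_IS_TOPOLOGY_MATCH keys purely off the mixer, compression-encoder and interface counts, so a request for two mixers merged down to one interface resolves to the 3D-merge entry. Illustratively:

    /* Two LMs, no DSC, one interface: DUALPIPE_3DMERGE. */
    struct msm_display_topology topology = {
        .num_lm   = 2,
        .num_enc  = 0,
        .num_intf = 1,
    };

    /* dpu_rm_get_topology_name(topology) == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE */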
+
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type)
+{
+ memset(iter, 0, sizeof(*iter));
+ iter->enc_id = enc_id;
+ iter->type = type;
+}
+
+static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ struct list_head *blk_list;
+
+ if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
+ DPU_ERROR("invalid rm\n");
+ return false;
+ }
+
+ i->hw = NULL;
+ blk_list = &rm->hw_blks[i->type];
+
+ if (i->blk && (&i->blk->list == blk_list)) {
+ DPU_DEBUG("attempt resume iteration past last\n");
+ return false;
+ }
+
+ i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+ list_for_each_entry_continue(i->blk, blk_list, list) {
+ struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
+
+ if (i->blk->type != i->type) {
+ DPU_ERROR("found incorrect block type %d on %d list\n",
+ i->blk->type, i->type);
+ return false;
+ }
+
+ if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+ i->hw = i->blk->hw;
+ DPU_DEBUG("found type %d id %d for enc %d\n",
+ i->type, i->blk->id, i->enc_id);
+ return true;
+ }
+ }
+
+ DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+ return false;
+}
+
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _dpu_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
+static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
+{
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ dpu_hw_lm_destroy(hw);
+ break;
+ case DPU_HW_BLK_CTL:
+ dpu_hw_ctl_destroy(hw);
+ break;
+ case DPU_HW_BLK_CDM:
+ dpu_hw_cdm_destroy(hw);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ dpu_hw_pingpong_destroy(hw);
+ break;
+ case DPU_HW_BLK_INTF:
+ dpu_hw_intf_destroy(hw);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ break;
+ }
+}
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
+ enum dpu_hw_blk_type type;
+
+ if (!rm) {
+ DPU_ERROR("invalid rm\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+ list_del(&rsvp_cur->list);
+ kfree(rsvp_cur);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+ list) {
+ list_del(&hw_cur->list);
+ _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+ kfree(hw_cur);
+ }
+ }
+
+ dpu_hw_mdp_destroy(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+
+ mutex_destroy(&rm->rm_lock);
+
+ return 0;
+}
+
+static int _dpu_rm_hw_blk_create(
+ struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ enum dpu_hw_blk_type type,
+ uint32_t id,
+ void *hw_catalog_info)
+{
+ struct dpu_rm_hw_blk *blk;
+ struct dpu_hw_mdp *hw_mdp;
+ void *hw;
+
+ hw_mdp = rm->hw_mdp;
+
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ hw = dpu_hw_lm_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw = dpu_hw_ctl_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CDM:
+ hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ hw = dpu_hw_pingpong_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_INTF:
+ hw = dpu_hw_intf_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(hw)) {
+ DPU_ERROR("failed hw object creation: type %d, err %ld\n",
+ type, PTR_ERR(hw));
+ return -EFAULT;
+ }
+
+ blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+ if (!blk) {
+ _dpu_rm_hw_destroy(type, hw);
+ return -ENOMEM;
+ }
+
+ blk->type = type;
+ blk->id = id;
+ blk->hw = hw;
+ list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+ return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev)
+{
+ int rc, i;
+ enum dpu_hw_blk_type type;
+
+ if (!rm || !cat || !mmio || !dev) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ /* Clear, setup lists */
+ memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
+ INIT_LIST_HEAD(&rm->rsvps);
+ for (type = 0; type < DPU_HW_BLK_MAX; type++)
+ INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+ rm->dev = dev;
+
+ /* Some of the sub-blocks require an mdptop to be created */
+ rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
+ if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+ rc = PTR_ERR(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+ DPU_ERROR("failed: mdp hw not available\n");
+ goto fail;
+ }
+
+ /* Interrogate HW catalog and create tracking items for hw blocks */
+ for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+ if (lm->pingpong == PINGPONG_MAX) {
+ DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
+ cat->mixer[i].id, &cat->mixer[i]);
+ if (rc) {
+ DPU_ERROR("failed: lm hw not available\n");
+ goto fail;
+ }
+
+ if (!rm->lm_max_width) {
+ rm->lm_max_width = lm->sblk->maxwidth;
+ } else if (rm->lm_max_width != lm->sblk->maxwidth) {
+ /*
+ * Don't expect to have hw where lm max widths differ.
+ * If found, take the min.
+ */
+ DPU_ERROR("unsupported: lm maxwidth differs\n");
+ if (rm->lm_max_width > lm->sblk->maxwidth)
+ rm->lm_max_width = lm->sblk->maxwidth;
+ }
+ }
+
+ for (i = 0; i < cat->pingpong_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
+ cat->pingpong[i].id, &cat->pingpong[i]);
+ if (rc) {
+ DPU_ERROR("failed: pp hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->intf_count; i++) {
+ if (cat->intf[i].type == INTF_NONE) {
+ DPU_DEBUG("skip intf %d with type none\n", i);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
+ cat->intf[i].id, &cat->intf[i]);
+ if (rc) {
+ DPU_ERROR("failed: intf hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->ctl_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
+ cat->ctl[i].id, &cat->ctl[i]);
+ if (rc) {
+ DPU_ERROR("failed: ctl hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->cdm_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
+ cat->cdm[i].id, &cat->cdm[i]);
+ if (rc) {
+ DPU_ERROR("failed: cdm hw not available\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ dpu_rm_destroy(rm);
+
+ return rc;
+}
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ * proposed use case requirements, incl. hardwired dependent blocks like
+ * pingpong
+ * @rm: dpu resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer, function checks if lm, and all other hardwired
+ *      blocks connected to the lm (pp) are available and appropriate
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ *      NULL if pp was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function checks if lm is compatible with
+ *      primary_lm, as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs,
+ struct dpu_rm_hw_blk *lm,
+ struct dpu_rm_hw_blk **pp,
+ struct dpu_rm_hw_blk *primary_lm)
+{
+ const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
+ struct dpu_rm_hw_iter iter;
+
+ *pp = NULL;
+
+ DPU_DEBUG("check lm %d pp %d\n",
+ lm_cfg->id, lm_cfg->pingpong);
+
+ /* Check if this layer mixer is a peer of the proposed primary LM */
+ if (primary_lm) {
+ const struct dpu_lm_cfg *prim_lm_cfg =
+ to_dpu_hw_mixer(primary_lm->hw)->cap;
+
+ if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+ prim_lm_cfg->id);
+ return false;
+ }
+ }
+
+ /* Already reserved? */
+ if (RESERVED_BY_OTHER(lm, rsvp)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ return false;
+ }
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id == lm_cfg->pingpong) {
+ *pp = iter.blk;
+ break;
+ }
+ }
+
+ if (!*pp) {
+ DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+ return false;
+ }
+
+ if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
+ (*pp)->id);
+ return false;
+ }
+
+ return true;
+}
+
+static int _dpu_rm_reserve_lms(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+{
+ struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
+ struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter_i, iter_j;
+ int lm_count = 0;
+ int i, rc = 0;
+
+ if (!reqs->topology->num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+ return -EINVAL;
+ }
+
+ /* Find a primary mixer */
+ dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_i)) {
+ memset(&lm, 0, sizeof(lm));
+ memset(&pp, 0, sizeof(pp));
+
+ lm_count = 0;
+ lm[lm_count] = iter_i.blk;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, lm[lm_count],
+ &pp[lm_count], NULL))
+ continue;
+
+ ++lm_count;
+
+ /* Valid primary mixer found, find matching peers */
+ dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_j)) {
+ if (iter_i.blk == iter_j.blk)
+ continue;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, iter_j.blk,
+ &pp[lm_count], iter_i.blk))
+ continue;
+
+ lm[lm_count] = iter_j.blk;
+ ++lm_count;
+ }
+ }
+
+ if (lm_count != reqs->topology->num_lm) {
+ DPU_DEBUG("unable to find appropriate mixers\n");
+ return -ENAVAIL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lm); i++) {
+ if (!lm[i])
+ break;
+
+ lm[i]->rsvp_nxt = rsvp;
+ pp[i]->rsvp_nxt = rsvp;
+
+ trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
+ pp[i]->id);
+ }
+
+ return rc;
+}
+
+static int _dpu_rm_reserve_ctls(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ const struct dpu_rm_topology_def *top)
+{
+ struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter;
+ int i = 0;
+
+ memset(&ctls, 0, sizeof(ctls));
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+ unsigned long features = ctl->caps->features;
+ bool has_split_display;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+ DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+
+ if (top->needs_split_display != has_split_display)
+ continue;
+
+ ctls[i] = iter.blk;
+ DPU_DEBUG("ctl %d match\n", iter.blk->id);
+
+ if (++i == top->num_ctl)
+ break;
+ }
+
+ if (i != top->num_ctl)
+ return -ENAVAIL;
+
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+ ctls[i]->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
+ rsvp->enc_id);
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_cdm(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type)
+{
+ struct dpu_rm_hw_iter iter;
+
+ DRM_DEBUG_KMS("type %d id %d\n", type, id);
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
+ const struct dpu_cdm_cfg *caps = cdm->caps;
+ bool match = false;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
+ match = test_bit(id, &caps->intf_connect);
+
+ DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
+ iter.blk->type, iter.blk->id, rsvp->enc_id,
+ caps->intf_connect, match);
+
+ if (!match)
+ continue;
+
+ trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ iter.blk->rsvp_nxt = rsvp;
+ break;
+ }
+
+ if (!iter.hw) {
+ DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_intf(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type,
+ bool needs_cdm)
+{
+ struct dpu_rm_hw_iter iter;
+ int ret = 0;
+
+ /* Find the block entry in the rm, and note the reservation */
+ dpu_rm_init_hw_iter(&iter, 0, type);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id != id)
+ continue;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ DPU_ERROR("type %d id %d already reserved\n", type, id);
+ return -ENAVAIL;
+ }
+
+ iter.blk->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ break;
+ }
+
+ /* Shouldn't happen since intfs are fixed at probe */
+ if (!iter.hw) {
+ DPU_ERROR("couldn't find type %d id %d\n", type, id);
+ return -EINVAL;
+ }
+
+ if (needs_cdm)
+ ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
+
+ return ret;
+}
+
+static int _dpu_rm_reserve_intf_related_hw(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_encoder_hw_resources *hw_res)
+{
+ int i, ret = 0;
+ u32 id;
+
+ for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+ if (hw_res->intfs[i] == INTF_MODE_NONE)
+ continue;
+ id = i + INTF_0;
+ ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+ DPU_HW_BLK_INTF, hw_res->needs_cdm);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int _dpu_rm_make_next_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+{
+ int ret;
+ struct dpu_rm_topology_def topology;
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->topology->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+ if (ret) {
+ DPU_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ */
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
+ if (ret && !reqs->topology->needs_split_display) {
+ memcpy(&topology, reqs->topology, sizeof(topology));
+ topology.needs_split_display = true;
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
+ }
+ if (ret) {
+ DPU_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
+ ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_requirements *reqs,
+ struct msm_display_topology req_topology)
+{
+ int i;
+
+ memset(reqs, 0, sizeof(*reqs));
+
+ dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+ req_topology)) {
+ reqs->topology = &g_top_table[i];
+ break;
+ }
+ }
+
+ if (!reqs->topology) {
+ DPU_ERROR("invalid topology for the display\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set the requirement based on caps if not set from user space
+ * This will ensure to select LM tied with DS blocks
+ * Currently, DS blocks are tied with LM 0 and LM 1 (primary display)
+ */
+ if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+ conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+ reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+
+ DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
+ reqs->hw_res.display_num_of_h_tiles);
+ DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+ reqs->topology->num_lm, reqs->topology->num_ctl,
+ reqs->topology->top_name,
+ reqs->topology->needs_split_display);
+
+ return 0;
+}
+
+static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *i;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return NULL;
+ }
+
+ if (list_empty(&rm->rsvps))
+ return NULL;
+
+ list_for_each_entry(i, &rm->rsvps, list)
+ if (i->enc_id == enc->base.id)
+ return i;
+
+ return NULL;
+}
+
+static struct drm_connector *_dpu_rm_get_connector(
+ struct drm_encoder *enc)
+{
+ struct drm_connector *conn = NULL;
+ struct list_head *connector_list =
+ &enc->dev->mode_config.connector_list;
+
+ list_for_each_entry(conn, connector_list, head)
+ if (conn->encoder == enc)
+ return conn;
+
+ return NULL;
+}
+
+/**
+ * _dpu_rm_release_rsvp - release resources and release a reservation
+ * @rm: KMS handle
+ * @rsvp: RSVP pointer to release and release resources for
+ */
+static void _dpu_rm_release_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector *conn)
+{
+ struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ if (!rsvp)
+ return;
+
+ DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+ list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+ if (rsvp == rsvp_c) {
+ list_del(&rsvp_c->list);
+ break;
+ }
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp == rsvp) {
+ blk->rsvp = NULL;
+ DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ if (blk->rsvp_nxt == rsvp) {
+ blk->rsvp_nxt = NULL;
+ DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ }
+ }
+
+ kfree(rsvp);
+}
+
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct drm_connector *conn;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ rsvp = _dpu_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+ DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ conn = _dpu_rm_get_connector(enc);
+ if (!conn) {
+ DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ _dpu_rm_release_rsvp(rm, rsvp, conn);
+end:
+ mutex_unlock(&rm->rm_lock);
+}
+
+static int _dpu_rm_commit_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+ int ret = 0;
+
+ /* Swap next rsvp to be the active */
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp_nxt) {
+ blk->rsvp = blk->rsvp_nxt;
+ blk->rsvp_nxt = NULL;
+ }
+ }
+ }
+
+ if (!ret)
+ DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
+ rsvp->topology);
+
+ return ret;
+}
+
+int dpu_rm_reserve(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_requirements reqs;
+ int ret;
+
+ if (!rm || !enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ /* Check if this is just a page-flip */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+ conn_state->connector->base.id, enc->base.id,
+ crtc_state->crtc->base.id, test_only);
+
+ mutex_lock(&rm->rm_lock);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
+
+ ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
+ conn_state, &reqs, topology);
+ if (ret) {
+ DPU_ERROR("failed to populate hw requirements\n");
+ goto end;
+ }
+
+ /*
+ * We only support one active reservation per-hw-block. But to implement
+ * transactional semantics for test-only, and for allowing failure while
+ * modifying your existing reservation, over the course of this
+ * function we can have two reservations:
+ * Current: Existing reservation
+ * Next: Proposed reservation. The proposed reservation may fail, or may
+ * be discarded if in test-only mode.
+ * If reservation is successful, and we're not in test-only, then we
+ * replace the current with the next.
+ */
+ rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
+
+ /*
+ * User can request that we clear out any reservation during the
+ * atomic_check phase by using this CLEAR bit
+ */
+ if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+ DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+ rsvp_cur->seq, rsvp_cur->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+ rsvp_cur = NULL;
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
+ }
+
+ /* Check the proposed reservation, store it in hw's "next" field */
+ ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ rsvp_nxt, &reqs);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
+
+ if (ret) {
+ DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+ /*
+ * Normally, if test_only, test the reservation and then undo
+ * However, if the user requests LOCK, then keep the reservation
+ * made during the atomic_check phase.
+ */
+ DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else {
+ if (test_only && RM_RQ_LOCK(&reqs))
+ DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+ ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ }
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
+
+end:
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 000000000000..ffd1841a6067
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+/**
+ * enum dpu_rm_topology_name - HW resource use case in use by connector
+ * @DPU_RM_TOPOLOGY_NONE: No topology in use currently
+ * @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum dpu_rm_topology_name {
+ DPU_RM_TOPOLOGY_NONE = 0,
+ DPU_RM_TOPOLOGY_SINGLEPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+ DPU_RM_TOPOLOGY_MAX,
+};
+
+/**
+ * enum dpu_rm_topology_control - HW resource use case in use by connector
+ * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ * test, reserve the resources for this display.
+ * Normal behavior would not impact the reservation
+ * list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ * release any reservation held by this display.
+ * Normal behavior would not impact the
+ * reservation list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_DS : Require layer mixers with DS capabilities
+ */
+enum dpu_rm_topology_control {
+ DPU_RM_TOPCTL_RESERVE_LOCK,
+ DPU_RM_TOPCTL_RESERVE_CLEAR,
+ DPU_RM_TOPCTL_DS,
+};
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ * list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct dpu_rm {
+ struct drm_device *dev;
+ struct list_head rsvps;
+ struct list_head hw_blks[DPU_HW_BLK_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+ uint32_t lm_max_width;
+ uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
+};
+
+/**
+ * struct dpu_rm_hw_blk - resource manager internal structure
+ * forward declaration for single iterator definition without void pointer
+ */
+struct dpu_rm_hw_blk;
+
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct dpu_rm_hw_iter {
+ void *hw;
+ struct dpu_rm_hw_blk *blk;
+ uint32_t enc_id;
+ enum dpu_hw_blk_type type;
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the connections in use and user requirements, specified through related
+ * topology control properties, and reserve hardware blocks to that
+ * display chain.
+ * HW blocks can then be accessed through dpu_rm_get_* functions.
+ * HW reservations should be released via dpu_rm_release.
+ * @rm: DPU Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @topology: Pointer to topology info for the display
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only);
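
In the intended flow an encoder reserves twice: once during atomic check with test_only set (validated, then discarded unless the LOCK property is set) and again at commit to actually claim the blocks. A sketch of that ordering, with an illustrative wrapper:

    static int example_reserve_path(struct dpu_rm *rm, struct drm_encoder *enc,
                                    struct drm_crtc_state *crtc_state,
                                    struct drm_connector_state *conn_state,
                                    struct msm_display_topology topology)
    {
        int ret;

        /* atomic_check: validate only (test_only = true) */
        ret = dpu_rm_reserve(rm, enc, crtc_state, conn_state, topology, true);
        if (ret)
            return ret;

        /* commit: claim the blocks (test_only = false) */
        return dpu_rm_reserve(rm, enc, crtc_state, conn_state, topology, false);
    }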
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ * HW blocks previously reserved for that use case.
+ * @rm: DPU Resource Manager handle
+ * @enc: DRM Encoder handle
+ * @Return: none
+ */
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
+ * This is never reserved, and is usable by any display.
+ * @rm: DPU Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ * using dpu_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type);
+
+/**
+ * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ * Meant to do a single pass through the hardware list to iteratively
+ * retrieve hardware blocks of a given type for a given encoder.
+ * Initialize an iterator object.
+ * Set hw block type of interest. Set encoder id of interest, 0 for any.
+ * Function returns first hw of type for that encoder.
+ * Subsequent calls will return the next reserved hw of that type in-order.
+ * Iterator HW pointer will be null on failure to find hw.
+ * @rm: DPU Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
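
In practice the single-pass iteration described above looks like the following sketch (the CTL walk is illustrative; the cast relies on each hardware object embedding its dpu_hw_blk base, as dpu_rm.c itself assumes):

    struct dpu_rm_hw_iter iter;

    dpu_rm_init_hw_iter(&iter, drm_enc->base.id, DPU_HW_BLK_CTL);
    while (dpu_rm_get_hw(rm, &iter)) {
        struct dpu_hw_ctl *ctl = (struct dpu_hw_ctl *)iter.hw;

        /* program ctl ... */
    }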
+
+/**
+ * dpu_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int dpu_rm_check_property_topctl(uint64_t val);
+
+/**
+ * dpu_rm_get_topology_name - returns the name of the given topology
+ * definition
+ * @topology: topology definition
+ * @Return: name of the topology
+ */
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology);
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 000000000000..ae0ca5076238
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,1007 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+ TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+ u32 lut, u32 lut_usage),
+ TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(bool, rt)
+ __field(u32, fl)
+ __field(u64, lut)
+ __field(u32, lut_usage)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->rt = rt;
+ __entry->fl = fl;
+ __entry->lut = lut;
+ __entry->lut_usage = lut_usage;
+ ),
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+ __entry->pnum, __entry->fmt,
+ __entry->rt, __entry->fl,
+ __entry->lut, __entry->lut_usage)
+);
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+ TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+ u32 safe_lut),
+ TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(u32, mode)
+ __field(u32, danger_lut)
+ __field(u32, safe_lut)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->mode = mode;
+ __entry->danger_lut = danger_lut;
+ __entry->safe_lut = safe_lut;
+ ),
+ TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+ __entry->pnum, __entry->fmt,
+ __entry->mode, __entry->danger_lut,
+ __entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+ TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+ TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, xin_id)
+ __field(u32, rd_lim)
+ __field(u32, vbif_idx)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->xin_id = xin_id;
+ __entry->rd_lim = rd_lim;
+ __entry->vbif_idx = vbif_idx;
+ ),
+ TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+ __entry->pnum, __entry->xin_id, __entry->rd_lim,
+ __entry->vbif_idx)
+)
+
+TRACE_EVENT(dpu_perf_update_bus,
+ TP_PROTO(int client, unsigned long long ab_quota,
+ unsigned long long ib_quota),
+ TP_ARGS(client, ab_quota, ib_quota),
+ TP_STRUCT__entry(
+ __field(int, client)
+ __field(u64, ab_quota)
+ __field(u64, ib_quota)
+ ),
+ TP_fast_assign(
+ __entry->client = client;
+ __entry->ab_quota = ab_quota;
+ __entry->ib_quota = ib_quota;
+ ),
+ TP_printk("Request client:%d ab=%llu ib=%llu",
+ __entry->client,
+ __entry->ab_quota,
+ __entry->ib_quota)
+)
+
+
+TRACE_EVENT(dpu_cmd_release_bw,
+ TP_PROTO(u32 crtc_id),
+ TP_ARGS(crtc_id),
+ TP_STRUCT__entry(
+ __field(u32, crtc_id)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ ),
+ TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ ),
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+)
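
The B/E-prefixed output follows the Android systrace marker convention (begin/end events keyed by pid). Wrappers along these lines, shown here only as an assumption about how callers would use the tracepoint, give scoped markers:

    /* Hypothetical wrappers around the tracepoint above. */
    #define EXAMPLE_ATRACE_BEGIN(name) \
        trace_tracing_mark_write(current->tgid, name, true)
    #define EXAMPLE_ATRACE_END(name) \
        trace_tracing_mark_write(current->tgid, name, false)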
+
+TRACE_EVENT(dpu_trace_counter,
+ TP_PROTO(int pid, char *name, int value),
+ TP_ARGS(pid, name, value),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(counter_name, name)
+ __field(int, value)
+ ),
+ TP_fast_assign(
+ __entry->pid = current->tgid;
+ __assign_str(counter_name, name);
+ __entry->value = value;
+ ),
+ TP_printk("%d|%s|%d", __entry->pid,
+ __get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(dpu_perf_crtc_update,
+ TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+ u64 bw_ctl_ebi, u32 core_clk_rate,
+ bool stop_req, u32 update_bus, u32 update_clk),
+ TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
+ stop_req, update_bus, update_clk),
+ TP_STRUCT__entry(
+ __field(u32, crtc)
+ __field(u64, bw_ctl_mnoc)
+ __field(u64, bw_ctl_llcc)
+ __field(u64, bw_ctl_ebi)
+ __field(u32, core_clk_rate)
+ __field(bool, stop_req)
+ __field(u32, update_bus)
+ __field(u32, update_clk)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->bw_ctl_mnoc = bw_ctl_mnoc;
+ __entry->bw_ctl_llcc = bw_ctl_llcc;
+ __entry->bw_ctl_ebi = bw_ctl_ebi;
+ __entry->core_clk_rate = core_clk_rate;
+ __entry->stop_req = stop_req;
+ __entry->update_bus = update_bus;
+ __entry->update_clk = update_clk;
+ ),
+ TP_printk(
+ "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ __entry->crtc,
+ __entry->bw_ctl_mnoc,
+ __entry->bw_ctl_llcc,
+ __entry->bw_ctl_ebi,
+ __entry->core_clk_rate,
+ __entry->stop_req,
+ __entry->update_bus,
+ __entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ __field( enum dpu_pingpong, pp_idx )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ __entry->pp_idx = pp_idx;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ ),
+ TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+
+TRACE_EVENT(dpu_enc_enable,
+ TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+ TP_ARGS(drm_id, hdisplay, vdisplay),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, hdisplay )
+ __field( int, vdisplay )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hdisplay = hdisplay;
+ __entry->vdisplay = vdisplay;
+ ),
+ TP_printk("id=%u, mode=%dx%d",
+ __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+ TP_PROTO(uint32_t drm_id, int val),
+ TP_ARGS(drm_id, val),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, val )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->val = val;
+ ),
+ TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+ TP_PROTO(uint32_t drm_id, int count),
+ TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+ TP_PROTO(uint32_t drm_id, int ctl_idx),
+ TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+ TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+ TP_ARGS(drm_id, flags, private_flags),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, flags )
+ __field( int, private_flags )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->flags = flags;
+ __entry->private_flags = private_flags;
+ ),
+ TP_printk("id=%u, flags=%u, private_flags=%d",
+ __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ ),
+ TP_printk("id=%u, enable=%s",
+ __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+ TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+ int rc_state, const char *stage),
+ TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, sw_event )
+ __field( bool, idle_pc_supported )
+ __field( int, rc_state )
+ __string( stage_str, stage )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->sw_event = sw_event;
+ __entry->idle_pc_supported = idle_pc_supported;
+ __entry->rc_state = rc_state;
+ __assign_str(stage_str, stage);
+ ),
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d\n",
+ __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+ __entry->idle_pc_supported ? "true" : "false",
+ __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+ TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, event, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+ __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+ TP_PROTO(uint32_t drm_id, unsigned int idx,
+ unsigned long frame_busy_mask),
+ TP_ARGS(drm_id, idx, frame_busy_mask),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, idx )
+ __field( unsigned long, frame_busy_mask )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->idx = idx;
+ __entry->frame_busy_mask = frame_busy_mask;
+ ),
+ TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+ __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+ TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+ pending_flush_ret),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( int, pending_kickoff_cnt )
+ __field( int, ctl_idx )
+ __field( u32, pending_flush_ret )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->pending_kickoff_cnt = pending_kickoff_cnt;
+ __entry->ctl_idx = ctl_idx;
+ __entry->pending_flush_ret = pending_flush_ret;
+ ),
+ TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ "pending_flush_ret=%u", __entry->drm_id,
+ __entry->intf_idx, __entry->pending_kickoff_cnt,
+ __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( ktime_t, time )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->time = time;
+ ),
+ TP_printk("id=%u, time=%lld", __entry->drm_id,
+ ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+ TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+ s64 expected_time, int atomic_cnt),
+ TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int32_t, hw_id )
+ __field( int, rc )
+ __field( s64, time )
+ __field( s64, expected_time )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hw_id = hw_id;
+ __entry->rc = rc;
+ __entry->time = time;
+ __entry->expected_time = expected_time;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+ __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, pp, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+ __entry->pp, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+ u32 event),
+ TP_ARGS(drm_id, pp, new_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, new_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->new_count = new_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+ __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+ int kickoff_count, u32 event),
+ TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, timeout_count )
+ __field( int, kickoff_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->timeout_count = timeout_count;
+ __entry->kickoff_count = kickoff_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+ __entry->drm_id, __entry->pp, __entry->timeout_count,
+ __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, intf_idx, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+ __entry->intf_idx, __entry->enable ? "true" : "false",
+ __entry->drm_id)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+ TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+ struct drm_plane_state *state, struct dpu_plane_state *pstate,
+ uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+ uint64_t modifier),
+ TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+ pixel_format, modifier),
+ TP_STRUCT__entry(
+ __field( uint32_t, crtc_id )
+ __field( uint32_t, plane_id )
+ __field( struct drm_plane_state*,state )
+ __field( struct dpu_plane_state*,pstate )
+ __field( uint32_t, stage_idx )
+ __field( enum dpu_sspp, sspp )
+ __field( uint32_t, pixel_format )
+ __field( uint64_t, modifier )
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ __entry->plane_id = plane_id;
+ __entry->state = state;
+ __entry->pstate = pstate;
+ __entry->stage_idx = stage_idx;
+ __entry->sspp = sspp;
+ __entry->pixel_format = pixel_format;
+ __entry->modifier = modifier;
+ ),
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
+ "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+ "multirect_index:%d multirect_mode:%u pix_format:%u "
+ "modifier:%llu",
+ __entry->crtc_id, __entry->plane_id,
+ __entry->state->fb ? __entry->state->fb->base.id : -1,
+ __entry->state->src_w >> 16, __entry->state->src_h >> 16,
+ __entry->state->src_x >> 16, __entry->state->src_y >> 16,
+ __entry->state->crtc_w, __entry->state->crtc_h,
+ __entry->state->crtc_x, __entry->state->crtc_y,
+ __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
+ __entry->pstate->multirect_index,
+ __entry->pstate->multirect_mode, __entry->pixel_format,
+ __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+ TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+ TP_ARGS(drm_id, mixer, bounds),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, mixer )
+ __field( struct drm_rect *, bounds )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->mixer = mixer;
+ __entry->bounds = bounds;
+ ),
+ TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+ __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+ TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+ struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enc_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( uint32_t, enc_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enc_id = enc_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
+ "vblank_req:%s}",
+ __entry->drm_id, __entry->enc_id,
+ __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+ __entry->drm_id, __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+ TP_PROTO(uint32_t drm_id, int frame_pending),
+ TP_ARGS(drm_id, frame_pending),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, frame_pending )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->frame_pending = frame_pending;
+ ),
+ TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+ __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+ TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+ enum dpu_sspp_multirect_index multirect_index),
+ TP_ARGS(index, layout, multirect_index),
+ TP_STRUCT__entry(
+ __field( enum dpu_sspp, index )
+ __field( struct dpu_hw_fmt_layout*, layout )
+ __field( enum dpu_sspp_multirect_index, multirect_index)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->layout = layout;
+ __entry->multirect_index = multirect_index;
+ ),
+ TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+ "multirect_index:%d", __entry->index, __entry->layout->width,
+ __entry->layout->height, __entry->layout->plane_addr[0],
+ __entry->layout->plane_size[0],
+ __entry->layout->plane_addr[1],
+ __entry->layout->plane_size[1],
+ __entry->layout->plane_addr[2],
+ __entry->layout->plane_size[2],
+ __entry->layout->plane_addr[3],
+ __entry->layout->plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+ TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+ TP_ARGS(drm_id, is_virtual, multirect_mode),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, is_virtual )
+ __field( uint32_t, multirect_mode )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->is_virtual = is_virtual;
+ __entry->multirect_mode = multirect_mode;
+ ),
+ TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+ __entry->is_virtual ? "true" : "false",
+ __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ ),
+ TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type,
+ __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
+ uint32_t pp_id),
+ TP_ARGS(id, type, enc_id, pp_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ __field( uint32_t, pp_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ __entry->pp_id = pp_id;
+ ),
+ TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->type, __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+ TP_PROTO(enum dpu_vbif index, u32 xin_id),
+ TP_ARGS(index, xin_id),
+ TP_STRUCT__entry(
+ __field( enum dpu_vbif, index )
+ __field( u32, xin_id )
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->xin_id = xin_id;
+ ),
+ TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+ TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+ TP_ARGS(pp, cfg),
+ TP_STRUCT__entry(
+ __field( enum dpu_pingpong, pp )
+ __field( u32, cfg )
+ ),
+ TP_fast_assign(
+ __entry->pp = pp;
+ __entry->cfg = cfg;
+ ),
+ TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( int, enable_count )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->enable_count = enable_count;
+ ),
+ TP_printk("irq_idx:%d enable_count:%u", __entry->irq_idx,
+ __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( struct dpu_irq_callback *, callback)
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->callback = callback;
+ ),
+ TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+ __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+ TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+ TP_ARGS(dev, stop_req, clk_rate),
+ TP_STRUCT__entry(
+ __field( struct drm_device *, dev )
+ __field( bool, stop_req )
+ __field( u64, clk_rate )
+ ),
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->stop_req = stop_req;
+ __entry->clk_rate = clk_rate;
+ ),
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+ __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+ trace_dpu_trace_counter(current->tgid, name, value)
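+
+/*
+ * Illustrative use of the ATRACE helpers (the slice and counter names
+ * below are hypothetical, not taken from the driver):
+ *
+ *	DPU_ATRACE_BEGIN("encoder_kickoff");
+ *	... perform the kickoff ...
+ *	DPU_ATRACE_INT("pending_kickoff_cnt", cnt);
+ *	DPU_ATRACE_END("encoder_kickoff");
+ */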
+
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 000000000000..295528292296
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif: Pointer to hardware vbif driver
+ * @xin_id: Client interface identifier
+ * @return: 0 if success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ ktime_t timeout;
+ bool status;
+ int rc;
+
+ if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+ return -EINVAL;
+ }
+
+ timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+ for (;;) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ if (status)
+ break;
+ if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ break;
+ }
+ usleep_range(501, 1000);
+ }
+
+ if (!status) {
+ rc = -ETIMEDOUT;
+ DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+ vbif->idx - VBIF_0, xin_id);
+ } else {
+ rc = 0;
+ DPU_DEBUG("VBIF %d client %d is halted\n",
+ vbif->idx - VBIF_0, xin_id);
+ }
+
+ return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @ot_lim: Pointer to OT limit to be modified
+ * @params: Pointer to usecase parameters
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+ u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+ u64 pps;
+ const struct dpu_vbif_dynamic_ot_tbl *tbl;
+ u32 i;
+
+ if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+ return;
+
+ /* Dynamic OT setting done only for WFD */
+ if (!params->is_wfd)
+ return;
+
+ pps = params->frame_rate;
+ pps *= params->width;
+ pps *= params->height;
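+	/* e.g. a 1920x1080 surface at 60 fps gives pps = 124416000 */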
+
+ tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+ &vbif->cap->dynamic_ot_wr_tbl;
+
+ for (i = 0; i < tbl->count; i++) {
+ if (pps <= tbl->cfg[i].pps) {
+ *ot_lim = tbl->cfg[i].ot_limit;
+ break;
+ }
+ }
+
+ DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ vbif->idx - VBIF_0, params->xin_id,
+ params->width, params->height, params->frame_rate,
+ pps, *ot_lim);
+}
+
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @params: Pointer to usecase parameters
+ * @return: OT limit
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+ struct dpu_vbif_set_ot_params *params)
+{
+ u32 ot_lim = 0;
+ u32 val;
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		return 0; /* a u32 return cannot carry -EINVAL; 0 means no limit */
+ }
+
+ if (vbif->cap->default_ot_wr_limit && !params->rd)
+ ot_lim = vbif->cap->default_ot_wr_limit;
+ else if (vbif->cap->default_ot_rd_limit && params->rd)
+ ot_lim = vbif->cap->default_ot_rd_limit;
+
+ /*
+ * If default ot is not set from dt/catalog,
+ * then do not configure it.
+ */
+ if (ot_lim == 0)
+ goto exit;
+
+ /* Modify the limits if the target and the use case requires it */
+ _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+ if (vbif && vbif->ops.get_limit_conf) {
+ val = vbif->ops.get_limit_conf(vbif,
+ params->xin_id, params->rd);
+ if (val == ot_lim)
+ ot_lim = 0;
+ }
+
+exit:
+ DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+ vbif->idx - VBIF_0, params->xin_id, ot_lim);
+ return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms: DPU handler
+ * @params: Pointer to usecase parameters
+ *
+ * Note: this function blocks while waiting for the bus to halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ u32 ot_lim;
+ int ret, i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i] &&
+ dpu_kms->hw_vbif[i]->idx == params->vbif_idx)
+ vbif = dpu_kms->hw_vbif[i];
+ }
+
+ if (!vbif || !mdp) {
+ DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+ vbif != 0, mdp != 0);
+ return;
+ }
+
+ if (!mdp->ops.setup_clk_force_ctrl ||
+ !vbif->ops.set_limit_conf ||
+ !vbif->ops.set_halt_ctrl)
+ return;
+
+ /* set write_gather_en for all write clients */
+ if (vbif->ops.set_write_gather_en && !params->rd)
+ vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+ ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+ if (ot_lim == 0)
+ goto exit;
+
+ trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+ params->vbif_idx);
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+ ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+ if (ret)
+ trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+ return;
+}
+
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ const struct dpu_vbif_qos_tbl *qos_tbl;
+ int i;
+
+ if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i] &&
+ dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+ vbif = dpu_kms->hw_vbif[i];
+ break;
+ }
+ }
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+ return;
+ }
+
+ if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+ DPU_DEBUG("qos remap not supported\n");
+ return;
+ }
+
+ qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+ &vbif->cap->qos_nrt_tbl;
+
+ if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+ DPU_DEBUG("qos tbl not defined\n");
+ return;
+ }
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+ DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+ params->vbif_idx, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ }
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ u32 i, pnd, src;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->ops.clear_errors) {
+ vbif->ops.clear_errors(vbif, &pnd, &src);
+ if (pnd || src) {
+ DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
+ vbif->idx - VBIF_0, pnd, src);
+ }
+ }
+ }
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ int i, j;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+ for (j = 0; j < vbif->cap->memtype_count; j++)
+ vbif->ops.set_mem_type(
+ vbif, j, vbif->cap->memtype[j]);
+ }
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_vbif);
+ dpu_kms->debugfs_vbif = NULL;
+}
+
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+ char vbif_name[32];
+ struct dentry *debugfs_vbif;
+ int i, j;
+
+ dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
+ if (!dpu_kms->debugfs_vbif) {
+ DPU_ERROR("failed to create vbif debugfs\n");
+ return -EINVAL;
+ }
+
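+	/*
+	 * Layout: one <root>/vbif/<id> directory per catalog entry, holding
+	 * the features, xin_halt_timeout and default OT limit nodes, plus a
+	 * pps/ot_limit node pair per dynamic OT table entry.
+	 */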
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+ debugfs_vbif = debugfs_create_dir(vbif_name,
+ dpu_kms->debugfs_vbif);
+
+ debugfs_create_u32("features", 0600, debugfs_vbif,
+ (u32 *)&vbif->features);
+
+ debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+ (u32 *)&vbif->xin_halt_timeout);
+
+ debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_rd_limit);
+
+ debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_wr_limit);
+
+ for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_rd_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+
+ for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_wr_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+ }
+
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 000000000000..f17af52dbbd5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
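+/**
+ * struct dpu_vbif_set_ot_params - OT (outstanding transaction) parameters
+ * @xin_id:	client interface identifier
+ * @num:	pipe identifier (debug only)
+ * @width:	source buffer width, in pixels
+ * @height:	source buffer height, in pixels
+ * @frame_rate:	client frame rate, in fps
+ * @rd:	true for a read client, false for a write client
+ * @is_wfd:	true if the client is used in a WFD use case
+ * @vbif_idx:	vbif identifier
+ * @clk_ctrl:	clock control identifier of the xin
+ */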
+struct dpu_vbif_set_ot_params {
+ u32 xin_id;
+ u32 num;
+ u32 width;
+ u32 height;
+ u32 frame_rate;
+ bool rd;
+ bool is_wfd;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+};
+
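+/**
+ * struct dpu_vbif_set_memtype_params - xin memory type parameters
+ * @xin_id:	client interface identifier
+ * @vbif_idx:	vbif identifier
+ * @clk_ctrl:	clock control identifier of the xin
+ * @is_cacheable:	true if the xin client addresses cacheable memory
+ */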
+struct dpu_vbif_set_memtype_params {
+ u32 xin_id;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+ bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+ u32 vbif_idx;
+ u32 xin_id;
+ u32 clk_ctrl;
+ u32 num;
+ bool is_rt;
+};
+
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms: DPU handler
+ * @params: Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms: DPU handler
+ * @params: Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
+#else
+static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
+ struct dentry *debugfs_root)
+{
+ return 0;
+}
+static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644
index 000000000000..4f12e5c534c8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -0,0 +1,1376 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+ ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+ (((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b) ? (__a) : (__b))
+#endif
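+
+/*
+ * MSM_MEDIA_ALIGN rounds __sz up to the next multiple of __align (a
+ * mask when __align is a power of two, a divide otherwise), while
+ * MSM_MEDIA_ROUNDUP is a plain ceiling division, e.g.:
+ *	MSM_MEDIA_ALIGN(1080, 32) == 1088
+ *	MSM_MEDIA_ROUNDUP(1080, 32) == 34
+ */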
+
+enum color_fmts {
+ /* Venus NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV12,
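+	/*
+	 * Worked example for COLOR_FMT_NV12 at 1920x1080, assuming
+	 * Extradata <= Y_Stride * 8:
+	 *	Y_Stride = UV_Stride = align(1920, 128) = 1920
+	 *	Y_Scanlines = align(1080, 32) = 1088
+	 *	UV_Scanlines = align(540, 16) = 544
+	 *	Total = align(1920*1088 + 1920*544 + 1920*8, 4096) = 3149824
+	 */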
+
+ /* Venus NV21:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * V U V U V U V U V U V U . . . . ^
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+	 * Total size = align(Y_Stride * Y_Scanlines
+	 *          + UV_Stride * UV_Scanlines
+	 *          + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV21,
+ /* Venus NV12_MVTB:
+ * Two YUV 4:2:0 images/views one after the other
+ * in a top-bottom layout, same as NV12
+ * with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_1
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_2
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * View_1 begin at: 0 (zero)
+ * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align((2*(Y_Stride * Y_Scanlines)
+ * + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+ */
+ COLOR_FMT_NV12_MVTB,
+ /*
+ * The buffer can be of 2 types:
+ * (1) Venus NV12 UBWC Progressive
+ * (2) Venus NV12 UBWC Interlaced
+ *
+ * (1) Venus NV12 UBWC Progressive Buffer Format:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Y_Stride = align(Width, 128)
+ * UV_Stride = align(Width, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ *
+ *
+ * (2) Venus NV12 UBWC Interlaced Buffer Format:
+ * Compressed Macro-tile format for NV12 interlaced.
+ * Contains 8 planes in the following order -
+ * (A) Y_Meta_Top_Field_Plane
+ * (B) Y_UBWC_Top_Field_Plane
+ * (C) UV_Meta_Top_Field_Plane
+ * (D) UV_UBWC_Top_Field_Plane
+ * (E) Y_Meta_Bottom_Field_Plane
+ * (F) Y_UBWC_Bottom_Field_Plane
+ * (G) UV_Meta_Bottom_Field_Plane
+ * (H) UV_UBWC_Bottom_Field_Plane
+ * Y_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Top_Field_Plane.
+ * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+ * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit Y samples for top field of an interlaced frame.
+ *
+ * UV_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Top_Field_Plane.
+ * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+ * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit subsampled color difference samples for top field of an
+ * interlaced frame.
+ *
+ * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+ * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+ * format for bottom field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+ * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+ *
+ * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+ * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+ * macro-tile format for bottom field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+ * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit subsampled color difference samples for bottom
+ * field of an interlaced frame.
+ *
+ * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * <-----Y_TF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_TF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_TF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_TF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_TF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-----Y_BF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_BF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_BF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_BF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_BF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Half_height = (Height+1)>>1
+ * Y_TF_Stride = align(Width, 128)
+ * UV_TF_Stride = align(Width, 128)
+ * Y_TF_Scanlines = align(Half_height, 32)
+ * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+ * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+ * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_TF_Meta_Plane_size =
+ * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+ * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_TF_Meta_Plane_size =
+ * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+ * Y_BF_Stride = align(Width, 128)
+ * UV_BF_Stride = align(Width, 128)
+ * Y_BF_Scanlines = align(Half_height, 32)
+ * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+ * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+ * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_BF_Meta_Plane_size =
+ * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+ * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_BF_Meta_Plane_size =
+ * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+ * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+ * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+	 *             Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size
+	 *             + max(Extradata, Y_TF_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_UBWC,
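+	/*
+	 * Illustrative meta-plane sizing at 1920x1080, assuming a 32x8 Y
+	 * tile and a 16x8 UV tile (tile sizes are implementation-defined):
+	 *	Y_Meta_Stride = align(roundup(1920, 32), 64) = 64
+	 *	Y_Meta_Scanlines = align(roundup(1080, 8), 16) = 144
+	 *	Y_Meta_Plane_size = align(64 * 144, 4096) = 12288
+	 */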
+ /* Venus NV12 10-bit UBWC:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 4/3, 256)
+ * UV_Stride = align(Width * 4/3, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_BPP10_UBWC,
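+	/*
+	 * Worked example (hypothetical 1920x1080 surface):
+	 *   Y_Stride          = align(1920 * 4/3, 256)   = 2560
+	 *   Y_Scanlines       = align(1080, 16)          = 1088
+	 *   Y_UBWC_Plane_Size = align(2560 * 1088, 4096) = 2785280
+	 */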
+ /* Venus RGBA8888 format:
+ * Contains 1 plane in the following order -
+ * (A) RGBA plane
+ *
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Plane_size + Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888,
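+	/*
+	 * Worked example (hypothetical 1280x720 surface):
+	 *   RGB_Stride     = align(1280 * 4, 128)    = 5120
+	 *   RGB_Scanlines  = align(720, 32)          = 736
+	 *   RGB_Plane_size = align(5120 * 736, 4096) = 3768320
+	 */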
+ /* Venus RGBA8888 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888_UBWC,
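+	/*
+	 * Worked example for the meta plane (hypothetical 1280x720
+	 * surface, using the 16x4 RGB tile from the helpers below):
+	 *   RGB_Meta_Stride     = align(roundup(1280, 16), 64) = 128
+	 *   RGB_Meta_Scanlines  = align(roundup(720, 4), 16)   = 192
+	 *   RGB_Meta_Plane_size = align(128 * 192, 4096)       = 24576
+	 */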
+ /* Venus RGBA1010102 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA1010102_UBWC,
+ /* Venus RGB565 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGB plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 2, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGB565_UBWC,
+ /* P010 UBWC:
+ * Compressed Macro-tile format for P010.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 2, 256)
+ * UV_Stride = align(Width * 2, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_P010_UBWC,
+ /* Venus P010:
+ * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+ * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+ * color difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width * 2 aligned to 128
+ * UV_Stride : Width * 2 aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align(Y_Stride * Y_Scanlines
+ * + UV_Stride * UV_Scanlines
+ * + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_P010,
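+	/*
+	 * Worked example (hypothetical 1920x1080 surface):
+	 *   Y_Stride     = align(1920 * 2, 128) = 3840
+	 *   Y_Scanlines  = align(1080, 32)      = 1088
+	 *   UV_Scanlines = align(1080 / 2, 16)  = 544
+	 */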
+};
+
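+/*
+ * Self-referential defines: enum values are invisible to the
+ * preprocessor, so these let users of this header test for the newer
+ * formats with #ifdef.
+ */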
+#define COLOR_FMT_RGBA1010102_UBWC COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010 COLOR_FMT_P010
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+ (void)height;
+ (void)width;
+
+ /*
+	 * In the future, calculate the size based on the width/height, but
+	 * just hard-code it for now, since 16K satisfies all current use cases.
+ */
+ return 16 * 1024;
+}
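+
+/*
+ * For reference, a sketch of the align()/roundup() operations used in
+ * the format comments above, assuming the MSM_MEDIA_* macros defined
+ * earlier in this header:
+ *
+ *	align(v, a)   == MSM_MEDIA_ALIGN(v, a)   -> next multiple of a
+ *	roundup(v, d) == MSM_MEDIA_ROUNDUP(v, d) -> ceil(v / d)
+ *
+ * e.g. MSM_MEDIA_ALIGN(1080, 32) == 1088 and
+ *	MSM_MEDIA_ROUNDUP(1920, 32) == 60.
+ */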
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
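+		/*
+		 * 10-bit UBWC packs 3 pixels into 4 bytes, hence the 4/3
+		 * byte stride; aligning the width to 192 pixels first keeps
+		 * the result a 256-byte multiple (192 * 4/3 == 256).
+		 */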
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+		stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ alignment = 16;
+ break;
+ default:
+		goto invalid_input;
+ }
+ sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 16;
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 32;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+ int y_tile_width = 0, y_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_width = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_tile_width = 48;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+ y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+ return y_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+ int y_tile_height = 0, y_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ y_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+ y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+ return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+ int uv_tile_width = 0, uv_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_width = 16;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ uv_tile_width = 24;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+ uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+ return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+ int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ uv_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+ uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+ return uv_meta_scanlines;
+}
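+
+/*
+ * UBWC tile geometry (in pixels, width x height) implied by the four
+ * meta-plane helpers above:
+ *
+ *	format				Y tile	UV tile
+ *	COLOR_FMT_NV12_UBWC		32x8	16x8
+ *	COLOR_FMT_NV12_BPP10_UBWC	48x4	24x4
+ *	COLOR_FMT_P010_UBWC		32x4	16x4
+ */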
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment = 0, stride = 0, bpp = 4;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 128;
+ break;
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 256;
+ bpp = 2;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ alignment = 256;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+ return stride;
+}
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment = 0, scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 32;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+ return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+ int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_width = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+ rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+ return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+ int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+ rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+ return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(
+ int color_fmt, int width, int height)
+{
+ const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+ unsigned int uv_alignment = 0, size = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+ unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+ unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+ unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+ unsigned int rgb_stride = 0, rgb_scanlines = 0;
+ unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+ unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_P010:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane +
+ MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_MVTB:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane;
+ size = 2 * size + extra_size;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines =
+ VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines =
+ VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+			uv_meta_plane) * 2 +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888:
+ rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+ size = rgb_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+ 4096);
+ rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+ height);
+ rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+ rgb_meta_scanlines, 4096);
+ size = rgb_ubwc_plane + rgb_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return size;
+}
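+
+/*
+ * Usage sketch (illustrative only, not used by the driver): size a
+ * progressive NV12 buffer and locate its chroma plane. The function
+ * name is hypothetical; the VENUS_* helpers are the ones above, and
+ * the UV plane is assumed to start right after the padded Y plane, as
+ * the COLOR_FMT_NV12 sizing implies.
+ */
+static inline unsigned int VENUS_NV12_UV_OFFSET(int width, int height)
+{
+	/* bytes from buffer start to the interleaved UV plane */
+	return VENUS_Y_STRIDE(COLOR_FMT_NV12, width) *
+		VENUS_Y_SCANLINES(COLOR_FMT_NV12, height);
+}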
+
+static inline unsigned int VENUS_VIEW2_OFFSET(
+ int color_fmt, int width, int height)
+{
+ unsigned int offset = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+			uv_stride, y_sclines, uv_sclines;
+
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_MVTB:
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines;
+ offset = y_plane + uv_plane;
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return offset;
+}
+
+#endif
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
index 576cea30d391..4b36b8954bae 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 14bd3bd3e040..457c29dba4a1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -129,7 +129,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct msm_kms *kms = &mdp4_kms->base.base;
msm_gem_put_iova(val, kms->aspace);
- drm_gem_object_unreference_unlocked(val);
+ drm_gem_object_put_unlocked(val);
}
static void mdp4_crtc_destroy(struct drm_crtc *crtc)
@@ -201,7 +201,7 @@ static void blend_setup(struct drm_crtc *crtc)
int idx = idxs[pipe_id];
if (idx > 0) {
const struct mdp_format *format =
- to_mdp_format(msm_framebuffer_format(plane->fb));
+ to_mdp_format(msm_framebuffer_format(plane->state->fb));
alpha[idx-1] = format->alpha_enable;
}
}
@@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
mdp4_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
blend_setup(crtc);
@@ -382,7 +383,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
- drm_gem_object_reference(next_bo);
+ drm_gem_object_get(next_bo);
msm_gem_get_iova(next_bo, kms->aspace, &iova);
/* enable cursor: */
@@ -467,7 +468,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail:
- drm_gem_object_unreference_unlocked(cursor_bo);
+ drm_gem_object_put_unlocked(cursor_bo);
return ret;
}
@@ -664,7 +665,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
NULL);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 6a1ebdace391..6a1ebdace391 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index ba8e587f734b..ba8e587f734b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
index b764d7f10312..b764d7f10312 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index f7f087419ed8..44d1cda56974 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -125,6 +125,8 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);
+
/* see 119ecb7fd */
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_put(crtc);
@@ -164,7 +166,7 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
- drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index a1b3e31e959e..0c13f8697bfe 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -22,7 +22,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
-#include "mdp/mdp_kms.h"
+#include "disp/mdp_kms.h"
#include "mdp4.xml.h"
struct device_node;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 4a645926edb7..2bfb39082f54 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -341,7 +341,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (panel) {
+ if (!IS_ERR(panel)) {
drm_panel_disable(panel);
drm_panel_unprepare(panel);
}
@@ -410,7 +410,7 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
- if (panel) {
+ if (!IS_ERR(panel)) {
drm_panel_prepare(panel);
drm_panel_enable(panel);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index e3b1c86b7aae..5368e621999c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
@@ -34,9 +34,12 @@ static enum drm_connector_status mdp4_lvds_connector_detect(
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
- if (!mdp4_lvds_connector->panel)
+ if (!mdp4_lvds_connector->panel) {
mdp4_lvds_connector->panel =
of_drm_find_panel(mdp4_lvds_connector->panel_node);
+ if (IS_ERR(mdp4_lvds_connector->panel))
+ mdp4_lvds_connector->panel = NULL;
+ }
return mdp4_lvds_connector->panel ?
connector_status_connected :
@@ -129,7 +132,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index ce4245971673..ce4245971673 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 7a1ad3af08e3..79ff653d8081 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -68,7 +68,7 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
@@ -98,21 +98,6 @@ static const struct drm_plane_funcs mdp4_plane_funcs = {
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
-static int mdp4_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- struct mdp4_kms *mdp4_kms = get_kms(plane);
- struct msm_kms *kms = &mdp4_kms->base.base;
- struct drm_framebuffer *fb = new_state->fb;
-
- if (!fb)
- return 0;
-
- DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, kms->aspace);
-}
-
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -152,7 +137,7 @@ static void mdp4_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
- .prepare_fb = mdp4_plane_prepare_fb,
+ .prepare_fb = msm_atomic_prepare_fb,
.cleanup_fb = mdp4_plane_cleanup_fb,
.atomic_check = mdp4_plane_atomic_check,
.atomic_update = mdp4_plane_atomic_update,
@@ -182,8 +167,6 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
msm_framebuffer_iova(fb, kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
msm_framebuffer_iova(fb, kms->aspace, 3));
-
- plane->fb = fb;
}
static void mdp4_write_csc_config(struct mdp4_kms *mdp4_kms,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
index d9c10e02ee41..784d98989e3a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 824067d2d427..824067d2d427 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
index 75910d0f2f4c..75910d0f2f4c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index 1abc7f5c345c..d6f79dc755b4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -159,7 +159,7 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
pingpong_tearcheck_disable(encoder);
mdp5_ctl_set_encoder_state(ctl, pipeline, false);
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
bs_set(mdp5_cmd_enc, 0);
@@ -180,7 +180,7 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
if (pingpong_tearcheck_enable(encoder))
return;
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index e414850dbbda..b1da9ce54379 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -65,7 +65,7 @@ struct mdp5_crtc {
struct drm_gem_object *scanout_bo;
uint64_t iova;
uint32_t width, height;
- uint32_t x, y;
+ int x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -97,9 +97,13 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ bool start = !mdp5_cstate->defer_start;
+
+ mdp5_cstate->defer_start = false;
DBG("%s: flush=%08x", crtc->name, flush_mask);
- return mdp5_ctl_commit(ctl, pipeline, flush_mask);
+
+ return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}
/*
@@ -170,7 +174,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct msm_kms *kms = &mdp5_kms->base.base;
msm_gem_put_iova(val, kms->aspace);
- drm_gem_object_unreference_unlocked(val);
+ drm_gem_object_put_unlocked(val);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
@@ -426,6 +430,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct device *dev = &mdp5_kms->pdev->dev;
+ unsigned long flags;
DBG("%s", crtc->name);
@@ -441,6 +446,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
pm_runtime_put_sync(dev);
+ if (crtc->state->event && !crtc->state->active) {
+ WARN_ON(mdp5_crtc->event);
+ spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
+ }
+
mdp5_crtc->enabled = false;
}
@@ -704,6 +717,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock_irqsave(&dev->event_lock, flags);
mdp5_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
/*
@@ -746,20 +760,31 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
* Cursor Region Of Interest (ROI) is a plane read from cursor
* buffer to render. The ROI region is determined by the visibility of
* the cursor point. In the default Cursor image the cursor point will
- * be at the top left of the cursor image, unless it is specified
- * otherwise using hotspot feature.
+ * be at the top left of the cursor image.
*
+ * Without rotation:
* If the cursor point reaches the right (xres - x < cursor.width) or
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
* width and ROI height need to be evaluated to crop the cursor image
* accordingly.
* (xres-x) will be new cursor width when x > (xres - cursor.width)
* (yres-y) will be new cursor height when y > (yres - cursor.height)
+ *
+ * With rotation:
+ * We get negative x and/or y coordinates.
+ * (cursor.width - abs(x)) will be new cursor width when x < 0
+	 * (cursor.height - abs(y)) will be new cursor height when y < 0
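+	 *
+	 * e.g. a 64x64 cursor at x = -16 keeps its right 48 columns:
+	 * roi_w = 64 - abs(-16) = 48, read from src_x = 16 in the buffer.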
*/
- *roi_w = min(mdp5_crtc->cursor.width, xres -
+ if (mdp5_crtc->cursor.x >= 0)
+ *roi_w = min(mdp5_crtc->cursor.width, xres -
mdp5_crtc->cursor.x);
- *roi_h = min(mdp5_crtc->cursor.height, yres -
+ else
+ *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
+ if (mdp5_crtc->cursor.y >= 0)
+ *roi_h = min(mdp5_crtc->cursor.height, yres -
mdp5_crtc->cursor.y);
+ else
+ *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
@@ -769,7 +794,7 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t blendcfg, stride;
- uint32_t x, y, width, height;
+ uint32_t x, y, src_x, src_y, width, height;
uint32_t roi_w, roi_h;
int lm;
@@ -786,6 +811,26 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
get_roi(crtc, &roi_w, &roi_h);
+	/* If the cursor buffer overlaps the upper or left screen
+	 * border due to rotation, the pixel offset of the ROI
+	 * inside the cursor buffer is the positive overlap
+	 * distance.
+	 */
+ if (mdp5_crtc->cursor.x < 0) {
+ src_x = abs(mdp5_crtc->cursor.x);
+ x = 0;
+ } else {
+ src_x = 0;
+ }
+ if (mdp5_crtc->cursor.y < 0) {
+ src_y = abs(mdp5_crtc->cursor.y);
+ y = 0;
+ } else {
+ src_y = 0;
+ }
+ DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
+ crtc->name, x, y, roi_w, roi_h, src_x, src_y);
+
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -798,6 +843,9 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
+ MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
+ MDP5_LM_CURSOR_XY_SRC_X(src_x));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
mdp5_crtc->cursor.iova);
@@ -918,8 +966,9 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
if (unlikely(!crtc->state->enable))
return 0;
- mdp5_crtc->cursor.x = x = max(x, 0);
- mdp5_crtc->cursor.y = y = max(y, 0);
+ /* accept negative x/y coordinates up to maximum cursor overlap */
+ mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
+ mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
get_roi(crtc, &roi_w, &roi_h);
@@ -947,12 +996,17 @@ mdp5_crtc_atomic_print_state(struct drm_printer *p,
if (WARN_ON(!pipeline))
return;
+ if (mdp5_cstate->ctl)
+ drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));
+
drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
pipeline->mixer->name : "(null)");
if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
pipeline->r_mixer->name : "(null)");
+
+ drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}
static void mdp5_crtc_reset(struct drm_crtc *crtc)
@@ -1188,7 +1242,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
"unref cursor", unref_cursor_worker);
drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
- plane->crtc = crtc;
return crtc;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 439e0a300e25..f93d5681267c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -41,7 +41,9 @@ struct mdp5_ctl {
u32 status;
bool encoder_enabled;
- uint32_t start_mask;
+
+ /* pending flush_mask bits */
+ u32 flush_mask;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
@@ -173,16 +175,8 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
- struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
- struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
struct mdp5_interface *intf = pipeline->intf;
- struct mdp5_hw_mixer *mixer = pipeline->mixer;
- struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
-
- ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
- mdp_ctl_flush_mask_encoder(intf);
- if (r_mixer)
- ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
if (!mdp5_cfg_intf_is_virtual(intf->type))
@@ -198,7 +192,7 @@ static bool start_signal_needed(struct mdp5_ctl *ctl,
{
struct mdp5_interface *intf = pipeline->intf;
- if (!ctl->encoder_enabled || ctl->start_mask != 0)
+ if (!ctl->encoder_enabled)
return false;
switch (intf->type) {
@@ -227,25 +221,6 @@ static void send_start_signal(struct mdp5_ctl *ctl)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-static void refill_start_mask(struct mdp5_ctl *ctl,
- struct mdp5_pipeline *pipeline)
-{
- struct mdp5_interface *intf = pipeline->intf;
- struct mdp5_hw_mixer *mixer = pipeline->mixer;
- struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
-
- ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
- if (r_mixer)
- ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
-
- /*
- * Writeback encoder needs to program & flush
- * address registers for each page flip..
- */
- if (intf->type == INTF_WB)
- ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
-}
-
/**
* mdp5_ctl_set_encoder_state() - set the encoder state
*
@@ -268,7 +243,6 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl, pipeline);
}
return 0;
@@ -494,6 +468,8 @@ u32 mdp_ctl_flush_mask_lm(int lm)
case 0: return MDP5_CTL_FLUSH_LM0;
case 1: return MDP5_CTL_FLUSH_LM1;
case 2: return MDP5_CTL_FLUSH_LM2;
+ case 3: return MDP5_CTL_FLUSH_LM3;
+ case 4: return MDP5_CTL_FLUSH_LM4;
case 5: return MDP5_CTL_FLUSH_LM5;
default: return 0;
}
@@ -557,17 +533,14 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
*/
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
struct mdp5_pipeline *pipeline,
- u32 flush_mask)
+ u32 flush_mask, bool start)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 flush_id = ctl->id;
u32 curr_ctl_flush_mask;
- ctl->start_mask &= ~flush_mask;
-
- VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
- ctl->start_mask, ctl->pending_ctl_trigger);
+ VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);
if (ctl->pending_ctl_trigger & flush_mask) {
flush_mask |= MDP5_CTL_FLUSH_CTL;
@@ -582,6 +555,14 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
fix_for_single_flush(ctl, &flush_mask, &flush_id);
+ if (!start) {
+ ctl->flush_mask |= flush_mask;
+ return curr_ctl_flush_mask;
+ } else {
+ flush_mask |= ctl->flush_mask;
+ ctl->flush_mask = 0;
+ }
+
if (flush_mask) {
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
@@ -590,7 +571,6 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl, pipeline);
}
return curr_ctl_flush_mask;
@@ -711,6 +691,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
struct mdp5_ctl_manager *ctl_mgr;
const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
+ unsigned dsi_cnt = 0;
const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
@@ -760,7 +741,10 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
* only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
* Single FLUSH is supported from hw rev v3.0.
*/
- if (rev >= 3) {
+ for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
+ if (hw_cfg->intf.connect[c] == INTF_DSI)
+ dsi_cnt++;
+ if ((rev >= 3) && (dsi_cnt > 1)) {
ctl_mgr->single_flush_supported = true;
/* Reserve CTL0/1 for INTF1/2 */
ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
index b63120388dc6..403b0db0fa4c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -78,7 +78,7 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
/* @flush_mask: see CTL flush masks definitions below */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
- u32 flush_mask);
+ u32 flush_mask, bool start);
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 36ad3cbe5f79..fcd44d1d1068 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -228,7 +228,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
/*
* Wait for a vsync so we know the ENABLE=0 latched before
@@ -262,7 +262,7 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
@@ -320,6 +320,17 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
mdp5_cstate->ctl = ctl;
mdp5_cstate->pipeline.intf = intf;
+ /*
+ * This is a bit awkward, but we want to flush the CTL and hit the
+ * START bit at most once for an atomic update. In the non-full-
+ * modeset case, this is done from crtc->atomic_flush(), but that
+	 * is too early for a full modeset, where we instead
+ * defer to encoder->enable(). But we need to *know* whether
+ * encoder->enable() will be called to do this:
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ mdp5_cstate->defer_start = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
index 280e368bc9bb..280e368bc9bb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6d8e3a9a6fc0..bddd625ab91b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -70,60 +70,112 @@ static int mdp5_hw_init(struct msm_kms *kms)
return 0;
}
-struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct mdp5_global_state *
+mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
+{
+ return to_mdp5_global_state(mdp5_kms->glob_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state, creates
+ * a new duplicated private object state.
+ */
+struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
- struct msm_kms_state *state = to_kms_state(s);
- struct mdp5_state *new_state;
+ struct drm_private_state *priv_state;
int ret;
- if (state->state)
- return state->state;
-
- ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
+ ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
if (ret)
return ERR_PTR(ret);
- new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
- if (!new_state)
- return ERR_PTR(-ENOMEM);
+ priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
+ if (IS_ERR(priv_state))
+ return ERR_CAST(priv_state);
- /* Copy state: */
- new_state->hwpipe = mdp5_kms->state->hwpipe;
- new_state->hwmixer = mdp5_kms->state->hwmixer;
- if (mdp5_kms->smp)
- new_state->smp = mdp5_kms->state->smp;
+ return to_mdp5_global_state(priv_state);
+}
+
+static struct drm_private_state *
+mdp5_global_duplicate_state(struct drm_private_obj *obj)
+{
+ struct mdp5_global_state *state;
- state->state = new_state;
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
- return new_state;
+ return &state->base;
}
-static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
+static void mdp5_global_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
{
- struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- swap(to_kms_state(state)->state, mdp5_kms->state);
+ struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);
+
+ kfree(mdp5_state);
+}
+
+static const struct drm_private_state_funcs mdp5_global_state_funcs = {
+ .atomic_duplicate_state = mdp5_global_duplicate_state,
+ .atomic_destroy_state = mdp5_global_destroy_state,
+};
+
+static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
+{
+ struct mdp5_global_state *state;
+
+ drm_modeset_lock_init(&mdp5_kms->glob_state_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->mdp5_kms = mdp5_kms;
+
+ drm_atomic_private_obj_init(&mdp5_kms->glob_state,
+ &state->base,
+ &mdp5_global_state_funcs);
+ return 0;
}
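
Note: a usage sketch, not part of this patch. Check-path code that needs to
modify shared resources now goes through the duplicated private object state,
so the DRM core handles the swap on commit and the rollback on failure:

	/* hypothetical caller, assuming the helpers above */
	static int example_global_check(struct drm_atomic_state *s)
	{
		struct mdp5_global_state *global_state = mdp5_get_global_state(s);

		if (IS_ERR(global_state))
			return PTR_ERR(global_state);

		/* modify global_state->hwpipe / hwmixer / smp here; the
		 * core swaps the state in on commit or frees it on failure */
		return 0;
	}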
static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct device *dev = &mdp5_kms->pdev->dev;
+ struct mdp5_global_state *global_state;
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
pm_runtime_get_sync(dev);
if (mdp5_kms->smp)
- mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+ mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
}
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
struct device *dev = &mdp5_kms->pdev->dev;
+ struct mdp5_global_state *global_state;
+
+ drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
- mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+ mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
pm_runtime_put_sync(dev);
}
@@ -229,7 +281,6 @@ static const struct mdp_kms_funcs kms_funcs = {
.irq = mdp5_irq,
.enable_vblank = mdp5_enable_vblank,
.disable_vblank = mdp5_disable_vblank,
- .swap_state = mdp5_swap_state,
.prepare_commit = mdp5_prepare_commit,
.complete_commit = mdp5_complete_commit,
.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
@@ -727,7 +778,8 @@ static void mdp5_destroy(struct platform_device *pdev)
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
- kfree(mdp5_kms->state);
+ drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
+ drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
}
static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
@@ -880,12 +932,9 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
mdp5_kms->dev = dev;
mdp5_kms->pdev = pdev;
- drm_modeset_lock_init(&mdp5_kms->state_lock);
- mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
- if (!mdp5_kms->state) {
- ret = -ENOMEM;
+ ret = mdp5_global_obj_init(mdp5_kms);
+ if (ret)
goto fail;
- }
mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
if (IS_ERR(mdp5_kms->mmio)) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index 9b3fe01089d1..854dfd30e829 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -20,7 +20,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
-#include "mdp/mdp_kms.h"
+#include "disp/mdp_kms.h"
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
#include "mdp5_pipe.h"
@@ -28,8 +28,6 @@
#include "mdp5_ctl.h"
#include "mdp5_smp.h"
-struct mdp5_state;
-
struct mdp5_kms {
struct mdp_kms base;
@@ -49,11 +47,12 @@ struct mdp5_kms {
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
- /**
- * Global atomic state. Do not access directly, use mdp5_get_state()
+ /*
+	 * Global private object state. Do not access directly; use
+	 * mdp5_get_global_state()
*/
- struct mdp5_state *state;
- struct drm_modeset_lock state_lock;
+ struct drm_modeset_lock glob_state_lock;
+ struct drm_private_obj glob_state;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
@@ -81,19 +80,23 @@ struct mdp5_kms {
};
#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
-/* Global atomic state for tracking resources that are shared across
+/* Global private object state for tracking resources that are shared across
* multiple kms objects (planes/crtcs/etc).
- *
- * For atomic updates which require modifying global state,
*/
-struct mdp5_state {
+#define to_mdp5_global_state(x) container_of(x, struct mdp5_global_state, base)
+struct mdp5_global_state {
+ struct drm_private_state base;
+
+ struct drm_atomic_state *state;
+ struct mdp5_kms *mdp5_kms;
+
struct mdp5_hw_pipe_state hwpipe;
struct mdp5_hw_mixer_state hwmixer;
struct mdp5_smp_state smp;
};
-struct mdp5_state *__must_check
-mdp5_get_state(struct drm_atomic_state *s);
+struct mdp5_global_state *mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms);
+struct mdp5_global_state *__must_check mdp5_get_global_state(struct drm_atomic_state *s);
/* Atomic plane state. Subclasses the base drm_plane_state in order to
* track assigned hwpipe and hw specific state.
@@ -133,6 +136,14 @@ struct mdp5_crtc_state {
u32 pp_done_irqmask;
bool cmd_mode;
+
+	/* Set when we should not write the CTL[n].START register on flush.
+	 * If the encoder has changed, encoder->enable() is called after the
+	 * crtc state is committed, but CTL[n].START must only be written
+	 * once; setting this defers the START write until encoder->enable().
+	 */
+ bool defer_start;
};
#define to_mdp5_crtc_state(x) \
container_of(x, struct mdp5_crtc_state, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index f2a0db7a8a03..1cc4e57f0226 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -20,12 +20,10 @@
#include "msm_drv.h"
#include "mdp5_kms.h"
-/*
- * If needed, this can become more specific: something like struct mdp5_mdss,
- * which contains a 'struct msm_mdss base' member.
- */
-struct msm_mdss {
- struct drm_device *dev;
+#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
+
+struct mdp5_mdss {
+ struct msm_mdss base;
void __iomem *mmio, *vbif;
@@ -41,22 +39,22 @@ struct msm_mdss {
} irqcontroller;
};
-static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
- msm_writel(data, mdss->mmio + reg);
+ msm_writel(data, mdp5_mdss->mmio + reg);
}
-static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
- return msm_readl(mdss->mmio + reg);
+ return msm_readl(mdp5_mdss->mmio + reg);
}
static irqreturn_t mdss_irq(int irq, void *arg)
{
- struct msm_mdss *mdss = arg;
+ struct mdp5_mdss *mdp5_mdss = arg;
u32 intr;
- intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+ intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);
VERB("intr=%08x", intr);
@@ -64,7 +62,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
irq_hw_number_t hwirq = fls(intr) - 1;
generic_handle_irq(irq_find_mapping(
- mdss->irqcontroller.domain, hwirq));
+ mdp5_mdss->irqcontroller.domain, hwirq));
intr &= ~(1 << hwirq);
}
@@ -84,19 +82,19 @@ static irqreturn_t mdss_irq(int irq, void *arg)
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
@@ -109,13 +107,13 @@ static struct irq_chip mdss_hw_irq_chip = {
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- struct msm_mdss *mdss = d->host_data;
+ struct mdp5_mdss *mdp5_mdss = d->host_data;
if (!(VALID_IRQS & (1 << hwirq)))
return -EPERM;
irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdss);
+ irq_set_chip_data(irq, mdp5_mdss);
return 0;
}
@@ -126,90 +124,99 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
};
-static int mdss_irq_domain_init(struct msm_mdss *mdss)
+static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
- struct device *dev = mdss->dev->dev;
+ struct device *dev = mdp5_mdss->base.dev->dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
- mdss);
+ mdp5_mdss);
if (!d) {
dev_err(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
- mdss->irqcontroller.enabled_mask = 0;
- mdss->irqcontroller.domain = d;
+ mdp5_mdss->irqcontroller.enabled_mask = 0;
+ mdp5_mdss->irqcontroller.domain = d;
return 0;
}
-int msm_mdss_enable(struct msm_mdss *mdss)
+static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- clk_prepare_enable(mdss->ahb_clk);
- if (mdss->axi_clk)
- clk_prepare_enable(mdss->axi_clk);
- if (mdss->vsync_clk)
- clk_prepare_enable(mdss->vsync_clk);
+ clk_prepare_enable(mdp5_mdss->ahb_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_prepare_enable(mdp5_mdss->axi_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_prepare_enable(mdp5_mdss->vsync_clk);
return 0;
}
-int msm_mdss_disable(struct msm_mdss *mdss)
+static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- if (mdss->vsync_clk)
- clk_disable_unprepare(mdss->vsync_clk);
- if (mdss->axi_clk)
- clk_disable_unprepare(mdss->axi_clk);
- clk_disable_unprepare(mdss->ahb_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_disable_unprepare(mdp5_mdss->vsync_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_disable_unprepare(mdp5_mdss->axi_clk);
+ clk_disable_unprepare(mdp5_mdss->ahb_clk);
return 0;
}
-static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
- struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+ struct platform_device *pdev =
+ to_platform_device(mdp5_mdss->base.dev->dev);
- mdss->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(mdss->ahb_clk))
- mdss->ahb_clk = NULL;
+ mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(mdp5_mdss->ahb_clk))
+ mdp5_mdss->ahb_clk = NULL;
- mdss->axi_clk = msm_clk_get(pdev, "bus");
- if (IS_ERR(mdss->axi_clk))
- mdss->axi_clk = NULL;
+ mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
+ if (IS_ERR(mdp5_mdss->axi_clk))
+ mdp5_mdss->axi_clk = NULL;
- mdss->vsync_clk = msm_clk_get(pdev, "vsync");
- if (IS_ERR(mdss->vsync_clk))
- mdss->vsync_clk = NULL;
+ mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+ if (IS_ERR(mdp5_mdss->vsync_clk))
+ mdp5_mdss->vsync_clk = NULL;
return 0;
}
-void msm_mdss_destroy(struct drm_device *dev)
+static void mdp5_mdss_destroy(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss = priv->mdss;
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);
- if (!mdss)
+ if (!mdp5_mdss)
return;
- irq_domain_remove(mdss->irqcontroller.domain);
- mdss->irqcontroller.domain = NULL;
+ irq_domain_remove(mdp5_mdss->irqcontroller.domain);
+ mdp5_mdss->irqcontroller.domain = NULL;
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
pm_runtime_disable(dev->dev);
}
-int msm_mdss_init(struct drm_device *dev)
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = mdp5_mdss_enable,
+ .disable = mdp5_mdss_disable,
+ .destroy = mdp5_mdss_destroy,
+};
+
+int mdp5_mdss_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss;
+ struct mdp5_mdss *mdp5_mdss;
int ret;
DBG("");
@@ -217,40 +224,40 @@ int msm_mdss_init(struct drm_device *dev)
if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
return 0;
- mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
- if (!mdss) {
+ mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
+ if (!mdp5_mdss) {
ret = -ENOMEM;
goto fail;
}
- mdss->dev = dev;
+ mdp5_mdss->base.dev = dev;
- mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
- if (IS_ERR(mdss->mmio)) {
- ret = PTR_ERR(mdss->mmio);
+ mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+ if (IS_ERR(mdp5_mdss->mmio)) {
+ ret = PTR_ERR(mdp5_mdss->mmio);
goto fail;
}
- mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
- if (IS_ERR(mdss->vbif)) {
- ret = PTR_ERR(mdss->vbif);
+ mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdp5_mdss->vbif)) {
+ ret = PTR_ERR(mdp5_mdss->vbif);
goto fail;
}
- ret = msm_mdss_get_clocks(mdss);
+ ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
/* Regulator to enable GDSCs in downstream kernels */
- mdss->vdd = devm_regulator_get(dev->dev, "vdd");
- if (IS_ERR(mdss->vdd)) {
- ret = PTR_ERR(mdss->vdd);
+ mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+ if (IS_ERR(mdp5_mdss->vdd)) {
+ ret = PTR_ERR(mdp5_mdss->vdd);
goto fail;
}
- ret = regulator_enable(mdss->vdd);
+ ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
@@ -258,25 +265,26 @@ int msm_mdss_init(struct drm_device *dev)
}
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
- mdss_irq, 0, "mdss_isr", mdss);
+ mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
- ret = mdss_irq_domain_init(mdss);
+ ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}
- priv->mdss = mdss;
+ mdp5_mdss->base.funcs = &mdss_funcs;
+ priv->mdss = &mdp5_mdss->base;
pm_runtime_enable(dev->dev);
return 0;
fail_irq:
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
fail:
return ret;
}
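
Note: with the subclassing above, generic msm code reaches the mdp5
implementation only through the base vtable. A sketch of the dispatch path,
assuming the names from the hunks above:

	struct msm_mdss *mdss = priv->mdss;	/* &mdp5_mdss->base */

	if (mdss && mdss->funcs)
		mdss->funcs->enable(mdss);	/* -> mdp5_mdss_enable() */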
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 8a00991f03c7..113e6b569562 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -52,14 +52,14 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
- struct mdp5_state *state = mdp5_get_state(s);
+ struct mdp5_global_state *global_state = mdp5_get_global_state(s);
struct mdp5_hw_mixer_state *new_state;
int i;
- if (IS_ERR(state))
- return PTR_ERR(state);
+ if (IS_ERR(global_state))
+ return PTR_ERR(global_state);
- new_state = &state->hwmixer;
+ new_state = &global_state->hwmixer;
for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
@@ -129,8 +129,8 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
{
- struct mdp5_state *state = mdp5_get_state(s);
- struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
+ struct mdp5_global_state *global_state = mdp5_get_global_state(s);
+ struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
if (!mixer)
return;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 9be94f567fbd..9be94f567fbd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ff52c49095f9..1ef26bc63163 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -24,17 +24,19 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
- struct mdp5_state *state;
+ struct mdp5_global_state *new_global_state, *old_global_state;
struct mdp5_hw_pipe_state *old_state, *new_state;
int i, j;
- state = mdp5_get_state(s);
- if (IS_ERR(state))
- return PTR_ERR(state);
+ new_global_state = mdp5_get_global_state(s);
+ if (IS_ERR(new_global_state))
+ return PTR_ERR(new_global_state);
- /* grab old_state after mdp5_get_state(), since now we hold lock: */
- old_state = &mdp5_kms->state->hwpipe;
- new_state = &state->hwpipe;
+	/* grab old_state after mdp5_get_global_state(), since we now hold the lock: */
+ old_global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+ old_state = &old_global_state->hwpipe;
+ new_state = &new_global_state->hwpipe;
for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
@@ -107,7 +109,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
WARN_ON(r_hwpipe);
DBG("%s: alloc SMP blocks", (*hwpipe)->name);
- ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
+ ret = mdp5_smp_assign(mdp5_kms->smp, &new_global_state->smp,
(*hwpipe)->pipe, blkcfg);
if (ret)
return -ENOMEM;
@@ -132,7 +134,7 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
{
struct msm_drm_private *priv = s->dev->dev_private;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
- struct mdp5_state *state = mdp5_get_state(s);
+ struct mdp5_global_state *state = mdp5_get_global_state(s);
struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
if (!hwpipe)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index bb2b0ac7aa2b..bb2b0ac7aa2b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 29678876fc09..7d306c5acd09 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -46,7 +46,7 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- drm_plane_helper_disable(plane);
+ drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
@@ -245,20 +245,6 @@ static const struct drm_plane_funcs mdp5_plane_funcs = {
.atomic_print_state = mdp5_plane_atomic_print_state,
};
-static int mdp5_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
-{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct msm_kms *kms = &mdp5_kms->base.base;
- struct drm_framebuffer *fb = new_state->fb;
-
- if (!new_state->fb)
- return 0;
-
- DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, kms->aspace);
-}
-
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -286,7 +272,6 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
uint32_t max_width, max_height;
bool out_of_bounds = false;
uint32_t caps = 0;
- struct drm_rect clip;
int min_scale, max_scale;
int ret;
@@ -320,14 +305,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
return -ERANGE;
}
- clip.x1 = 0;
- clip.y1 = 0;
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state, &clip,
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
min_scale, max_scale,
true, true);
if (ret)
@@ -471,7 +452,6 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
{
struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
struct drm_crtc_state *crtc_state;
- struct drm_rect clip;
int min_scale, max_scale;
int ret;
@@ -499,14 +479,10 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
plane->state->fb != state->fb)
return -EINVAL;
- clip.x1 = 0;
- clip.y1 = 0;
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state, &clip,
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
min_scale, max_scale,
true, true);
if (ret)
@@ -536,7 +512,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
if (plane_enabled(new_state)) {
struct mdp5_ctl *ctl;
struct mdp5_pipeline *pipeline =
- mdp5_crtc_get_pipeline(plane->crtc);
+ mdp5_crtc_get_pipeline(new_state->crtc);
int ret;
ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
@@ -545,7 +521,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
ctl = mdp5_crtc_get_ctl(new_state->crtc);
- mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
}
*to_mdp5_plane_state(plane->state) =
@@ -553,7 +529,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
- .prepare_fb = mdp5_plane_prepare_fb,
+ .prepare_fb = msm_atomic_prepare_fb,
.cleanup_fb = mdp5_plane_cleanup_fb,
.atomic_check = mdp5_plane_atomic_check,
.atomic_update = mdp5_plane_atomic_update,
@@ -1053,8 +1029,6 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_img_w, src_img_h,
src_x + src_w, src_y, src_w, src_h);
- plane->fb = fb;
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index ae4983d9d0a5..96c2b828dba4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -340,17 +340,20 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
struct mdp5_kms *mdp5_kms = get_kms(smp);
struct mdp5_hw_pipe_state *hwpstate;
struct mdp5_smp_state *state;
+ struct mdp5_global_state *global_state;
int total = 0, i, j;
drm_printf(p, "name\tinuse\tplane\n");
drm_printf(p, "----\t-----\t-----\n");
if (drm_can_sleep())
- drm_modeset_lock(&mdp5_kms->state_lock, NULL);
+ drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);
+
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
/* grab these *after* we hold the state_lock */
- hwpstate = &mdp5_kms->state->hwpipe;
- state = &mdp5_kms->state->smp;
+ hwpstate = &global_state->hwpipe;
+ state = &global_state->smp;
for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
@@ -374,7 +377,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
bitmap_weight(state->state, smp->blk_cnt));
if (drm_can_sleep())
- drm_modeset_unlock(&mdp5_kms->state_lock);
+ drm_modeset_unlock(&mdp5_kms->glob_state_lock);
}
void mdp5_smp_destroy(struct mdp5_smp *smp)
@@ -384,7 +387,8 @@ void mdp5_smp_destroy(struct mdp5_smp *smp)
struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
{
- struct mdp5_smp_state *state = &mdp5_kms->state->smp;
+ struct mdp5_smp_state *state;
+ struct mdp5_global_state *global_state;
struct mdp5_smp *smp = NULL;
int ret;
@@ -398,6 +402,9 @@ struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_
smp->blk_cnt = cfg->mmb_count;
smp->blk_size = cfg->mmb_size;
+ global_state = mdp5_get_existing_global_state(mdp5_kms);
+ state = &global_state->smp;
+
/* statically tied MMBs cannot be re-allocated: */
bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
index b41d0448fbe8..b41d0448fbe8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
index 1494c407be44..d420c8044e23 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index b4a8aa4490ee..005760bee708 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
@@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
return i;
}
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
+ uint64_t modifier)
{
int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/disp/mdp_kms.c
index 64287304054d..64287304054d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index 1185487e7e5e..4fa8dbe4e165 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
@@ -98,7 +98,7 @@ struct mdp_format {
#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
/* MDP capabilities */
#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 98742d7af6dc..ff8164cc6738 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -192,13 +192,14 @@ void __exit msm_dsi_unregister(void)
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv;
struct drm_bridge *ext_bridge;
int ret;
- if (WARN_ON(!encoder))
+ if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev))
return -EINVAL;
+ priv = dev->dev_private;
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
@@ -207,6 +208,9 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
+ if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+ goto fail;
+
msm_dsi->encoder = encoder;
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
@@ -245,19 +249,17 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return 0;
fail:
- if (msm_dsi) {
- /* bridge/connector are normally destroyed by drm: */
- if (msm_dsi->bridge) {
- msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
- msm_dsi->bridge = NULL;
- }
+ /* bridge/connector are normally destroyed by drm: */
+ if (msm_dsi->bridge) {
+ msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+ msm_dsi->bridge = NULL;
+ }
- /* don't destroy connector if we didn't make it */
- if (msm_dsi->connector && !msm_dsi->external_bridge)
- msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+ /* don't destroy connector if we didn't make it */
+ if (msm_dsi->connector && !msm_dsi->external_bridge)
+ msm_dsi->connector->funcs->destroy(msm_dsi->connector);
- msm_dsi->connector = NULL;
- }
+ msm_dsi->connector = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 2302046197a8..08f3fc6771b7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -36,6 +36,7 @@ enum msm_dsi_phy_type {
MSM_DSI_PHY_20NM,
MSM_DSI_PHY_28NM_8960,
MSM_DSI_PHY_14NM,
+ MSM_DSI_PHY_10NM,
MSM_DSI_PHY_MAX
};
@@ -99,6 +100,7 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+bool msm_dsi_manager_validate_current_config(u8 id);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
@@ -148,6 +150,7 @@ static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
#endif
/* dsi host */
+struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
@@ -161,7 +164,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings);
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode);
@@ -174,13 +178,29 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req);
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi);
void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 479086ccf180..21f489a737d7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -1556,5 +1556,175 @@ static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00
#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 0x00000010
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014
+
+#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018
+
+#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c
+
+#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034
+
+#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8
+
+#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8
+
+static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
+
+#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c
+
+#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020
+
+#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024
+
+#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c
+
+#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030
+
+#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054
+
+#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064
+
+#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c
+
+#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080
+
+#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094
+
+#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4
+
+#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8
+
+#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4
+
+#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120
+
+#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144
+
+#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154
+
+#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184
+
+#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c
+
+#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0
+
#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 65c1dfbbe019..dcdfb1bb54f9 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -118,19 +118,76 @@ static const struct msm_dsi_config msm8996_dsi_cfg = {
.num_dsi = 2,
};
+static const char * const dsi_sdm845_bus_clk_names[] = {
+ "iface", "bus",
+};
+
+static const struct msm_dsi_config sdm845_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdda", 21800, 4 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_sdm845_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
+ .io_start = { 0xae94000, 0xae96000 },
+ .num_dsi = 2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_v2,
+ .link_clk_disable = dsi_link_clk_disable_v2,
+ .clk_init_ver = dsi_clk_init_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_v2,
+ .tx_buf_get = dsi_tx_buf_get_v2,
+ .tx_buf_put = NULL,
+ .dma_base_get = dsi_dma_base_get_v2,
+ .calc_clk_rate = dsi_calc_clk_rate_v2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = NULL,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = dsi_clk_init_6g_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
- {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
+ &apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
- &msm8974_apq8084_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3,
+ &msm8994_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1,
+ &msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
+ &msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 00a5da2663c6..16c507911110 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -25,6 +25,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
+#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
@@ -39,10 +40,22 @@ struct msm_dsi_config {
const int num_dsi;
};
+struct msm_dsi_host_cfg_ops {
+ int (*link_clk_enable)(struct msm_dsi_host *msm_host);
+ void (*link_clk_disable)(struct msm_dsi_host *msm_host);
+ int (*clk_init_ver)(struct msm_dsi_host *msm_host);
+ int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
+	void *(*tx_buf_get)(struct msm_dsi_host *msm_host);
+ void (*tx_buf_put)(struct msm_dsi_host *msm_host);
+ int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+};
+
struct msm_dsi_cfg_handler {
u32 major;
u32 minor;
const struct msm_dsi_config *cfg;
+ const struct msm_dsi_host_cfg_ops *ops;
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
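
Note: the host code can then dispatch through the ops table instead of
branching on the controller major version. A sketch of the call pattern, with
cfg_hnd assumed to come from msm_dsi_cfg_get() — the dsi_host.c hunks below
show the real clk_init_ver case:

	ret = cfg_hnd->ops->link_clk_enable(msm_host);
	if (cfg_hnd->ops->tx_buf_put)	/* optional op, NULL for v2 hosts */
		cfg_hnd->ops->tx_buf_put(msm_host);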
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 0f7324a686ca..96fb5f635314 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -115,8 +115,10 @@ struct msm_dsi_host {
struct clk *pixel_clk;
struct clk *byte_clk_src;
struct clk *pixel_clk_src;
+ struct clk *byte_intf_clk;
u32 byte_clk_rate;
+ u32 pixel_clk_rate;
u32 esc_clk_rate;
/* DSI v2 specific clocks */
@@ -172,6 +174,7 @@ struct msm_dsi_host {
bool registered;
bool power_on;
+ bool enabled;
int irq;
};
@@ -214,7 +217,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
goto exit;
}
- ahb_clk = clk_get(dev, "iface_clk");
+ ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
pr_err("%s: cannot get interface clock\n", __func__);
goto put_gdsc;
@@ -225,7 +228,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
- goto put_clk;
+ goto put_gdsc;
}
ret = clk_prepare_enable(ahb_clk);
@@ -249,8 +252,6 @@ disable_clks:
disable_gdsc:
regulator_disable(gdsc_reg);
pm_runtime_put_sync(dev);
-put_clk:
- clk_put(ahb_clk);
put_gdsc:
regulator_put(gdsc_reg);
exit:
@@ -332,6 +333,54 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
return 0;
}
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->src_clk = msm_clk_get(pdev, "src");
+
+ if (IS_ERR(msm_host->src_clk)) {
+ ret = PTR_ERR(msm_host->src_clk);
+ pr_err("%s: can't find src clock. ret=%d\n",
+ __func__, ret);
+ msm_host->src_clk = NULL;
+ return ret;
+ }
+
+ msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
+ if (!msm_host->esc_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get esc clock parent. ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
+ if (!msm_host->dsi_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get src clock parent. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+ if (IS_ERR(msm_host->byte_intf_clk)) {
+ ret = PTR_ERR(msm_host->byte_intf_clk);
+ pr_err("%s: can't find byte_intf clock. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -393,31 +442,8 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- msm_host->src_clk = msm_clk_get(pdev, "src");
- if (IS_ERR(msm_host->src_clk)) {
- ret = PTR_ERR(msm_host->src_clk);
- pr_err("%s: can't find src clock. ret=%d\n",
- __func__, ret);
- msm_host->src_clk = NULL;
- goto exit;
- }
-
- msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
- if (!msm_host->esc_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get esc clock parent. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
- if (!msm_host->dsi_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get src clock parent. ret=%d\n",
- __func__, ret);
- }
- }
+ if (cfg_hnd->ops->clk_init_ver)
+ ret = cfg_hnd->ops->clk_init_ver(msm_host);
exit:
return ret;
}
@@ -485,7 +511,7 @@ int msm_dsi_runtime_resume(struct device *dev)
return dsi_bus_clk_enable(msm_host);
}
-static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -498,12 +524,22 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
}
+ if (msm_host->byte_intf_clk) {
+ ret = clk_set_rate(msm_host->byte_intf_clk,
+ msm_host->byte_clk_rate / 2);
+ if (ret) {
+ pr_err("%s: Failed to set rate byte intf clk, %d\n",
+ __func__, ret);
+ goto error;
+ }
+ }
+
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
@@ -522,8 +558,19 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto pixel_clk_err;
}
+ if (msm_host->byte_intf_clk) {
+ ret = clk_prepare_enable(msm_host->byte_intf_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable byte intf clk\n",
+ __func__);
+ goto byte_intf_clk_err;
+ }
+ }
+
return 0;
+byte_intf_clk_err:
+ clk_disable_unprepare(msm_host->pixel_clk);
pixel_clk_err:
clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
@@ -532,7 +579,7 @@ error:
return ret;
}
-static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
int ret;
@@ -558,7 +605,7 @@ static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
@@ -600,96 +647,121 @@ error:
return ret;
}
-static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ if (msm_host->byte_intf_clk)
+ clk_disable_unprepare(msm_host->byte_intf_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- return dsi_link_clk_enable_6g(msm_host);
- else
- return dsi_link_clk_enable_v2(msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
+{
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->src_clk);
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
}
-static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ struct drm_display_mode *mode = msm_host->mode;
+ u32 pclk_rate;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->pixel_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- } else {
- clk_disable_unprepare(msm_host->pixel_clk);
- clk_disable_unprepare(msm_host->src_clk);
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- }
+ pclk_rate = mode->clock * 1000;
+
+ /*
+ * For dual DSI mode, the current DRM mode has the complete width of the
+	 * panel. Since the complete panel is driven by two DSI controllers,
+	 * the clock rates have to be split between them. Adjust the byte
+	 * and pixel clock rates for each DSI host accordingly.
+ */
+ if (is_dual_dsi)
+ pclk_rate /= 2;
+
+ return pclk_rate;
}
-static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- struct drm_display_mode *mode = msm_host->mode;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- u32 pclk_rate;
-
- if (!mode) {
- pr_err("%s: mode not set\n", __func__);
- return -EINVAL;
- }
+ u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
+ u64 pclk_bpp = (u64)pclk_rate * bpp;
- pclk_rate = mode->clock * 1000;
- if (lanes > 0) {
- msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
- } else {
+ if (lanes == 0) {
pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
- msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+ lanes = 1;
}
- DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+ do_div(pclk_bpp, (8 * lanes));
- msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ msm_host->pixel_clk_rate = pclk_rate;
+ msm_host->byte_clk_rate = pclk_bpp;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- unsigned int esc_mhz, esc_div;
- unsigned long byte_mhz;
+ DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
+ msm_host->byte_clk_rate);
- msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
+}
- /*
- * esc clock is byte clock followed by a 4 bit divider,
- * we need to find an escape clock frequency within the
- * mipi DSI spec range within the maximum divider limit
- * We iterate here between an escape clock frequencey
- * between 20 Mhz to 5 Mhz and pick up the first one
- * that can be supported by our divider
- */
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ if (!msm_host->mode) {
+ pr_err("%s: mode not set\n", __func__);
+ return -EINVAL;
+ }
- byte_mhz = msm_host->byte_clk_rate / 1000000;
+ dsi_calc_pclk(msm_host, is_dual_dsi);
+ msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ return 0;
+}
- for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
- esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ u64 pclk_bpp;
+ unsigned int esc_mhz, esc_div;
+ unsigned long byte_mhz;
- /*
- * TODO: Ideally, we shouldn't know what sort of divider
- * is available in mmss_cc, we're just assuming that
- * it'll always be a 4 bit divider. Need to come up with
- * a better way here.
- */
- if (esc_div >= 1 && esc_div <= 16)
- break;
- }
+ dsi_calc_pclk(msm_host, is_dual_dsi);
- if (esc_mhz < 5)
- return -EINVAL;
+ pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
+ do_div(pclk_bpp, 8);
+ msm_host->src_clk_rate = pclk_bpp;
+
+ /*
+ * The esc clock is the byte clock followed by a 4-bit divider,
+ * so we need to find an escape clock frequency within the
+ * MIPI DSI spec range and within the maximum divider limit.
+ * We iterate over escape clock frequencies from 20 MHz down to
+ * 5 MHz and pick the first one that our divider can support.
+ */
+
+ byte_mhz = msm_host->byte_clk_rate / 1000000;
- msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+ for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+ esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
- DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
- msm_host->src_clk_rate);
+ /*
+ * TODO: Ideally, we shouldn't know what sort of divider
+ * is available in mmss_cc, we're just assuming that
+ * it'll always be a 4 bit divider. Need to come up with
+ * a better way here.
+ */
+ if (esc_div >= 1 && esc_div <= 16)
+ break;
}
+ if (esc_mhz < 5)
+ return -EINVAL;
+
+ msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+
+ DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+ msm_host->src_clk_rate);
+
return 0;
}
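
The divider walk above can be exercised in isolation; a minimal sketch with
an assumed byte-clock rate (the 112.5 MHz input is illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned long byte_clk_rate = 112500000;  /* assumed, in Hz */
		unsigned long byte_mhz = byte_clk_rate / 1000000;
		unsigned int esc_mhz, esc_div = 0;

		/* walk 20 MHz down to 5 MHz, stop at the first rate the
		 * 4-bit divider can produce */
		for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
			esc_div = (byte_mhz + esc_mhz - 1) / esc_mhz;
			if (esc_div >= 1 && esc_div <= 16)
				break;
		}
		if (esc_mhz < 5)
			return 1;                         /* no divider fits */

		printf("esc=%lu Hz (div %u)\n",
		       byte_clk_rate / esc_div, esc_div);
		return 0;
	}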
@@ -740,7 +812,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
case MIPI_DSI_FMT_RGB666_PACKED:
- case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
default: return CMD_DST_FORMAT_RGB888;
}
@@ -849,7 +921,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_CTRL, data);
}
-static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
@@ -861,10 +933,26 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
u32 ha_end = ha_start + mode->hdisplay;
u32 va_start = v_total - mode->vsync_start;
u32 va_end = va_start + mode->vdisplay;
+ u32 hdisplay = mode->hdisplay;
u32 wc;
DBG("");
+ /*
+ * For dual DSI mode, the current DRM mode has
+ * the complete width of the panel. Since, the complete
+ * panel is driven by two DSI controllers, the horizontal
+ * timings have to be split between the two dsi controllers.
+ * Adjust the DSI host timing values accordingly.
+ */
+ if (is_dual_dsi) {
+ h_total /= 2;
+ hs_end /= 2;
+ ha_start /= 2;
+ ha_end /= 2;
+ hdisplay /= 2;
+ }
+
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
@@ -885,7 +973,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
/* image data and 1 byte write_memory_start cmd */
- wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
@@ -895,7 +983,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
MIPI_DSI_DCS_LONG_WRITE));
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
- DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+ DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
}
}
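
As a worked check of the command-mode word count above: with an assumed
1080-pixel-wide half-panel in RGB888, wc = 1080 * 24 / 8 + 1 = 3241 bytes,
i.e. one line of image data plus the single write_memory_start byte.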
@@ -951,13 +1039,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
{
+ unsigned long ret;
+ struct device *dev = &msm_host->pdev->dev;
+
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
reinit_completion(&msm_host->video_comp);
- wait_for_completion_timeout(&msm_host->video_comp,
+ ret = wait_for_completion_timeout(&msm_host->video_comp,
msecs_to_jiffies(70));
+ if (!ret)
+ dev_err(dev, "wait for video done timed out\n");
+
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
@@ -966,58 +1060,44 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
return;
- if (msm_host->power_on) {
+ if (msm_host->power_on && msm_host->enabled) {
dsi_wait4video_done(msm_host);
/* delay 4 ms to skip BLLP */
usleep_range(2000, 4000);
}
}
-/* dsi_cmd */
-static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- int ret;
uint64_t iova;
+ u8 *data;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
- if (IS_ERR(msm_host->tx_gem_obj)) {
- ret = PTR_ERR(msm_host->tx_gem_obj);
- pr_err("%s: failed to allocate gem, %d\n",
- __func__, ret);
- msm_host->tx_gem_obj = NULL;
- return ret;
- }
+ data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
+ priv->kms->aspace,
+ &msm_host->tx_gem_obj, &iova);
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &iova);
- mutex_unlock(&dev->struct_mutex);
- if (ret) {
- pr_err("%s: failed to get iova, %d\n", __func__, ret);
- return ret;
- }
+ if (IS_ERR(data)) {
+ msm_host->tx_gem_obj = NULL;
+ return PTR_ERR(data);
+ }
- if (iova & 0x07) {
- pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
- return -EINVAL;
- }
+ msm_host->tx_size = msm_host->tx_gem_obj->size;
- msm_host->tx_size = msm_host->tx_gem_obj->size;
- } else {
- msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+ return 0;
+}
+
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+
+ msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
&msm_host->tx_buf_paddr, GFP_KERNEL);
- if (!msm_host->tx_buf) {
- ret = -ENOMEM;
- pr_err("%s: failed to allocate tx buf, %d\n",
- __func__, ret);
- return ret;
- }
+ if (!msm_host->tx_buf)
+ return -ENOMEM;
- msm_host->tx_size = size;
- }
+ msm_host->tx_size = size;
return 0;
}
@@ -1025,13 +1105,22 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
{
struct drm_device *dev = msm_host->dev;
+ struct msm_drm_private *priv;
+
+ /*
+ * This is possible if we're tearing down before we've had a chance to
+ * fully initialize. A very real possibility if our probe is deferred,
+ * in which case we'll hit msm_dsi_host_destroy() without having run
+ * through the dsi_tx_buf_alloc().
+ */
+ if (!dev)
+ return;
+ priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
- msm_gem_put_iova(msm_host->tx_gem_obj, 0);
- mutex_lock(&dev->struct_mutex);
- msm_gem_free_object(msm_host->tx_gem_obj);
+ msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+ drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
}
if (msm_host->tx_buf)
@@ -1039,6 +1128,21 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
msm_host->tx_buf_paddr);
}
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
+{
+ return msm_gem_get_vaddr(msm_host->tx_gem_obj);
+}
+
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
+{
+ return msm_host->tx_buf;
+}
+
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
+{
+ msm_gem_put_vaddr(msm_host->tx_gem_obj);
+}
+
/*
* prepare cmd buffer to be txed
*/
@@ -1063,15 +1167,11 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
return -EINVAL;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- pr_err("%s: get vaddr failed, %d\n", __func__, ret);
- return ret;
- }
- } else {
- data = msm_host->tx_buf;
+ data = cfg_hnd->ops->tx_buf_get(msm_host);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
}
/* MSM specific command format in memory */
@@ -1092,8 +1192,8 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size);
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- msm_gem_put_vaddr(msm_host->tx_gem_obj);
+ if (cfg_hnd->ops->tx_buf_put)
+ cfg_hnd->ops->tx_buf_put(msm_host);
return len;
}
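
The hunks above replace MSM_DSI_VER_MAJOR_* checks with per-generation
function pointers on cfg_hnd->ops. A toy sketch of that dispatch pattern
(the struct layout and names below are simplified stand-ins, not the
driver's actual definitions):

	#include <stdio.h>

	struct host;                     /* opaque in this sketch */

	struct host_ops {
		void *(*tx_buf_get)(struct host *h);
		void (*tx_buf_put)(struct host *h);  /* NULL for v2 */
	};

	static char v2_buf[64];
	static void *v2_tx_buf_get(struct host *h) { (void)h; return v2_buf; }

	static const struct host_ops v2_ops = {
		.tx_buf_get = v2_tx_buf_get,
		/* no tx_buf_put: v2 uses a plain coherent buffer */
	};

	int main(void)
	{
		const struct host_ops *ops = &v2_ops;
		void *data = ops->tx_buf_get(NULL);

		/* ... fill the tx buffer ... */
		if (ops->tx_buf_put)     /* 6G maps/unmaps; v2 does not */
			ops->tx_buf_put(NULL);
		printf("buf at %p\n", data);
		return 0;
	}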
@@ -1140,24 +1240,38 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
return msg->rx_len;
}
-static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
+
+ if (!dma_base)
+ return -EINVAL;
+
+ return msm_gem_get_iova(msm_host->tx_gem_obj,
+ priv->kms->aspace, dma_base);
+}
+
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+ if (!dma_base)
+ return -EINVAL;
+
+ *dma_base = msm_host->tx_buf_paddr;
+ return 0;
+}
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
uint64_t dma_base;
bool triggered;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &dma_base);
- if (ret) {
- pr_err("%s: failed to get iova: %d\n", __func__, ret);
- return ret;
- }
- } else {
- dma_base = msm_host->tx_buf_paddr;
+ ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
}
reinit_completion(&msm_host->dma_comp);
@@ -1795,6 +1909,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct platform_device *pdev = msm_host->pdev;
int ret;
@@ -1815,7 +1930,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
}
msm_host->dev = dev;
- ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+ ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
return ret;
@@ -1848,7 +1963,7 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer)
* output
*/
if (check_defer && msm_host->device_node) {
- if (!of_drm_find_panel(msm_host->device_node))
+ if (IS_ERR(of_drm_find_panel(msm_host->device_node)))
if (!of_drm_find_bridge(msm_host->device_node))
return -EPROBE_DEFER;
}
@@ -1873,6 +1988,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
/* TODO: make sure dsi_cmd_mdp is idle.
* Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
@@ -1885,7 +2001,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdp clock need to be enabled to receive dsi interrupt
*/
pm_runtime_get_sync(&msm_host->pdev->dev);
- dsi_link_clk_enable(msm_host);
+ cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -1906,6 +2022,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
@@ -1915,7 +2032,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
}
@@ -2079,7 +2196,6 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
@@ -2105,14 +2221,16 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
+ if (msm_host->dsi_clk_src) {
ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
if (ret) {
pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
__func__, ret);
goto exit;
}
+ }
+ if (msm_host->esc_clk_src) {
ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
if (ret) {
pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
@@ -2139,12 +2257,14 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
}
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req)
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- ret = dsi_calc_clk_rate(msm_host);
+ ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
return;
@@ -2170,7 +2290,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
* pm_runtime_put_autosuspend(&msm_host->pdev->dev);
* }
*/
-
+ msm_host->enabled = true;
return 0;
}
@@ -2178,6 +2298,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ msm_host->enabled = false;
dsi_op_mode_config(msm_host,
!!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
@@ -2205,9 +2326,11 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
}
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings)
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret = 0;
mutex_lock(&msm_host->dev_mutex);
@@ -2226,7 +2349,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
}
pm_runtime_get_sync(&msm_host->pdev->dev);
- ret = dsi_link_clk_enable(msm_host);
+ ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
@@ -2240,7 +2363,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto fail_disable_clk;
}
- dsi_timing_setup(msm_host);
+ dsi_timing_setup(msm_host, is_dual_dsi);
dsi_sw_reset(msm_host);
dsi_ctrl_config(msm_host, true, phy_shared_timings);
@@ -2253,7 +2376,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
return 0;
fail_disable_clk:
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
@@ -2265,6 +2388,7 @@ unlock_ret:
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
mutex_lock(&msm_host->dev_mutex);
if (!msm_host->power_on) {
@@ -2279,7 +2403,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 855248132b2b..5224010d90e4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -88,6 +88,8 @@ static int dsi_mgr_setup_components(int id)
msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+ if (IS_ERR(src_pll))
+ return PTR_ERR(src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
} else if (!other_dsi) {
ret = 0;
@@ -116,6 +118,8 @@ static int dsi_mgr_setup_components(int id)
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
MSM_DSI_PHY_SLAVE);
src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
+ if (IS_ERR(src_pll))
+ return PTR_ERR(src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
if (ret)
return ret;
@@ -130,8 +134,9 @@ static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
{
struct msm_dsi_phy_clk_request clk_req;
int ret;
+ bool is_dual_dsi = IS_DUAL_DSI();
- msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req);
+ msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
@@ -301,102 +306,25 @@ static void dsi_mgr_connector_destroy(struct drm_connector *connector)
kfree(dsi_connector);
}
-static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
-{
- struct drm_display_mode *mode, *m;
-
- /* Only support left-right mode */
- list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
- mode->clock >>= 1;
- mode->hdisplay >>= 1;
- mode->hsync_start >>= 1;
- mode->hsync_end >>= 1;
- mode->htotal >>= 1;
- drm_mode_set_name(mode);
- }
-}
-
-static int dsi_dual_connector_tile_init(
- struct drm_connector *connector, int id)
-{
- struct drm_display_mode *mode;
- /* Fake topology id */
- char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
-
- if (connector->tile_group) {
- DBG("Tile property has been initialized");
- return 0;
- }
-
- /* Use the first mode only for now */
- mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode,
- head);
- if (!mode)
- return -EINVAL;
-
- connector->tile_group = drm_mode_get_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group)
- connector->tile_group = drm_mode_create_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group) {
- pr_err("%s: failed to create tile group\n", __func__);
- return -ENOMEM;
- }
-
- connector->has_tile = true;
- connector->tile_is_single_monitor = true;
-
- /* mode has been fixed */
- connector->tile_h_size = mode->hdisplay;
- connector->tile_v_size = mode->vdisplay;
-
- /* Only support left-right mode */
- connector->num_h_tile = 2;
- connector->num_v_tile = 1;
-
- connector->tile_v_loc = 0;
- connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
-
- return 0;
-}
-
static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
- int ret, num;
+ int num;
if (!panel)
return 0;
- /* Since we have 2 connectors, but only 1 drm_panel in dual DSI mode,
- * panel should not attach to any connector.
- * Only temporarily attach panel to the current connector here,
- * to let panel set mode to this connector.
+ /*
+ * In dual DSI mode, we have one connector that can be
+ * attached to the drm_panel.
*/
drm_panel_attach(panel, connector);
num = drm_panel_get_modes(panel);
- drm_panel_detach(panel);
if (!num)
return 0;
- if (IS_DUAL_DSI()) {
- /* report half resolution to user */
- dsi_dual_connector_fix_modes(connector);
- ret = dsi_dual_connector_tile_init(connector, id);
- if (ret)
- return ret;
- ret = drm_mode_connector_set_tile_property(connector);
- if (ret) {
- pr_err("%s: set tile property failed, %d\n",
- __func__, ret);
- return ret;
- }
- }
-
return num;
}
@@ -450,11 +378,11 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (ret)
goto phy_en_fail;
- /* Do nothing with the host if it is DSI 1 in case of dual DSI */
- if (is_dual_dsi && (DSI_1 == id))
+ /* Do nothing with the host if it is slave-DSI in case of dual DSI */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
- ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]);
+ ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail;
@@ -462,7 +390,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host,
- &phy_shared_timings[DSI_1]);
+ &phy_shared_timings[DSI_1], is_dual_dsi);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
__func__, ret);
@@ -552,11 +480,11 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
return;
/*
- * Do nothing with the host if it is DSI 1 in case of dual DSI.
+ * Do nothing with the host if it is slave-DSI in case of dual DSI.
* It is safe to call dsi_mgr_phy_disable() here because a single PHY
* won't be disabled until both PHYs request disable.
*/
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
if (panel) {
@@ -617,7 +545,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
@@ -680,11 +608,28 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_mode_connector_attach_encoder(connector, msm_dsi->encoder);
+ drm_connector_attach_encoder(connector, msm_dsi->encoder);
return connector;
}
+bool msm_dsi_manager_validate_current_config(u8 id)
+{
+ bool is_dual_dsi = IS_DUAL_DSI();
+
+ /*
+ * For dual DSI, we only have one drm panel. For this
+ * use case, we register only one bridge/connector.
+ * Skip bridge/connector initialisation if it is
+ * the slave-DSI in a dual DSI configuration.
+ */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) {
+ DBG("Skip bridge registration for slave DSI->id: %d\n", id);
+ return false;
+ }
+ return true;
+}
+
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
@@ -747,12 +692,8 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
connector_list = &dev->mode_config.connector_list;
list_for_each_entry(connector, connector_list, head) {
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == encoder->base.id)
- return connector;
- }
+ if (drm_connector_has_possible_encoder(connector, encoder))
+ return connector;
}
return ERR_PTR(-ENODEV);
@@ -832,6 +773,7 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
struct msm_drm_private *priv;
struct msm_kms *kms;
struct drm_encoder *encoder;
+ bool cmd_mode;
/*
* drm_device pointer is assigned to msm_dsi only in the modeset_init
@@ -846,10 +788,11 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
priv = dev->dev_private;
kms = priv->kms;
encoder = msm_dsi_get_encoder(msm_dsi);
+ cmd_mode = !(device_flags &
+ MIPI_DSI_MODE_VIDEO);
if (encoder && kms->funcs->set_encoder_mode)
- if (!(device_flags & MIPI_DSI_MODE_VIDEO))
- kms->funcs->set_encoder_mode(kms, encoder, true);
+ kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
}
int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
@@ -858,7 +801,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
int id = msm_dsi->id;
int ret;
- if (id > DSI_MAX) {
+ if (id >= DSI_MAX) {
pr_err("%s: invalid id %d\n", __func__, id);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 57cf7fa7f1c4..874265314413 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 790ca280cbfd..9a9fa0c75a13 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
return 0;
}
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ const unsigned long bit_rate = clk_req->bitclk_rate;
+ const unsigned long esc_rate = clk_req->escclk_rate;
+ s32 ui, ui_x8, lpx;
+ s32 tmax, tmin;
+ s32 pcnt0 = 50;
+ s32 pcnt1 = 50;
+ s32 pcnt2 = 10;
+ s32 pcnt3 = 30;
+ s32 pcnt4 = 10;
+ s32 pcnt5 = 2;
+ s32 coeff = 1000; /* Precision, should avoid overflow */
+ s32 hb_en, hb_en_ckln;
+ s32 temp;
+
+ if (!bit_rate || !esc_rate)
+ return -EINVAL;
+
+ timing->hs_halfbyte_en = 0;
+ hb_en = 0;
+ timing->hs_halfbyte_en_ckln = 0;
+ hb_en_ckln = 0;
+
+ ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+ ui_x8 = ui << 3;
+ lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+ temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (95 * coeff) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+ temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = (tmin > 255) ? 511 : 255;
+ timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp + 3 * ui) / ui_x8;
+ timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+ tmin = max_t(s32, temp, 0);
+ temp = (85 * coeff + 6 * ui) / ui_x8;
+ tmax = max_t(s32, temp, 0);
+ timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+ temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+ tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+ temp = 105 * coeff + 12 * ui - 20 * coeff;
+ tmax = (temp / ui_x8) - 1;
+ timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+ temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+ timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+ tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+ tmax = 255;
+ timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+ timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+ temp = 60 * coeff + 52 * ui - 43 * ui;
+ tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ timing->shared_timings.clk_post =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+
+ temp = 8 * ui + (timing->clk_prepare << 3) * ui;
+ temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
+ temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+ (((timing->hs_rqst_ckln << 3) + 8) * ui);
+ tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+ tmax = 63;
+ if (tmin > tmax) {
+ temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre = temp >> 1;
+ timing->shared_timings.clk_pre_inc_by_2 = 1;
+ } else {
+ timing->shared_timings.clk_pre =
+ linear_inter(tmax, tmin, pcnt2, 0, false);
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
+ }
+
+ timing->ta_go = 3;
+ timing->ta_sure = 0;
+ timing->ta_get = 4;
+
+ DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+ timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+ timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+ timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+ timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+ timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+ timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+ timing->hs_prep_dly_ckln);
+
+ return 0;
+}
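
Every timing field above is picked a fixed percentage of the way from tmin
toward tmax. A sketch of that interpolation, written out as an assumption
(the kernel's linear_inter() helper also handles signed rounding and an
even/odd constraint not shown here):

	#include <stdio.h>

	static int pick_between(int tmax, int tmin, int percent, int min_result)
	{
		int v = (tmax - tmin) * percent;

		v = (v + 99) / 100 + tmin;   /* round up, offset from tmin */
		return v > min_result ? v : min_result;
	}

	int main(void)
	{
		/* e.g. clk_prepare at pcnt0 = 50%: halfway between the limits */
		printf("pick=%d\n", pick_between(12, 5, 50, 0));  /* -> 9 */
		return 0;
	}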
+
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask)
{
@@ -395,6 +504,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
{ .compatible = "qcom,dsi-phy-14nm",
.data = &dsi_phy_14nm_cfgs },
#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+ { .compatible = "qcom,dsi-phy-10nm",
+ .data = &dsi_phy_10nm_cfgs },
+#endif
{}
};
@@ -503,10 +616,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
- if (!phy->pll)
+ if (IS_ERR_OR_NULL(phy->pll))
dev_info(dev,
- "%s: pll init failed, need separate pll clk driver\n",
- __func__);
+ "%s: pll init failed: %ld, need separate pll clk driver\n",
+ __func__, PTR_ERR(phy->pll));
dsi_phy_disable_resource(phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 1733f6608a09..a24ab80994a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -48,6 +48,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_pre;
@@ -100,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req);
void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
u32 bit_mask);
int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
new file mode 100644
index 000000000000..b3fffc8dbb2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -0,0 +1,223 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data = 0;
+
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
+ mb(); /* make sure read happened */
+
+ return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *lane_base = phy->lane_base;
+ int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
+
+ /*
+ * LPRX and CDRX need to be enabled only for the physical data
+ * lane corresponding to logical data lane 0.
+ */
+ if (enable)
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+ else
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
+{
+ int i;
+ u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+ void __iomem *lane_base = phy->lane_base;
+
+ /* Strength ctrl settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
+ 0x55);
+ /*
+ * Disable LPRX and CDRX for all lanes. Later on, they will be
+ * enabled only for the physical data lane corresponding to
+ * logical data lane 0.
+ */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
+ 0x88);
+ }
+
+ dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
+
+ /* other settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
+ i == 4 ? 0x80 : 0x0);
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
+ tx_dctrl[i]);
+ }
+
+ /* Toggle BIT 0 to release freeze I/O */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
+}
+
+static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ int ret;
+ u32 status;
+ u32 const delay_us = 5;
+ u32 const timeout_us = 1000;
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+ u32 data;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
+ dev_err(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dsi_phy_hw_v3_0_is_pll_on(phy))
+ pr_warn("PLL turned on before configuring PHY\n");
+
+ /* wait for REFGEN READY */
+ ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
+ status, (status & BIT(0)),
+ delay_us, timeout_us);
+ if (ret) {
+ pr_err("Ref gen not ready. Aborting\n");
+ return -EINVAL;
+ }
+
+ /* de-assert digital and pll power down */
+ data = BIT(6) | BIT(5);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+
+ /* Assert PLL core reset */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ /* turn off resync FIFO */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+ /* Select MS1 byte-clk */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);
+
+ /* Enable LDO */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);
+
+ /* Configure PHY lane swap (TODO: we need to calculate this) */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);
+
+ /* DSI PHY timings */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
+ timing->hs_halfbyte_en);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
+ timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
+ timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
+ timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
+ timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
+ timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
+ timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
+ timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
+ timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
+ timing->ta_go | (timing->ta_sure << 3));
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
+ timing->ta_get);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
+ 0x00);
+
+ /* Remove power down from all blocks */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);
+
+ /* power up lanes */
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ /* TODO: only power up lanes that are used */
+ data |= 0x1F;
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);
+
+ /* Select full-rate mode */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);
+
+ ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+ if (ret) {
+ dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* DSI lane settings */
+ dsi_phy_hw_v3_0_lane_settings(phy);
+
+ DBG("DSI%d PHY enabled", phy->id);
+
+ return 0;
+}
+
+static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
+{
+}
+
+static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+
+ phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+ "DSI_PHY_LANE");
+ if (IS_ERR(phy->lane_base)) {
+ dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
+ .type = MSM_DSI_PHY_10NM,
+ .src_pll_truthtable = { {false, false}, {true, false} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdds", 36000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_10nm_phy_enable,
+ .disable = dsi_10nm_phy_disable,
+ .init = dsi_10nm_phy_init,
+ },
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index bc289f5c9078..613e206fa4fc 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -166,6 +166,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
case MSM_DSI_PHY_14NM:
pll = msm_dsi_pll_14nm_init(pdev, id);
break;
+ case MSM_DSI_PHY_10NM:
+ pll = msm_dsi_pll_10nm_init(pdev, id);
+ break;
default:
pll = ERR_PTR(-ENXIO);
break;
@@ -173,7 +176,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
if (IS_ERR(pll)) {
dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
- return NULL;
+ return pll;
}
pll->type = type;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index f63e7ada74a8..8b32271cbc24 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -115,5 +115,14 @@ msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
return ERR_PTR(-ENODEV);
}
#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#endif /* __DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
new file mode 100644
index 000000000000..4c03f0b7343e
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -0,0 +1,824 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 10nm - clock diagram (eg: DSI0):
+ *
+ * dsi0_pll_out_div_clk dsi0_pll_bit_clk
+ * | |
+ * | |
+ * +---------+ | +----------+ | +----+
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ * +---------+ | +----------+ | +----+
+ * | |
+ * | | dsi0_pll_by_2_bit_clk
+ * | | |
+ * | | +----+ | |\ dsi0_pclk_mux
+ * | |--| /2 |--o--| \ |
+ * | | +----+ | \ | +---------+
+ * | --------------| |--o--| div_7_4 |-- dsi0pll
+ * |------------------------------| / +---------+
+ * | +-----+ | /
+ * -----------| /4? |--o----------|/
+ * +-----+ | |
+ * | |dsiclk_sel
+ * |
+ * dsi0_pll_post_out_div_clk
+ */
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+#define NUM_PROVIDED_CLKS 2
+
+struct dsi_pll_regs {
+ u32 pll_prop_gain_rate;
+ u32 pll_lockdet_rate;
+ u32 decimal_div_start;
+ u32 frac_div_start_low;
+ u32 frac_div_start_mid;
+ u32 frac_div_start_high;
+ u32 pll_clock_inverters;
+ u32 ssc_stepsize_low;
+ u32 ssc_stepsize_high;
+ u32 ssc_div_per_low;
+ u32 ssc_div_per_high;
+ u32 ssc_adjper_low;
+ u32 ssc_adjper_high;
+ u32 ssc_control;
+};
+
+struct dsi_pll_config {
+ u32 ref_freq;
+ bool div_override;
+ u32 output_div;
+ bool ignore_frac;
+ bool disable_prescaler;
+ bool enable_ssc;
+ bool ssc_center;
+ u32 dec_bits;
+ u32 frac_bits;
+ u32 lock_timer;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+ u32 thresh_cycles;
+ u32 refclk_cycles;
+};
+
+struct pll_10nm_cached_state {
+ unsigned long vco_rate;
+ u8 bit_clk_div;
+ u8 pix_clk_div;
+ u8 pll_out_div;
+ u8 pll_mux;
+};
+
+struct dsi_pll_10nm {
+ struct msm_dsi_pll base;
+
+ int id;
+ struct platform_device *pdev;
+
+ void __iomem *phy_cmn_mmio;
+ void __iomem *mmio;
+
+ u64 vco_ref_clk_rate;
+ u64 vco_current_rate;
+
+ /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ int vco_delay;
+ struct dsi_pll_config pll_configuration;
+ struct dsi_pll_regs reg_setup;
+
+ /* private clocks: */
+ struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
+ u32 num_hws;
+
+ /* clock-provider: */
+ struct clk_hw_onecell_data *hw_data;
+
+ struct pll_10nm_cached_state cached_state;
+
+ enum msm_dsi_phy_usecase uc;
+ struct dsi_pll_10nm *slave;
+};
+
+#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, base)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs access the slave's private data
+ */
+static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+
+ config->ref_freq = pll->vco_ref_clk_rate;
+ config->output_div = 1;
+ config->dec_bits = 8;
+ config->frac_bits = 18;
+ config->lock_timer = 64;
+ config->ssc_freq = 31500;
+ config->ssc_offset = 5000;
+ config->ssc_adj_per = 2;
+ config->thresh_cycles = 32;
+ config->refclk_cycles = 256;
+
+ config->div_override = false;
+ config->ignore_frac = false;
+ config->disable_prescaler = false;
+
+ config->enable_ssc = false;
+ config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u64 fref = pll->vco_ref_clk_rate;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+
+ pll_freq = pll->vco_current_rate;
+
+ if (config->disable_prescaler)
+ divider = fref;
+ else
+ divider = fref * 2;
+
+ multiplier = 1 << config->frac_bits;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ div_u64_rem(dec_multiple, multiplier, &frac);
+
+ dec = div_u64(dec_multiple, multiplier);
+
+ if (pll_freq <= 1900000000UL)
+ regs->pll_prop_gain_rate = 8;
+ else if (pll_freq <= 3000000000UL)
+ regs->pll_prop_gain_rate = 10;
+ else
+ regs->pll_prop_gain_rate = 12;
+ if (pll_freq < 1100000000UL)
+ regs->pll_clock_inverters = 8;
+ else
+ regs->pll_clock_inverters = 0;
+
+ regs->pll_lockdet_rate = config->lock_timer;
+ regs->decimal_div_start = dec;
+ regs->frac_div_start_low = (frac & 0xff);
+ regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+ regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
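
The dec/frac split above is plain fixed-point arithmetic; rerunning it with
assumed inputs (19.2 MHz reference, x2 prescaler, 18 fractional bits, 1.5 GHz
VCO target — illustrative numbers only):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t fref = 19200000, pll_freq = 1500000000ULL;
		uint64_t divider = fref * 2;        /* prescaler enabled */
		uint64_t multiplier = 1 << 18;      /* frac_bits = 18 */

		uint64_t dec_multiple = pll_freq * multiplier / divider;
		uint32_t frac = dec_multiple % multiplier;
		uint64_t dec = dec_multiple / multiplier;

		/* prints dec=39 frac=0x4000 */
		printf("dec=%llu frac=0x%x\n", (unsigned long long)dec, frac);
		return 0;
	}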
+
+#define SSC_CENTER BIT(0)
+#define SSC_EN BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u32 ssc_per;
+ u32 ssc_mod;
+ u64 ssc_step_size;
+ u64 frac;
+
+ if (!config->enable_ssc) {
+ DBG("SSC not enabled\n");
+ return;
+ }
+
+ ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+ ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+ ssc_per -= ssc_mod;
+
+ frac = regs->frac_div_start_low |
+ (regs->frac_div_start_mid << 8) |
+ (regs->frac_div_start_high << 16);
+ ssc_step_size = regs->decimal_div_start;
+ ssc_step_size *= (1 << config->frac_bits);
+ ssc_step_size += frac;
+ ssc_step_size *= config->ssc_offset;
+ ssc_step_size *= (config->ssc_adj_per + 1);
+ ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+ ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+ regs->ssc_div_per_low = ssc_per & 0xFF;
+ regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+ regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+ regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+ regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+ regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+ regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+ pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+ regs->decimal_div_start, frac, config->frac_bits);
+ pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+ ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+
+ if (pll->pll_configuration.enable_ssc) {
+ pr_debug("SSC is enabled\n");
+
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+ regs->ssc_stepsize_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+ regs->ssc_stepsize_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+ regs->ssc_div_per_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+ regs->ssc_div_per_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
+ regs->ssc_adjper_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
+ regs->ssc_adjper_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
+ SSC_EN | regs->ssc_control);
+ }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->mmio;
+
+ pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
+ 0xba);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
+ 0x4c);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_commit(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ struct dsi_pll_regs *reg = &pll->reg_setup;
+
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
+ reg->decimal_div_start);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+ reg->frac_div_start_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
+ reg->frac_div_start_mid);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+ reg->frac_div_start_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
+ reg->pll_clock_inverters);
+}
+
+static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,
+ parent_rate);
+
+ pll_10nm->vco_current_rate = rate;
+ pll_10nm->vco_ref_clk_rate = parent_rate;
+
+ dsi_pll_setup_config(pll_10nm);
+
+ dsi_pll_calc_dec_frac(pll_10nm);
+
+ dsi_pll_calc_ssc(pll_10nm);
+
+ dsi_pll_commit(pll_10nm);
+
+ dsi_pll_config_hzindep_reg(pll_10nm);
+
+ dsi_pll_ssc_commit(pll_10nm);
+
+ /* flush, ensure all register writes are done */
+ wmb();
+
+ return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
+{
+ int rc;
+ u32 status = 0;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
+ rc = readl_poll_timeout_atomic(pll->mmio +
+ REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->id, status);
+
+ return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data & ~BIT(5));
+ ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data | BIT(5));
+ pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data | BIT(5));
+}
+
+static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ int rc;
+
+ dsi_pll_enable_pll_bias(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_pll_bias(pll_10nm->slave);
+
+ /* Start PLL */
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+ 0x01);
+
+ /*
+ * ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_10nm_lock_status(pll_10nm);
+ if (rc) {
+ pr_err("PLL(%d) lock failed\n", pll_10nm->id);
+ goto error;
+ }
+
+ pll->pll_on = true;
+
+ dsi_pll_enable_global_clk(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_global_clk(pll_10nm->slave);
+
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
+ 0x01);
+ if (pll_10nm->slave)
+ pll_write(pll_10nm->slave->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+ return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
+{
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+ /*
+ * To avoid any stray glitches while abruptly powering down the PLL
+ * make sure to gate the clock using the clock enable bit before
+ * powering down the PLL
+ */
+ dsi_pll_disable_global_clk(pll_10nm);
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(pll_10nm);
+ if (pll_10nm->slave) {
+ dsi_pll_disable_global_clk(pll_10nm->slave);
+ dsi_pll_disable_sub(pll_10nm->slave);
+ }
+ /* flush, ensure all register writes are done */
+ wmb();
+ pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ void __iomem *base = pll_10nm->mmio;
+ u64 ref_clk = pll_10nm->vco_ref_clk_rate;
+ u64 vco_rate = 0x0;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u64 pll_freq, tmp64;
+
+ dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
+ dec &= 0xff;
+
+ frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+ 0xff) << 8);
+ frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) << 16);
+
+ /*
+ * TODO:
+ * 1. Assumes prescaler is disabled
+ * 2. Multiplier is 2^18. It should be 2^(num_of_frac_bits)
+ */
+ multiplier = 1 << 18;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ pll_freq += div_u64(tmp64, multiplier);
+
+ vco_rate = pll_freq;
+
+ DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+ pll_10nm->id, (unsigned long)vco_rate, dec, frac);
+
+ return (unsigned long)vco_rate;
+}
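
Running the previous sketch's numbers back through the recalc path above:
dec = 39 and frac = 0x4000 with a 19.2 MHz reference give
pll_freq = 39 * (2 * 19.2 MHz) + (2 * 19.2 MHz * 0x4000) / 2^18
= 1497.6 MHz + 2.4 MHz = 1.5 GHz, recovering the programmed rate.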
+
+static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
+ .round_rate = msm_dsi_pll_helper_clk_round_rate,
+ .set_rate = dsi_pll_10nm_vco_set_rate,
+ .recalc_rate = dsi_pll_10nm_vco_recalc_rate,
+ .prepare = dsi_pll_10nm_vco_prepare,
+ .unprepare = dsi_pll_10nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+ u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+ cached->pll_out_div = pll_read(pll_10nm->mmio +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ cached->pll_out_div &= 0x3;
+
+ cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
+ cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+ cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+ cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+ DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+ pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,
+ cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+ u32 val;
+
+ val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ val &= ~0x3;
+ val |= cached->pll_out_div;
+ pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+ pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+ val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ val &= ~0x3;
+ val |= cached->pll_mux;
+ pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+
+ DBG("DSI PLL%d", pll_10nm->id);
+
+ return 0;
+}
+
+static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,
+ enum msm_dsi_phy_usecase uc)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ void __iomem *base = pll_10nm->phy_cmn_mmio;
+ u32 data = 0x0; /* internal PLL */
+
+ DBG("DSI PLL%d", pll_10nm->id);
+
+ switch (uc) {
+ case MSM_DSI_PHY_STANDALONE:
+ break;
+ case MSM_DSI_PHY_MASTER:
+ pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ data = 0x1; /* external PLL */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set PLL src */
+ pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+ pll_10nm->uc = uc;
+
+ return 0;
+}
+
+static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,
+ struct clk **byte_clk_provider,
+ struct clk **pixel_clk_provider)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;
+
+ DBG("DSI PLL%d", pll_10nm->id);
+
+ if (byte_clk_provider)
+ *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+ if (pixel_clk_provider)
+ *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+ return 0;
+}
+
+static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+ DBG("DSI PLL%d", pll_10nm->id);
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers.
+ */
+static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
+{
+ char clk_name[32], parent[32], vco_name[32];
+ char parent2[32], parent3[32], parent4[32];
+ struct clk_init_data vco_init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .name = vco_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_10nm_vco,
+ };
+ struct device *dev = &pll_10nm->pdev->dev;
+ struct clk_hw **hws = pll_10nm->hws;
+ struct clk_hw_onecell_data *hw_data;
+ struct clk_hw *hw;
+ int num = 0;
+ int ret;
+
+ DBG("DSI%d", pll_10nm->id);
+
+ hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+ NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);
+ pll_10nm->base.clk_hw.init = &vco_init;
+
+ ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);
+ if (ret)
+ return ret;
+
+ hws[num++] = &pll_10nm->base.clk_hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
+
+ hw = clk_hw_register_divider(dev, clk_name,
+ parent, CLK_SET_RATE_PARENT,
+ pll_10nm->mmio +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+ /* BIT CLK: DIV_CTRL_3_0 */
+ hw = clk_hw_register_divider(dev, clk_name, parent,
+ CLK_SET_RATE_PARENT,
+ pll_10nm->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED,
+ &pll_10nm->postdiv_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+ /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ CLK_SET_RATE_PARENT, 1, 8);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+ hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ 0, 1, 2);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ 0, 1, 4);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+ snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+ snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+ snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+
+ hw = clk_hw_register_mux(dev, clk_name,
+ (const char *[]){
+ parent, parent2, parent3, parent4
+ }, 4, 0, pll_10nm->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ 0, 2, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
+
+ /* PIX CLK DIV: DIV_CTRL_7_4 */
+ hw = clk_hw_register_divider(dev, clk_name, parent,
+ 0, pll_10nm->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED,
+ &pll_10nm->postdiv_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+ hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+ pll_10nm->num_hws = num;
+
+ hw_data->num = NUM_PROVIDED_CLKS;
+ pll_10nm->hw_data = hw_data;
+
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ pll_10nm->hw_data);
+ if (ret) {
+ dev_err(dev, "failed to register clk provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
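
The registration above builds, per PLL, the chain dsiNvco_clk -> dsiN_pll_out_div_clk -> dsiN_pll_bit_clk, with the byte clock a fixed /8 off the bit clock and the pixel clock behind the 4-input mux plus pixel divider. A hypothetical consumer-side sketch (the connection ids, and the DT wiring that would make devm_clk_get() resolve them, are assumptions, not part of this patch):

    #include <linux/clk.h>
    #include <linux/device.h>

    static int example_get_dsi0_clks(struct device *dev)
    {
        /* "dsi0pllbyte"/"dsi0pll" match the names registered above
         * for id 0 */
        struct clk *byte_clk = devm_clk_get(dev, "dsi0pllbyte");
        struct clk *pixel_clk = devm_clk_get(dev, "dsi0pll");

        if (IS_ERR(byte_clk) || IS_ERR(pixel_clk))
            return -ENODEV;

        /* CLK_SET_RATE_PARENT on the byte path propagates the
         * request up through BIT_DIV and OUT_DIV to the VCO */
        return clk_set_rate(byte_clk, 150000000);
    }
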
+
+struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+{
+ struct dsi_pll_10nm *pll_10nm;
+ struct msm_dsi_pll *pll;
+ int ret;
+
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
+ if (!pll_10nm)
+ return ERR_PTR(-ENOMEM);
+
+ DBG("DSI PLL%d", id);
+
+ pll_10nm->pdev = pdev;
+ pll_10nm->id = id;
+ pll_10nm_list[id] = pll_10nm;
+
+ pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+ if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
+ dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+ if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
+ dev_err(&pdev->dev, "failed to map PLL base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&pll_10nm->postdiv_lock);
+
+ pll = &pll_10nm->base;
+ pll->min_rate = 1000000000UL;
+ pll->max_rate = 3500000000UL;
+ pll->get_provider = dsi_pll_10nm_get_provider;
+ pll->destroy = dsi_pll_10nm_destroy;
+ pll->save_state = dsi_pll_10nm_save_state;
+ pll->restore_state = dsi_pll_10nm_restore_state;
+ pll->set_usecase = dsi_pll_10nm_set_usecase;
+
+ pll_10nm->vco_delay = 1;
+
+ ret = pll_10nm_register(pll_10nm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* TODO: Remove this when we have proper display handover support */
+ msm_dsi_pll_save_state(pll);
+
+ return pll;
+}
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 9d4d1feaefd7..07c48ddb5301 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f150d4a47707..9cb6e6fe9810 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 6f3fc6b0f0a3..058ff92a0207 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -56,7 +56,7 @@ static int edp_connector_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- drm_mode_connector_update_edid_property(connector, drm_edid);
+ drm_connector_update_edid_property(connector, drm_edid);
if (drm_edid)
ret = drm_add_edid_modes(connector, drm_edid);
@@ -134,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_mode_connector_attach_encoder(connector, edp->encoder);
+ drm_connector_attach_encoder(connector, edp->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index e63dc0fb55f8..c79659ca5706 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -157,8 +157,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->qfprom_mmio = NULL;
}
- hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
- config->hpd_reg_cnt, GFP_KERNEL);
+ hdmi->hpd_regs = devm_kcalloc(&pdev->dev,
+ config->hpd_reg_cnt,
+ sizeof(hdmi->hpd_regs[0]),
+ GFP_KERNEL);
if (!hdmi->hpd_regs) {
ret = -ENOMEM;
goto fail;
@@ -178,8 +180,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->hpd_regs[i] = reg;
}
- hdmi->pwr_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_regs[0]) *
- config->pwr_reg_cnt, GFP_KERNEL);
+ hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
+ config->pwr_reg_cnt,
+ sizeof(hdmi->pwr_regs[0]),
+ GFP_KERNEL);
if (!hdmi->pwr_regs) {
ret = -ENOMEM;
goto fail;
@@ -199,8 +203,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->pwr_regs[i] = reg;
}
- hdmi->hpd_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_clks[0]) *
- config->hpd_clk_cnt, GFP_KERNEL);
+ hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
+ config->hpd_clk_cnt,
+ sizeof(hdmi->hpd_clks[0]),
+ GFP_KERNEL);
if (!hdmi->hpd_clks) {
ret = -ENOMEM;
goto fail;
@@ -219,8 +225,10 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->hpd_clks[i] = clk;
}
- hdmi->pwr_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_clks[0]) *
- config->pwr_clk_cnt, GFP_KERNEL);
+ hdmi->pwr_clks = devm_kcalloc(&pdev->dev,
+ config->pwr_clk_cnt,
+ sizeof(hdmi->pwr_clks[0]),
+ GFP_KERNEL);
if (!hdmi->pwr_clks) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index ecebf8b623ab..3eff3ea3b271 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index c0848dfedd50..e9c9a0af508e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -392,7 +392,7 @@ static int msm_hdmi_connector_get_modes(struct drm_connector *connector)
hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid);
- drm_mode_connector_update_edid_property(connector, edid);
+ drm_connector_update_edid_property(connector, edid);
if (edid) {
ret = drm_add_edid_modes(connector, edid);
@@ -477,7 +477,7 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
return ERR_PTR(ret);
}
- drm_mode_connector_attach_encoder(connector, hdmi->encoder);
+ drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 6e767979aab3..3656155e3793 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -769,7 +769,7 @@ static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctr
if (rc) {
pr_err("%s: wait key and an ready failed\n", __func__);
return rc;
- };
+ }
/* Read BCAPS and send to HDCP engine */
rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 5e631392dc85..4157722d6b4d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -21,12 +21,12 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
struct device *dev = &phy->pdev->dev;
int i, ret;
- phy->regs = devm_kzalloc(dev, sizeof(phy->regs[0]) * cfg->num_regs,
+ phy->regs = devm_kcalloc(dev, cfg->num_regs, sizeof(phy->regs[0]),
GFP_KERNEL);
if (!phy->regs)
return -ENOMEM;
- phy->clks = devm_kzalloc(dev, sizeof(phy->clks[0]) * cfg->num_clks,
+ phy->clks = devm_kcalloc(dev, cfg->num_clks, sizeof(phy->clks[0]),
GFP_KERNEL);
if (!phy->clks)
return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index da646deedf4b..7717d4269662 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index bf5f8c39f34d..c1f1779c980f 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -16,69 +16,8 @@
*/
#include "msm_drv.h"
-#include "msm_kms.h"
#include "msm_gem.h"
-#include "msm_fence.h"
-
-struct msm_commit {
- struct drm_device *dev;
- struct drm_atomic_state *state;
- struct work_struct work;
- uint32_t crtc_mask;
-};
-
-static void commit_worker(struct work_struct *work);
-
-/* block until specified crtcs are no longer pending update, and
- * atomically mark them as pending update
- */
-static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
-{
- int ret;
-
- spin_lock(&priv->pending_crtcs_event.lock);
- ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
- !(priv->pending_crtcs & crtc_mask));
- if (ret == 0) {
- DBG("start: %08x", crtc_mask);
- priv->pending_crtcs |= crtc_mask;
- }
- spin_unlock(&priv->pending_crtcs_event.lock);
-
- return ret;
-}
-
-/* clear specified crtcs (no longer pending update)
- */
-static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
-{
- spin_lock(&priv->pending_crtcs_event.lock);
- DBG("end: %08x", crtc_mask);
- priv->pending_crtcs &= ~crtc_mask;
- wake_up_all_locked(&priv->pending_crtcs_event);
- spin_unlock(&priv->pending_crtcs_event.lock);
-}
-
-static struct msm_commit *commit_init(struct drm_atomic_state *state)
-{
- struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-
- if (!c)
- return NULL;
-
- c->dev = state->dev;
- c->state = state;
-
- INIT_WORK(&c->work, commit_worker);
-
- return c;
-}
-
-static void commit_destroy(struct msm_commit *c)
-{
- end_atomic(c->dev->dev_private, c->crtc_mask);
- kfree(c);
-}
+#include "msm_kms.h"
static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
@@ -97,195 +36,51 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
}
}
-/* The (potentially) asynchronous part of the commit. At this point
- * nothing can fail short of armageddon.
- */
-static void complete_commit(struct msm_commit *c, bool async)
+int msm_atomic_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
{
- struct drm_atomic_state *state = c->state;
- struct drm_device *dev = state->dev;
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = plane->dev->dev_private;
struct msm_kms *kms = priv->kms;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ struct dma_fence *fence;
- drm_atomic_helper_wait_for_fences(dev, state, false);
-
- kms->funcs->prepare_commit(kms, state);
-
- drm_atomic_helper_commit_modeset_disables(dev, state);
-
- drm_atomic_helper_commit_planes(dev, state, 0);
-
- drm_atomic_helper_commit_modeset_enables(dev, state);
-
- /* NOTE: _wait_for_vblanks() only waits for vblank on
- * enabled CRTCs. So we end up faulting when disabling
- * due to (potentially) unref'ing the outgoing fb's
- * before the vblank when the disable has latched.
- *
- * But if it did wait on disabled (or newly disabled)
- * CRTCs, that would be racy (ie. we could have missed
- * the irq. We need some way to poll for pipe shut
- * down. Or just live with occasionally hitting the
- * timeout in the CRTC disable path (which really should
- * not be critical path)
- */
-
- msm_atomic_wait_for_commit_done(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- kms->funcs->complete_commit(kms, state);
+ if (!new_state->fb)
+ return 0;
- drm_atomic_state_put(state);
+ obj = msm_framebuffer_bo(new_state->fb, 0);
+ msm_obj = to_msm_bo(obj);
+ fence = reservation_object_get_excl_rcu(msm_obj->resv);
- commit_destroy(c);
-}
+ drm_atomic_set_fence_for_plane(new_state, fence);
-static void commit_worker(struct work_struct *work)
-{
- complete_commit(container_of(work, struct msm_commit, work), true);
+ return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}
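
A sketch of how a plane implementation would pick up the new shared helper; the surrounding struct is illustrative, since this patch only introduces the function itself (each plane driver wires it into its own helper funcs):

    static const struct drm_plane_helper_funcs example_plane_helpers = {
        .prepare_fb = msm_atomic_prepare_fb,
        /* .cleanup_fb, .atomic_check, .atomic_update etc. remain
         * per-driver, as before */
    };
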
-/**
- * drm_atomic_helper_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int msm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool nonblock)
+void msm_atomic_commit_tail(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
- struct msm_commit *c;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
- int i, ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
+ struct msm_kms *kms = priv->kms;
- /*
- * Note that plane->atomic_async_check() should fail if we need
- * to re-assign hwpipe or anything that touches global atomic
- * state, so we'll never go down the async update path in those
- * cases.
- */
- if (state->async_update) {
- drm_atomic_helper_async_commit(dev, state);
- drm_atomic_helper_cleanup_planes(dev, state);
- return 0;
- }
+ kms->funcs->prepare_commit(kms, state);
- c = commit_init(state);
- if (!c) {
- ret = -ENOMEM;
- goto error;
- }
+ drm_atomic_helper_commit_modeset_disables(dev, state);
- /*
- * Figure out what crtcs we have:
- */
- for_each_new_crtc_in_state(state, crtc, crtc_state, i)
- c->crtc_mask |= drm_crtc_mask(crtc);
+ drm_atomic_helper_commit_planes(dev, state, 0);
- /*
- * Figure out what fence to wait for:
- */
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
- if ((new_plane_state->fb != old_plane_state->fb) && new_plane_state->fb) {
- struct drm_gem_object *obj = msm_framebuffer_bo(new_plane_state->fb, 0);
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_set_fence_for_plane(new_plane_state, fence);
- }
+ if (kms->funcs->commit) {
+ DRM_DEBUG_ATOMIC("triggering commit\n");
+ kms->funcs->commit(kms, state);
}
- /*
- * Wait for pending updates on any of the same crtc's and then
- * mark our set of crtc's as busy:
- */
- ret = start_atomic(dev->dev_private, c->crtc_mask);
- if (ret)
- goto err_free;
-
- BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
-
- /*
- * This is the point of no return - everything below never fails except
- * when the hw goes bonghits. Which means we can commit the new state on
- * the software side now.
- *
- * swap driver private state while still holding state_lock
- */
- if (to_kms_state(state)->state)
- priv->kms->funcs->swap_state(priv->kms, state);
-
- /*
- * Everything below can be run asynchronously without the need to grab
- * any modeset locks at all under one conditions: It must be guaranteed
- * that the asynchronous work has either been cancelled (if the driver
- * supports it, which at least requires that the framebuffers get
- * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
- * before the new state gets committed on the software side with
- * drm_atomic_helper_swap_state().
- *
- * This scheme allows new atomic state updates to be prepared and
- * checked in parallel to the asynchronous completion of the previous
- * update. Which is important since compositors need to figure out the
- * composition of the next frame right after having submitted the
- * current layout.
- */
-
- drm_atomic_state_get(state);
- if (nonblock) {
- queue_work(priv->atomic_wq, &c->work);
- return 0;
- }
+ msm_atomic_wait_for_commit_done(dev, state);
- complete_commit(c, false);
+ kms->funcs->complete_commit(kms, state);
- return 0;
+ drm_atomic_helper_commit_hw_done(state);
-err_free:
- kfree(c);
-error:
drm_atomic_helper_cleanup_planes(dev, state);
- return ret;
-}
-
-struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
-{
- struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
- if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
- kfree(state);
- return NULL;
- }
-
- return &state->base;
-}
-
-void msm_atomic_state_clear(struct drm_atomic_state *s)
-{
- struct msm_kms_state *state = to_kms_state(s);
- drm_atomic_state_default_clear(&state->base);
- kfree(state->state);
- state->state = NULL;
-}
-
-void msm_atomic_state_free(struct drm_atomic_state *state)
-{
- kfree(to_kms_state(state)->state);
- drm_atomic_state_default_release(state);
- kfree(state);
}
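
With this, msm stops open-coding the commit machinery: drm_atomic_helper_commit() now handles fence waits, state swap, and the (non)blocking split, and hands control back through the commit-tail hook, roughly like this (paraphrased from the DRM core helper, not verbatim):

    /* inside the core helper's commit_tail(), approximately: */
    const struct drm_mode_config_helper_funcs *funcs =
            dev->mode_config.helper_private;

    if (funcs && funcs->atomic_commit_tail)
            funcs->atomic_commit_tail(state);  /* msm_atomic_commit_tail */
    else
            drm_atomic_helper_commit_tail(state);
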
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1855182c76ce..f0da0d3c8a80 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -16,26 +16,101 @@
*/
#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
-static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+struct msm_gpu_show_priv {
+ struct msm_gpu_state *state;
+ struct drm_device *dev;
+};
+
+static int msm_gpu_show(struct seq_file *m, void *arg)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ drm_printf(&p, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, show_priv->state, &p);
+
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ return 0;
+}
+
+static int msm_gpu_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ gpu->funcs->gpu_state_put(show_priv->state);
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ kfree(show_priv);
+
+ return single_release(inode, file);
+}
+
+static int msm_gpu_open(struct inode *inode, struct file *file)
{
+ struct drm_device *dev = inode->i_private;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_show_priv *show_priv;
+ int ret;
- if (gpu) {
- seq_printf(m, "%s Status:\n", gpu->name);
- pm_runtime_get_sync(&gpu->pdev->dev);
- gpu->funcs->show(gpu, m);
- pm_runtime_put_sync(&gpu->pdev->dev);
+ if (!gpu)
+ return -ENODEV;
+
+ show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
+ if (!show_priv)
+ return -ENOMEM;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret) {
+ kfree(show_priv);
+ return ret;
+ }
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ show_priv->state = gpu->funcs->gpu_state_get(gpu);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(show_priv->state)) {
+ ret = PTR_ERR(show_priv->state);
+ kfree(show_priv);
+ return ret;
}
- return 0;
+ show_priv->dev = dev;
+
+ return single_open(file, msm_gpu_show, show_priv);
}
+static const struct file_operations msm_gpu_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_gpu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = msm_gpu_release,
+};
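
The structure here is snapshot-at-open: the expensive gpu_state_get() runs once when the debugfs file is opened, every subsequent read prints from that snapshot, and release drops it, so e.g. a cat of the "gpu" node captures state exactly once. A stripped-down sketch of the same idiom (foo_capture/foo_print/foo_free are hypothetical placeholders):

    static int foo_show(struct seq_file *m, void *arg)
    {
        foo_print(m, m->private);   /* cheap, repeatable */
        return 0;
    }

    static int foo_open(struct inode *inode, struct file *file)
    {
        void *snap = foo_capture(inode->i_private);  /* once, at open */

        return single_open(file, foo_show, snap);
    }

    static int foo_release(struct inode *inode, struct file *file)
    {
        struct seq_file *m = file->private_data;

        foo_free(m->private);       /* drop the snapshot */
        return single_release(inode, file);
    }
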
+
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -105,7 +180,6 @@ static int show_locked(struct seq_file *m, void *arg)
}
static struct drm_info_list msm_debugfs_list[] = {
- {"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
@@ -140,9 +214,6 @@ int msm_debugfs_late_init(struct drm_device *dev)
if (ret)
return ret;
ret = late_init_minor(dev->render);
- if (ret)
- return ret;
- ret = late_init_minor(dev->control);
return ret;
}
@@ -161,8 +232,14 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
- if (priv->kms->funcs->debugfs_init)
+ debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
+ dev, &msm_gpu_fops);
+
+ if (priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
+ if (ret)
+ return ret;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d90ef1d78a1b..c1abad8a8612 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -15,6 +16,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>
#include "msm_drv.h"
@@ -41,10 +44,11 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = msm_atomic_commit,
- .atomic_state_alloc = msm_atomic_state_alloc,
- .atomic_state_clear = msm_atomic_state_clear,
- .atomic_state_free = msm_atomic_state_free,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+ .atomic_commit_tail = msm_atomic_commit_tail,
};
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -77,6 +81,63 @@ module_param(modeset, bool, 0600);
* Util/helpers:
*/
+int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
+{
+ struct property *prop;
+ const char *name;
+ struct clk_bulk_data *local;
+ int i = 0, ret, count;
+
+ count = of_property_count_strings(dev->of_node, "clock-names");
+ if (count < 1)
+ return 0;
+
+ local = devm_kcalloc(dev, count, sizeof(struct clk_bulk_data),
+ GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!local[i].id) {
+ devm_kfree(dev, local);
+ return -ENOMEM;
+ }
+
+ i++;
+ }
+
+ ret = devm_clk_bulk_get(dev, count, local);
+
+ if (ret) {
+ for (i = 0; i < count; i++)
+ devm_kfree(dev, (void *) local[i].id);
+ devm_kfree(dev, local);
+
+ return ret;
+ }
+
+ *bulk = local;
+ return count;
+}
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name)
+{
+ int i;
+ char n[32];
+
+ snprintf(n, sizeof(n), "%s_clk", name);
+
+ for (i = 0; bulk && i < count; i++) {
+ if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
+ return bulk[i].clk;
+ }
+
+ return NULL;
+}
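
Hypothetical usage of the two helpers together (the function name is illustrative): fetch everything listed in clock-names, then look up an individual clock, relying on the "_clk" fallback for older DTs that kept the suffix:

    static int example_init_clocks(struct device *dev)
    {
        struct clk_bulk_data *bulk = NULL;
        struct clk *core;
        int num;

        num = msm_clk_bulk_get(dev, &bulk);
        if (num < 0)
            return num;

        /* matches either "core" or "core_clk" in clock-names */
        core = msm_clk_bulk_get_clock(bulk, num, "core");
        if (!core)
            return -ENODEV;

        return clk_prepare_enable(core);
    }
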
+
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
struct clk *clk;
@@ -148,7 +209,7 @@ struct vblank_event {
bool enable;
};
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
{
struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
struct msm_vblank_ctrl, work);
@@ -196,7 +257,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- queue_work(priv->wq, &vbl_ctrl->work);
+ kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+ &vbl_ctrl->work);
return 0;
}
@@ -207,19 +269,36 @@ static int msm_drm_uninit(struct device *dev)
struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
+ struct msm_mdss *mdss = priv->mdss;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
+ int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- cancel_work_sync(&vbl_ctrl->work);
+ kthread_flush_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}
+ /* clean up display commit/event worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->disp_thread[i].thread) {
+ kthread_flush_worker(&priv->disp_thread[i].worker);
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_flush_worker(&priv->event_thread[i].worker);
+ kthread_stop(priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+
msm_gem_shrinker_cleanup(ddev);
drm_kms_helper_poll_fini(ddev);
@@ -242,9 +321,6 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
- flush_workqueue(priv->atomic_wq);
- destroy_workqueue(priv->atomic_wq);
-
if (kms && kms->funcs)
kms->funcs->destroy(kms);
@@ -257,7 +333,8 @@ static int msm_drm_uninit(struct device *dev)
component_unbind_all(dev, ddev);
- msm_mdss_destroy(ddev);
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
ddev->dev_private = NULL;
drm_dev_unref(ddev);
@@ -267,6 +344,10 @@ static int msm_drm_uninit(struct device *dev)
return 0;
}
+#define KMS_MDP4 4
+#define KMS_MDP5 5
+#define KMS_DPU 3
+
static int get_mdp_ver(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -356,7 +437,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
struct drm_device *ddev;
struct msm_drm_private *priv;
struct msm_kms *kms;
- int ret;
+ struct msm_mdss *mdss;
+ int ret, i;
+ struct sched_param param;
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
@@ -368,54 +451,61 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- drm_dev_unref(ddev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_unref_drm_dev;
}
ddev->dev_private = priv;
priv->dev = ddev;
- ret = msm_mdss_init(ddev);
- if (ret) {
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
+ switch (get_mdp_ver(pdev)) {
+ case KMS_MDP5:
+ ret = mdp5_mdss_init(ddev);
+ break;
+ case KMS_DPU:
+ ret = dpu_mdss_init(ddev);
+ break;
+ default:
+ ret = 0;
+ break;
}
+ if (ret)
+ goto err_free_priv;
+
+ mdss = priv->mdss;
priv->wq = alloc_ordered_workqueue("msm", 0);
- priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
- init_waitqueue_head(&priv->pending_crtcs_event);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+ kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev);
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
- if (ret) {
- msm_mdss_destroy(ddev);
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
- }
+ if (ret)
+ goto err_destroy_mdss;
ret = msm_init_vram(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
- case 4:
+ case KMS_MDP4:
kms = mdp4_kms_init(ddev);
priv->kms = kms;
break;
- case 5:
+ case KMS_MDP5:
kms = mdp5_kms_init(ddev);
break;
+ case KMS_DPU:
+ kms = dpu_kms_init(ddev);
+ priv->kms = kms;
+ break;
default:
kms = ERR_PTR(-ENODEV);
break;
@@ -430,23 +520,100 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
*/
dev_err(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
- goto fail;
+ goto err_msm_uninit;
}
+ /* Enable normalization of plane zpos */
+ ddev->mode_config.normalize_zpos = true;
+
if (kms) {
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev, "kms hw init failed: %d\n", ret);
- goto fail;
+ goto err_msm_uninit;
}
}
ddev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.helper_private = &mode_config_helper_funcs;
+
+ /*
+ * This priority was found during empirical testing to give the
+ * display threads appropriate realtime scheduling while still
+ * interacting well with other realtime and normal priority tasks.
+ */
+ param.sched_priority = 16;
+ for (i = 0; i < priv->num_crtcs; i++) {
+
+ /* initialize display thread */
+ priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->disp_thread[i].worker);
+ priv->disp_thread[i].dev = ddev;
+ priv->disp_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->disp_thread[i].worker,
+ "crtc_commit:%d", priv->disp_thread[i].crtc_id);
+ if (IS_ERR(priv->disp_thread[i].thread)) {
+ dev_err(dev, "failed to create crtc_commit kthread\n");
+ priv->disp_thread[i].thread = NULL;
+ } else {
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ pr_warn("display thread priority update failed: %d\n",
+ ret);
+ }
+
+ /* initialize event thread */
+ priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->event_thread[i].worker);
+ priv->event_thread[i].dev = ddev;
+ priv->event_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->event_thread[i].worker,
+ "crtc_event:%d", priv->event_thread[i].crtc_id);
+ /*
+ * The event thread should run at the same priority as the
+ * disp_thread, because it handles frame_done events. A lower
+ * priority event thread paired with a higher priority disp_thread
+ * can push the frame_pending counters beyond 2, which can lead to
+ * commit failures at the crtc commit level.
+ */
+ if (IS_ERR(priv->event_thread[i].thread)) {
+ dev_err(dev, "failed to create crtc_event kthread\n");
+ priv->event_thread[i].thread = NULL;
+ } else {
+ ret = sched_setscheduler(priv->event_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ pr_warn("display event thread priority update failed: %d\n",
+ ret);
+ }
+
+ if ((!priv->disp_thread[i].thread) ||
+ !priv->event_thread[i].thread) {
+ /* clean up previously created threads if any */
+ for ( ; i >= 0; i--) {
+ if (priv->disp_thread[i].thread) {
+ kthread_stop(
+ priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_stop(
+ priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+ goto err_msm_uninit;
+ }
+ }
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
dev_err(dev, "failed to initialize vblank\n");
- goto fail;
+ goto err_msm_uninit;
}
if (kms) {
@@ -455,13 +622,13 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
pm_runtime_put_sync(dev);
if (ret < 0) {
dev_err(dev, "failed to install IRQ handler\n");
- goto fail;
+ goto err_msm_uninit;
}
}
ret = drm_dev_register(ddev, 0);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_mode_config_reset(ddev);
@@ -472,15 +639,23 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ret = msm_debugfs_late_init(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_kms_helper_poll_init(ddev);
return 0;
-fail:
+err_msm_uninit:
msm_drm_uninit(dev);
return ret;
+err_destroy_mdss:
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
+err_free_priv:
+ kfree(priv);
+err_unref_drm_dev:
+ drm_dev_unref(ddev);
+ return ret;
}
/*
@@ -660,7 +835,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
ret = msm_gem_cpu_prep(obj, args->op, &timeout);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -678,7 +853,7 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
ret = msm_gem_cpu_fini(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -718,7 +893,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
args->offset = msm_gem_mmap_offset(obj);
}
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -783,7 +958,7 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
ret = 0;
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -893,16 +1068,35 @@ static struct drm_driver msm_driver = {
static int msm_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_suspend)
+ return kms->funcs->pm_suspend(dev);
drm_kms_helper_poll_disable(ddev);
+ priv->pm_state = drm_atomic_helper_suspend(ddev);
+ if (IS_ERR(priv->pm_state)) {
+ drm_kms_helper_poll_enable(ddev);
+ return PTR_ERR(priv->pm_state);
+ }
+
return 0;
}
static int msm_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_resume)
+ return kms->funcs->pm_resume(dev);
+ drm_atomic_helper_resume(ddev, priv->pm_state);
drm_kms_helper_poll_enable(ddev);
return 0;
@@ -914,11 +1108,12 @@ static int msm_runtime_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_disable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->disable(mdss);
return 0;
}
@@ -927,11 +1122,12 @@ static int msm_runtime_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_enable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->enable(mdss);
return 0;
}
@@ -1030,12 +1226,13 @@ static int add_display_components(struct device *dev,
int ret;
/*
- * MDP5 based devices don't have a flat hierarchy. There is a top level
- * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
- * children devices, find the MDP5 node, and then add the interfaces
- * to our components list.
+ * MDP5/DPU based devices don't have a flat hierarchy. There is a top
+ * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+ * Populate the children devices, find the MDP5/DPU node, and then add
+ * the interfaces to our components list.
*/
- if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+ if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
+ of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate children devices\n");
@@ -1145,8 +1342,9 @@ static int msm_pdev_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
- { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
+ { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
+ { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
+ { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -1168,6 +1366,7 @@ static int __init msm_drm_register(void)
DBG("init");
msm_mdp_register();
+ msm_dpu_register();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -1184,6 +1383,7 @@ static void __exit msm_drm_unregister(void)
msm_edp_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
+ msm_dpu_unregister();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 0a653dd2e618..8e510d5c758a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -33,6 +34,7 @@
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
+#include <linux/kthread.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -51,10 +53,15 @@ struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
-struct msm_fence_cb;
struct msm_gem_address_space;
struct msm_gem_vma;
+#define MAX_CRTCS 8
+#define MAX_PLANES 20
+#define MAX_ENCODERS 8
+#define MAX_BRIDGES 8
+#define MAX_CONNECTORS 8
+
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
@@ -69,12 +76,77 @@ enum msm_mdp_plane_property {
};
struct msm_vblank_ctrl {
- struct work_struct work;
+ struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};
#define MSM_GPU_MAX_RINGS 4
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID: EDID supported
+ */
+enum msm_display_caps {
+ MSM_DISPLAY_CAP_VID_MODE = BIT(0),
+ MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
+ MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
+ MSM_DISPLAY_CAP_EDID = BIT(3),
+};
+
+/**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE: wait for the driver to flush the registers to HW
+ * @MSM_ENC_TX_COMPLETE: wait for the HW to transfer the frame to panel
+ * @MSM_ENC_VBLANK: wait for the HW VBLANK event (for driver-internal waiters)
+ */
+enum msm_event_wait {
+ MSM_ENC_COMMIT_DONE = 0,
+ MSM_ENC_TX_COMPLETE,
+ MSM_ENC_VBLANK,
+};
+
+/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+ * @num_enc: number of compression encoder blocks used
+ * @num_intf: number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+ u32 num_lm;
+ u32 num_enc;
+ u32 num_intf;
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @capabilities: Bitmask of display flags
+ * @num_of_h_tiles: Number of horizontal tiles in case of split interface
+ * @h_tile_instance: Controller instance used per tile. Number of elements is
+ * based on num_of_h_tiles
+ * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
+ * used instead of panel TE in cmd mode panels
+ */
+struct msm_display_info {
+ int intf_type;
+ uint32_t capabilities;
+ uint32_t num_of_h_tiles;
+ uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+ bool is_te_using_watchdog_timer;
+};
+
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+ struct drm_device *dev;
+ struct task_struct *thread;
+ unsigned int crtc_id;
+ struct kthread_worker worker;
+};
struct msm_drm_private {
@@ -85,7 +157,7 @@ struct msm_drm_private {
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
- /* top level MDSS wrapper device (for MDP5 only) */
+ /* top level MDSS wrapper device (for MDP5/DPU only) */
struct msm_mdss *mdss;
/* possibly this should be in the kms component, but it is
@@ -116,26 +188,24 @@ struct msm_drm_private {
struct list_head inactive_list;
struct workqueue_struct *wq;
- struct workqueue_struct *atomic_wq;
-
- /* crtcs pending async atomic updates: */
- uint32_t pending_crtcs;
- wait_queue_head_t pending_crtcs_event;
unsigned int num_planes;
- struct drm_plane *planes[16];
+ struct drm_plane *planes[MAX_PLANES];
unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ struct drm_crtc *crtcs[MAX_CRTCS];
+
+ struct msm_drm_thread disp_thread[MAX_CRTCS];
+ struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
- struct drm_encoder *encoders[8];
+ struct drm_encoder *encoders[MAX_ENCODERS];
unsigned int num_bridges;
- struct drm_bridge *bridges[8];
+ struct drm_bridge *bridges[MAX_BRIDGES];
unsigned int num_connectors;
- struct drm_connector *connectors[8];
+ struct drm_connector *connectors[MAX_CONNECTORS];
/* Properties */
struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
@@ -155,14 +225,16 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
+ struct drm_atomic_state *pm_state;
};
struct msm_format {
uint32_t pixel_format;
};
-int msm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state, bool nonblock);
+int msm_atomic_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void msm_atomic_commit_tail(struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
@@ -178,6 +250,9 @@ struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -188,7 +263,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_fault *vmf);
+vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
@@ -289,6 +364,8 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
+void __init msm_dpu_register(void);
+void __exit msm_dpu_unregister(void);
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
@@ -310,6 +387,10 @@ static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
+int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk);
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index fc175e724ad6..2a7348aeb38d 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -25,49 +26,20 @@
struct msm_framebuffer {
struct drm_framebuffer base;
const struct msm_format *format;
- struct drm_gem_object *planes[MAX_PLANE];
};
#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return drm_gem_handle_create(file_priv,
- msm_fb->planes[0], handle);
-}
-
-static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = fb->format->num_planes;
-
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
-
- drm_framebuffer_cleanup(fb);
-
- for (i = 0; i < n; i++) {
- struct drm_gem_object *bo = msm_fb->planes[i];
-
- drm_gem_object_unreference_unlocked(bo);
- }
-
- kfree(msm_fb);
-}
-
static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
- .create_handle = msm_framebuffer_create_handle,
- .destroy = msm_framebuffer_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .destroy = drm_gem_fb_destroy,
};
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
@@ -77,7 +49,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
for (i = 0; i < n; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
- msm_gem_describe(msm_fb->planes[i], m);
+ msm_gem_describe(fb->obj[i], m);
}
}
#endif
@@ -90,12 +62,11 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes;
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+ ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -107,26 +78,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], aspace);
+ msm_gem_put_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- if (!msm_fb->planes[plane])
+ if (!fb->obj[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+ return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return msm_fb->planes[plane];
+ return drm_gem_fb_get_obj(fb, plane);
}
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
@@ -160,7 +128,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
out_unref:
for (i = 0; i < n; i++)
- drm_gem_object_unreference_unlocked(bos[i]);
+ drm_gem_object_put_unlocked(bos[i]);
return ERR_PTR(ret);
}
@@ -183,7 +151,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
- format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+ format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
if (!format) {
dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
@@ -201,7 +170,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
- if (n > ARRAY_SIZE(msm_fb->planes)) {
+ if (n > ARRAY_SIZE(fb->obj)) {
ret = -EINVAL;
goto fail;
}
@@ -220,7 +189,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
goto fail;
}
- msm_fb->planes[i] = bos[i];
+ msm_fb->base.obj[i] = bos[i];
}
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
@@ -274,7 +243,7 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
- drm_gem_object_unreference_unlocked(bo);
+ drm_gem_object_put_unlocked(bo);
return ERR_CAST(fb);
}
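
The msm_fb.c conversion drops the driver-private planes[] array in favor of the obj[] array that struct drm_framebuffer already carries, which is what lets the generic drm_gem_fb_create_handle()/drm_gem_fb_destroy() helpers take over. Conceptually, drm_gem_fb_get_obj() is little more than a bounds-checked index into that array (a sketch of the helper's contract, not the DRM core source):

static inline struct drm_gem_object *
gem_fb_get_obj_sketch(struct drm_framebuffer *fb, unsigned int plane)
{
	/* fb->obj[] is owned by drm_framebuffer itself */
	if (plane >= ARRAY_SIZE(fb->obj))
		return NULL;

	return fb->obj[plane];
}
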
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c178563fcd4d..456622b46335 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
if (IS_ERR(fb)) {
dev_err(dev->dev, "failed to allocate fb\n");
- ret = PTR_ERR(fb);
- goto fail;
+ return PTR_ERR(fb);
}
bo = msm_framebuffer_bo(fb, 0);
@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fail_unlock:
mutex_unlock(&dev->struct_mutex);
-fail:
-
- if (ret) {
- if (fb)
- drm_framebuffer_remove(fb);
- }
-
+ drm_framebuffer_remove(fb);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 1aa6a4c6530c..b9fe059091f2 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -37,8 +37,6 @@ void msm_fence_context_free(struct msm_fence_context *fctx);
int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
ktime_t *timeout, bool interruptible);
-int msm_queue_fence_cb(struct msm_fence_context *fctx,
- struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 07376de9ff4c..f59ca27a4a35 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
if (msm_obj->pages) {
- /* For non-cached buffers, ensure the new pages are clean
- * because display controller, GPU, etc. are not coherent:
- */
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ if (msm_obj->sgt) {
+ /* For non-cached buffers, ensure the new
+ * pages are clean because display controller,
+ * GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents,
+ DMA_BIDIRECTIONAL);
- if (msm_obj->sgt)
sg_free_table(msm_obj->sgt);
-
- kfree(msm_obj->sgt);
+ kfree(msm_obj->sgt);
+ }
if (use_pages(obj))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
@@ -217,7 +219,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
-int msm_gem_fault(struct vm_fault *vmf)
+vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -225,15 +227,18 @@ int msm_gem_fault(struct vm_fault *vmf)
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
- int ret;
+ int err;
+ vm_fault_t ret;
/*
* vm_ops.open/drm_gem_mmap_obj and close get and put
* a reference on obj. So, we dont need to hold one here.
*/
- ret = mutex_lock_interruptible(&msm_obj->lock);
- if (ret)
+ err = mutex_lock_interruptible(&msm_obj->lock);
+ if (err) {
+ ret = VM_FAULT_NOPAGE;
goto out;
+ }
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
mutex_unlock(&msm_obj->lock);
@@ -243,7 +248,7 @@ int msm_gem_fault(struct vm_fault *vmf)
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
+ ret = vmf_error(PTR_ERR(pages));
goto out_unlock;
}
@@ -255,27 +260,11 @@ int msm_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
+ ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
mutex_unlock(&msm_obj->lock);
out:
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return ret;
}
/** get mmap offset */
@@ -470,7 +459,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
*offset = msm_gem_mmap_offset(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
fail:
return ret;
@@ -798,6 +787,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
}
#endif
+/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -854,7 +844,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -974,7 +964,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -1034,7 +1024,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -1052,7 +1042,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
if (iova) {
ret = msm_gem_get_iova(obj, aspace, iova);
if (ret) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
}
@@ -1060,7 +1050,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
msm_gem_put_iova(obj, aspace);
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
return ERR_CAST(vaddr);
}
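
The msm_gem_fault() rewrite above follows the standard vm_fault_t conversion pattern: kernel errnos stay in an int, the value handed back to the MM core lives in a vm_fault_t, and translation happens only at the boundary, either by hand (a mutex_lock_interruptible() failure becomes VM_FAULT_NOPAGE so the fault is retried) or via vmf_error() for generic errnos, while the vmf_insert_*() helpers already return vm_fault_t directly. A condensed sketch of the shape (the lock is hypothetical):

#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pfn_t.h>

static DEFINE_MUTEX(example_lock);	/* hypothetical lock */

static vm_fault_t example_fault(struct vm_fault *vmf, unsigned long pfn)
{
	vm_fault_t ret;

	/* errno-returning call: map the failure onto a fault code by hand */
	if (mutex_lock_interruptible(&example_lock))
		return VM_FAULT_NOPAGE;	/* interrupted, let the fault retry */

	/* vmf_* helpers already return vm_fault_t, pass it straight up */
	ret = vmf_insert_mixed(vmf->vma, vmf->address,
			       __pfn_to_pfn_t(pfn, PFN_DEV));

	mutex_unlock(&example_lock);
	return ret;
}
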
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9320e184b48d..c5d9bd3e47a8 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -146,6 +146,7 @@ struct msm_gem_submit {
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
+ bool in_rb; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b8dc8f96caf2..7bd83e0afa97 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -430,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
+ if (args->flags & MSM_SUBMIT_SUDO) {
+ if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
+ !capable(CAP_SYS_RAWIO))
+ return -EINVAL;
+ }
+
queue = msm_submitqueue_get(ctx, args->queueid);
if (!queue)
return -ENOENT;
@@ -471,6 +477,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
+ if (args->flags & MSM_SUBMIT_SUDO)
+ submit->in_rb = true;
+
ret = submit_lookup_objects(submit, args, file);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index d34e331554f3..ffbec224551b 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -96,6 +96,8 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
{
struct msm_gem_address_space *aspace;
+ u64 size = domain->geometry.aperture_end -
+ domain->geometry.aperture_start;
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
@@ -106,7 +108,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
aspace->mmu = msm_iommu_new(dev, domain);
drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
- (domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+ size >> PAGE_SHIFT);
kref_init(&aspace->kref);
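
The msm_gem_vma.c change fixes an off-by-range bug: drm_mm_init() takes a start and a size, but the old call passed (aperture_end >> PAGE_SHIFT) - 1, an end-derived page number, as the size, overstating the managed range whenever the aperture does not start at zero. Computing the size as end minus start first makes the intent explicit (sketch, parameters hypothetical):

static void init_aspace_mm_sketch(struct drm_mm *mm, u64 aperture_start,
		u64 aperture_end)
{
	u64 size = aperture_end - aperture_start;

	/* the third argument is a size in pages, not an end page number */
	drm_mm_init(mm, aperture_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
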
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index bd376f9e18a7..5e808cfec345 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -20,10 +20,11 @@
#include "msm_mmu.h"
#include "msm_fence.h"
+#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
-
+#include <linux/devcoredump.h>
/*
* Power Management:
@@ -87,7 +88,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
static void msm_devfreq_init(struct msm_gpu *gpu)
{
/* We need target support to do devfreq */
- if (!gpu->funcs->gpu_busy)
+ if (!gpu->funcs->gpu_busy || !gpu->core_clk)
return;
msm_devfreq_profile.initial_freq = gpu->fast_rate;
@@ -141,8 +142,6 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- int i;
-
if (gpu->core_clk && gpu->fast_rate)
clk_set_rate(gpu->core_clk, gpu->fast_rate);
@@ -150,28 +149,12 @@ static int enable_clk(struct msm_gpu *gpu)
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 19200000);
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_prepare(gpu->grp_clks[i]);
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_enable(gpu->grp_clks[i]);
-
- return 0;
+ return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}
static int disable_clk(struct msm_gpu *gpu)
{
- int i;
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_disable(gpu->grp_clks[i]);
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_unprepare(gpu->grp_clks[i]);
+ clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
/*
* Set the clock to a deliberately low rate. On older targets the clock
@@ -273,6 +256,123 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
return ret;
}
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct msm_gpu *gpu = data;
+ struct drm_print_iterator iter;
+ struct drm_printer p;
+ struct msm_gpu_state *state;
+
+ state = msm_gpu_crashstate_get(gpu);
+ if (!state)
+ return 0;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "---\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(&p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
+ if (state->comm)
+ drm_printf(&p, "comm: %s\n", state->comm);
+ if (state->cmd)
+ drm_printf(&p, "cmdline: %s\n", state->cmd);
+
+ gpu->funcs->show(gpu, state, &p);
+
+ msm_gpu_crashstate_put(gpu);
+
+ return count - iter.remain;
+}
+
+static void msm_gpu_devcoredump_free(void *data)
+{
+ struct msm_gpu *gpu = data;
+
+ msm_gpu_crashstate_put(gpu);
+}
+
+static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
+ struct msm_gem_object *obj, u64 iova, u32 flags)
+{
+ struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
+
+ state_bo->size = obj->base.size;
+ state_bo->iova = iova;
+
+ /* Only store the data for buffer objects marked for read */
+ if ((flags & MSM_SUBMIT_BO_READ)) {
+ void *ptr;
+
+ state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
+ if (!state_bo->data)
+ return;
+
+ ptr = msm_gem_get_vaddr_active(&obj->base);
+ if (IS_ERR(ptr)) {
+ kvfree(state_bo->data);
+ return;
+ }
+
+ memcpy(state_bo->data, ptr, obj->base.size);
+ msm_gem_put_vaddr(&obj->base);
+ }
+
+ state->nr_bos++;
+}
+
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+ struct msm_gpu_state *state;
+
+ /* Only save one crash state at a time */
+ if (gpu->crashstate)
+ return;
+
+ state = gpu->funcs->gpu_state_get(gpu);
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ /* Fill in the additional crash state information */
+ state->comm = kstrdup(comm, GFP_KERNEL);
+ state->cmd = kstrdup(cmd, GFP_KERNEL);
+
+ if (submit) {
+ int i;
+
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (i = 0; state->bos && i < submit->nr_bos; i++)
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova, submit->bos[i].flags);
+ }
+
+ /* Set the active crash state to be dumped on failure */
+ gpu->crashstate = state;
+
+ /* FIXME: Release the crashstate if this errors out? */
+ dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
+}
+#else
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+}
+#endif
+
/*
* Hangcheck detection for locked gpu:
*/
@@ -314,6 +414,7 @@ static void recover_worker(struct work_struct *work)
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ char *comm = NULL, *cmd = NULL;
int i;
mutex_lock(&dev->struct_mutex);
@@ -327,7 +428,7 @@ static void recover_worker(struct work_struct *work)
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- char *cmd;
+ comm = kstrdup(task->comm, GFP_ATOMIC);
/*
* So slightly annoying, in other paths like
@@ -340,22 +441,28 @@ static void recover_worker(struct work_struct *work)
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
mutex_lock(&dev->struct_mutex);
+ }
+ rcu_read_unlock();
+ if (comm && cmd) {
dev_err(dev->dev, "%s: offending task: %s (%s)\n",
- gpu->name, task->comm, cmd);
+ gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
- "offending task: %s (%s)", task->comm, cmd);
-
- kfree(cmd);
- } else {
+ "offending task: %s (%s)", comm, cmd);
+ } else
msm_rd_dump_submit(priv->hangrd, submit, NULL);
- }
- rcu_read_unlock();
}
+ /* Record the crash state */
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ kfree(cmd);
+ kfree(comm);
/*
* Update all the rings with the latest and greatest fence.. this
@@ -552,7 +659,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
msm_gem_put_iova(&msm_obj->base, gpu->aspace);
- drm_gem_object_unreference(&msm_obj->base);
+ drm_gem_object_put(&msm_obj->base);
}
pm_runtime_mark_last_busy(&gpu->pdev->dev);
@@ -634,7 +741,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
/* submit takes a reference to the bo and iova until retired: */
- drm_gem_object_reference(&msm_obj->base);
+ drm_gem_object_get(&msm_obj->base);
msm_gem_get_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
@@ -660,42 +767,22 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
-static struct clk *get_clock(struct device *dev, const char *name)
-{
- struct clk *clk = devm_clk_get(dev, name);
-
- return IS_ERR(clk) ? NULL : clk;
-}
-
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
- struct device *dev = &pdev->dev;
- struct property *prop;
- const char *name;
- int i = 0;
+ int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);
- gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
- if (gpu->nr_clocks < 1) {
+ if (ret < 1) {
gpu->nr_clocks = 0;
- return 0;
+ return ret;
}
- gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
- GFP_KERNEL);
- if (!gpu->grp_clks)
- return -ENOMEM;
+ gpu->nr_clocks = ret;
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
- gpu->grp_clks[i] = get_clock(dev, name);
+ gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "core");
- /* Remember the key clocks that we need to control later */
- if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
- gpu->core_clk = gpu->grp_clks[i];
- else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
- gpu->rbbmtimer_clk = gpu->grp_clks[i];
-
- ++i;
- }
+ gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "rbbmtimer");
return 0;
}
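
get_clocks() now delegates to the clk_bulk API: one clk_bulk_data array replaces the open-coded per-clock loops, and clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() drive the whole set in order. A minimal caller sketch (names hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

static int power_up_clocks_sketch(struct device *dev,
		struct clk_bulk_data *bulk, int count)
{
	/* prepares then enables every clock; unwinds itself on failure */
	int ret = clk_bulk_prepare_enable(count, bulk);

	if (ret)
		dev_err(dev, "failed to enable clocks: %d\n", ret);

	return ret;
}
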
@@ -865,7 +952,7 @@ fail:
if (gpu->memptrs_bo) {
msm_gem_put_vaddr(gpu->memptrs_bo);
msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ drm_gem_object_put_unlocked(gpu->memptrs_bo);
}
platform_set_drvdata(pdev, NULL);
@@ -888,7 +975,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
if (gpu->memptrs_bo) {
msm_gem_put_vaddr(gpu->memptrs_bo);
msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ drm_gem_object_put_unlocked(gpu->memptrs_bo);
}
if (!IS_ERR_OR_NULL(gpu->aspace)) {
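
The crash-state plumbing above hangs off dev_coredumpm(): the driver hands the devcoredump core an opaque data pointer plus read/free callbacks, the dump appears under /sys/class/devcoredump, the read callback is invoked with increasing offsets until it returns 0, and the free callback runs once user space deletes the node or the core times the dump out. A minimal registration sketch (struct and names hypothetical):

#include <linux/devcoredump.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_crash_state {		/* hypothetical state blob */
	char msg[64];
};

static ssize_t my_dump_read(char *buffer, loff_t offset, size_t count,
		void *data, size_t datalen)
{
	struct my_crash_state *st = data;

	if (offset >= sizeof(st->msg))
		return 0;	/* end of dump */

	count = min_t(size_t, count, sizeof(st->msg) - offset);
	memcpy(buffer, st->msg + offset, count);
	return count;
}

static void my_dump_free(void *data)
{
	kfree(data);
}

static void my_report_crash(struct device *dev, struct my_crash_state *st)
{
	/* datalen is 0: the read callback paginates the dump itself */
	dev_coredumpm(dev, THIS_MODULE, st, 0, GFP_KERNEL,
		      my_dump_read, my_dump_free);
}
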
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index fccfccd303af..9122ee6e55e4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -27,6 +27,7 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
@@ -64,9 +65,14 @@ struct msm_gpu_funcs {
void (*destroy)(struct msm_gpu *gpu);
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
/* show GPU status in debugfs: */
- void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+ void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+ /* for generation specific debugfs: */
+ int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+ struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
+ int (*gpu_state_put)(struct msm_gpu_state *state);
};
struct msm_gpu {
@@ -106,7 +112,7 @@ struct msm_gpu {
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk **grp_clks;
+ struct clk_bulk_data *grp_clks;
int nr_clocks;
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
@@ -127,6 +133,8 @@ struct msm_gpu {
u64 busy_cycles;
ktime_t time;
} devfreq;
+
+ struct msm_gpu_state *crashstate;
};
/* It turns out that all targets use the same ringbuffer size */
@@ -173,6 +181,38 @@ struct msm_gpu_submitqueue {
struct kref ref;
};
+struct msm_gpu_state_bo {
+ u64 iova;
+ size_t size;
+ void *data;
+};
+
+struct msm_gpu_state {
+ struct kref ref;
+ struct timespec64 time;
+
+ struct {
+ u64 iova;
+ u32 fence;
+ u32 seqno;
+ u32 rptr;
+ u32 wptr;
+ void *data;
+ int data_size;
+ } ring[MSM_GPU_MAX_RINGS];
+
+ int nr_registers;
+ u32 *registers;
+
+ u32 rbbm_status;
+
+ char *comm;
+ char *cmd;
+
+ int nr_bos;
+ struct msm_gpu_state_bo *bos;
+};
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
@@ -252,4 +292,32 @@ static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
kref_put(&queue->ref, msm_submitqueue_destroy);
}
+static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = NULL;
+
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ kref_get(&gpu->crashstate->ref);
+ state = gpu->crashstate;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+
+ return state;
+}
+
+static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
+{
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ if (gpu->funcs->gpu_state_put(gpu->crashstate))
+ gpu->crashstate = NULL;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+}
+
#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 17d5824417ad..fd88cebb6adb 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -40,16 +41,23 @@ struct msm_kms_funcs {
irqreturn_t (*irq)(struct msm_kms *kms);
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
- /* swap global atomic state: */
- void (*swap_state)(struct msm_kms *kms, struct drm_atomic_state *state);
/* modeset, bracketing atomic_commit(): */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
/* functions to wait for atomic commit completed on each CRTC */
void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
struct drm_crtc *crtc);
+ /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
+ const struct msm_format *(*get_format)(struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+ /* do format checking on format modified through fb_cmd2 modifiers */
+ int (*check_modified_format)(const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
/* misc: */
- const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
int (*set_split_display)(struct msm_kms *kms,
@@ -59,6 +67,9 @@ struct msm_kms_funcs {
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
+ /* pm suspend/resume hooks */
+ int (*pm_suspend)(struct device *dev);
+ int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
@@ -77,18 +88,6 @@ struct msm_kms {
struct msm_gem_address_space *aspace;
};
-/**
- * Subclass of drm_atomic_state, to allow kms backend to have driver
- * private global state. The kms backend can do whatever it wants
- * with the ->state ptr. On ->atomic_state_clear() the ->state ptr
- * is kfree'd and set back to NULL.
- */
-struct msm_kms_state {
- struct drm_atomic_state base;
- void *state;
-};
-#define to_kms_state(x) container_of(x, struct msm_kms_state, base)
-
static inline void msm_kms_init(struct msm_kms *kms,
const struct msm_kms_funcs *funcs)
{
@@ -97,9 +96,20 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
-int msm_mdss_init(struct drm_device *dev);
-void msm_mdss_destroy(struct drm_device *dev);
-int msm_mdss_enable(struct msm_mdss *mdss);
-int msm_mdss_disable(struct msm_mdss *mdss);
+struct msm_kms *dpu_kms_init(struct drm_device *dev);
+
+struct msm_mdss_funcs {
+ int (*enable)(struct msm_mdss *mdss);
+ int (*disable)(struct msm_mdss *mdss);
+ void (*destroy)(struct drm_device *dev);
+};
+
+struct msm_mdss {
+ struct drm_device *dev;
+ const struct msm_mdss_funcs *funcs;
+};
+
+int mdp5_mdss_init(struct drm_device *dev);
+int dpu_mdss_init(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
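
struct msm_mdss is now a small vtable'd base object, so the MDP5 and DPU paths can each supply their own enable/disable/destroy via mdp5_mdss_init()/dpu_mdss_init(). A backend typically embeds the base struct and recovers its private state with container_of() (sketch, names hypothetical):

struct my_mdss {			/* hypothetical backend */
	struct msm_mdss base;
	void __iomem *mmio;
};

#define to_my_mdss(x) container_of(x, struct my_mdss, base)

static int my_mdss_enable(struct msm_mdss *mdss)
{
	struct my_mdss *my = to_my_mdss(mdss);

	/* ungate clocks/power for the block behind my->mmio here */
	if (!my->mmio)
		return -ENODEV;

	return 0;
}

static int my_mdss_disable(struct msm_mdss *mdss)
{
	return 0;
}

static void my_mdss_destroy(struct drm_device *dev)
{
	/* unwind whatever the backend's *_mdss_init() set up */
}

static const struct msm_mdss_funcs my_mdss_funcs = {
	.enable  = my_mdss_enable,
	.disable = my_mdss_disable,
	.destroy = my_mdss_destroy,
};
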
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 6ca98da35f63..6f5295b3f2f6 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -76,7 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
if (ring->bo) {
msm_gem_put_iova(ring->bo, ring->gpu->aspace);
msm_gem_put_vaddr(ring->bo);
- drm_gem_object_unreference_unlocked(ring->bo);
+ drm_gem_object_put_unlocked(ring->bo);
}
kfree(ring);
}